Columns (as summarized by the dataset viewer):

  query            string    lengths 9 to 3.4k
  document         string    lengths 9 to 87.4k
  metadata         dict
  negatives        sequence  lengths 4 to 101
  negative_scores  sequence  lengths 4 to 101
  document_score   string    lengths 3 to 10
  document_rank    string    102 distinct values
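Each row pairs a natural-language query with its gold code document and a list of mined negative documents, together with the similarity scores of those negatives and the score/rank of the gold document itself. A minimal sketch of inspecting one row with the Hugging Face datasets library — the dataset id below is a placeholder, not the real repository name:

from datasets import load_dataset

# Placeholder id; substitute the actual Hub repository for this dump.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language docstring used as the query
print(row["document"])             # gold code snippet for that query
print(len(row["negatives"]))       # between 4 and 101 mined negative snippets
print(row["negative_scores"][:5])  # similarity scores aligned with the negatives
print(row["document_score"], row["document_rank"])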
Initialize your data structure here.
def __init__(self):
    # list for all numbers
    self.nums = []
    # the index for each number in the list
    self.inds = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_empty(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self.data = []\n self.record = {}", "def initialize(self):\n self.data = None\n self.errors = []", "def initialize(self):\n\t\tpass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self):\n self.structure = {}", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }", "def __init__(self):\n self._data = [] # non-public underlying Python list as storage", "def __init__(self):\n self._distance_data = []\n self._location_data = []\n self._package_data = []", "def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []", "def __init__(self):\n self.data = []\n self.idx = {}", "def __init__(self):\n self._dict = {}\n self._array = []", "def init(self) -> None:", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def _init(self):\n pass", "def initialize(self):\n return", "def initialize(self):\n self.muondEdx = []\n self.muondNdx = []\n self.muonmomentum = []\n self.piondEdx = []\n self.piondNdx = []\n self.pionmomentum = []\n self.kaondEdx = []\n self.kaondNdx = []\n self.kaonmomentum = []\n self.protdEdx = []\n self.protdNdx = []\n self.protmomentum = []\n self.elecdEdx = []\n self.elecdNdx = []\n self.elecmomentum = []", "def initialize(self) -> None:\n pass", "def initialize(self):\n pass # pragma: no cover", "def __init__(self):\n self.d = {}\n self.l = []", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialize(self):\n raise NotImplementedError", "def initialise(self):", "def __init__(self):\n self.l = {}\n self.s = {}", "def __init__(self):\n self._data=[]", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def init(self) -> None:\n ...", "def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = 
set([])\n\n self.db = db", "def __init__(self):\n self.keys = []\n self.values = []", "def __init__(self):\n self.d = {}\n self.h = []", "def memb_init(self):\n self.initialize()", "def __init__(self):\n self.dic={}\n self.data=[]", "def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0", "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def __init__(self, data={}):\n self._update_(data)", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def _init(self):\n raise NotImplementedError", "def __init__(self):\r\n self.indices = {}\r\n self.data = []\r\n self.len = 0", "def __init__(self):\n self.metadata = {}\n self.geometry = {'array': None, \n 'geom': None, \n 'wkt': None}", "def __init__(self, data):\n self.data = data\n return", "def __init__(self):\n self.root = [None, dict(), False] # val, sons, end-able", "def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True", "def __init__(self):\n self.data = {}\n self.refresh()", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def init(self):", "def init(self):", "def __init__(self):\n self.x = {}\n self.len = 0\n self.annotations = {}", "def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()", "def initialize(self):\n self.keys = [None] * BUCKET_SIZE\n self.values = [None] * BUCKET_SIZE", "def __init__(self, data: dict = {}):\n pass", "def initialize(self): \r\n pass", "def __init__(self):\n self._data = PositionalList() # list of Item instances", "def __init__(self):\n self.table = {}\n self.ls = []", "def initialize(self):\r\n self.bucket_array.initialize()", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data):\n self.data = data", "def __init__(self, data=None):\n self.data = data", "def __init__(self):\n self._data = set()", "def __init__(self):\n self.key_dict = {}\n self.value_dict = {}\n self.head, self.last = None, None" ]
[ "0.7765608", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7645274", "0.7595176", "0.75853467", "0.7558298", "0.7530608", "0.7530608", "0.7530608", "0.7530608", "0.7530608", "0.74971247", "0.74971247", "0.7478105", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.7477832", "0.744441", "0.7426435", "0.74157697", "0.74143684", "0.73898417", "0.73898417", "0.7389144", "0.7387738", "0.7383786", "0.7324126", "0.731669", "0.73065454", "0.729799", "0.7287291", "0.7271846", "0.725931", "0.72522944", "0.72522944", "0.72522944", "0.72494334", "0.72494334", "0.72494334", "0.7243696", "0.7239823", "0.72368526", "0.7208368", "0.72016877", "0.72016877", "0.72016877", "0.72016877", "0.71985286", "0.71985286", "0.7195241", "0.71885264", "0.71857035", "0.7176733", "0.7160906", "0.7159325", "0.7149614", "0.71474445", "0.7135992", "0.7128525", "0.7123646", "0.71142536", "0.71142536", "0.71142536", "0.71142536", "0.71109176", "0.71011794", "0.7099338", "0.708543", "0.70676583", "0.70648897", "0.70618606", "0.70606047", "0.7059818", "0.7039291", "0.7039291", "0.7035077", "0.70237756", "0.70142615", "0.6999669", "0.69952625", "0.6994778", "0.6987417", "0.6981039", "0.6976582", "0.6976582", "0.6976431", "0.6976431", "0.6976431", "0.6976431", "0.69684774", "0.69561034", "0.69411176" ]
0.0
-1
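The metadata attached to each row marks the supported training objective as a triplet over (query, document, negatives). A sketch of how such a row could feed a contrastive, InfoNCE-style loss; the encoder, the temperature, and the use of PyTorch are assumptions for illustration, not something the dataset prescribes:

import torch
import torch.nn.functional as F

def triplet_infonce_loss(encode, row, temperature=0.05):
    # Encode the query, the gold document, and every negative document.
    q = F.normalize(encode(row["query"]), dim=-1)                    # (d,)
    docs = [row["document"]] + list(row["negatives"])
    d = F.normalize(torch.stack([encode(t) for t in docs]), dim=-1)  # (1 + n_neg, d)

    # Cosine similarity between the query and every candidate document.
    sims = (d @ q) / temperature                                     # (1 + n_neg,)

    # The gold document sits at index 0, so the target class is 0.
    return F.cross_entropy(sims.unsqueeze(0), torch.tensor([0]))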
Inserts a value to the set. Returns true if the set did not already contain the specified element.
def insert(self, val):
    # if it already exists return error
    if val in self.inds:
        return False
    # record the index and save the number
    self.inds[val] = len(self.nums)
    self.nums.append(val)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, val: int) -> bool:\n if val not in self.set:\n self.set.add(val)\n return True\n return False", "def insert(self, val: int) -> bool:\n if val not in self.value_set:\n self.value_set.add(val)\n self.values.append(val)\n return True\n else:\n return False", "def insert(self, val: int) -> bool:\n if val not in self.set:\n self.nums.append(val);\n self.set.add(val);\n return True;\n return False;", "def insert(self, val: int) -> bool:\n if(val not in self.randomSet):\n self.randomSet[val] = 1\n return True\n else:\n return False", "def insert(self, val: int) -> bool:\n if val in self.l:\n return False\n self.l.add(val)\n return True", "def insert(self, val):\n new_item = False\n if val not in self.ds:\n self.ds.add(val)\n self.keys.append(val)\n new_item = True\n return new_item", "def insert(self, val):\n if val in self.d:\n return False\n self.d[val] = len(self.l)\n self.l.append(val)\n return True", "def insert(self, val: int) -> bool:\n value = val not in self.container\n self.container.add(val)\n return value", "def insert(self, val):\n if val in self.dic:\n return False\n else:\n self.data.append(val)\n self.dic[val]=len(self.data)-1\n return True", "def insert(self, val):\n res = val in self.map\n idx = len(self.vec)\n if res:\n self.map[val].append(idx)\n self.vec.append(val)\n else:\n self.map[val] = [idx]\n self.vec.append(val)\n return not res", "def insert(self, val: int) -> bool:\n if self.d.get(val):\n return False\n else:\n self.d[val] = True\n return True", "def insert(self, val: int) -> bool:\n if val in self._dict:\n return False\n \n self._dict[val] = len(self._list)\n self._list.append(val)\n return True", "def insert(self, val: int) -> bool:\n self.elements.append(val)\n self.idx[val].add(len(self.elements) - 1)\n return len(self.idx[val]) == 1", "def insert(self, val):\n if val in self.map:\n return False\n \n self.nums.append(val)\n self.map[val] = len(self.nums) - 1\n \n return True", "def insert(self, val: int) -> bool:\n if val in self.data:\n return False\n self.data[val] = None\n self.total += 1\n return True", "def insert(self, val: int) -> bool:\n if self.store_dict.get(val) != None:\n return False\n self.store_list.append(val)\n self.store_dict[val] = len(self.store_list) - 1\n return True", "def insert(self, e):\n if not e in self.vals:\n self.vals.append(e)", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict:\n return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n return True", "def insert(self, val):\n if val in self.record:\n return False\n \n self.record[val] = len(self.data)\n self.data.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n index = len(self.keys)\n self.map[val] = index\n self.keys.append(val)\n return True", "def insert(self, val: int) -> bool:\n if val not in self.dic:\n self.lst.append(val)\n self.dic[val] = len(self.lst) - 1\n return True", "def 
insert(self, val: int) -> bool:\n \n retVal = True if val not in self.map else False\n if retVal:\n self.map[val] = len(self.arr)\n self.arr.append(val)\n return retVal", "def insert(self, val: int) -> bool:\n self.dict[val].add(len(self.arr))\n self.arr.append(val)\n return len(self.dict[val]) == 1", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insert(self, e): \n if not e in self.vals:\n self.vals.append(e)", "def insert(self, val: int) -> bool:\n if val in self.d:\n return False\n self.d[val] = len(self.arr)\n self.arr.append(val) \n return True", "def insert(self, val):\n if val not in self.dict_val:\n self.dict_val[val] = len(self.list_val)\n self.list_val.append(val)\n return True\n return False", "def insert(self, val: int) -> bool:\n if val in self.map:\n return False\n self.array.append(val)\n self.map[val] = len(self.array)-1\n return True", "def add(self, val):\n val = self._conversion(val)\n if self.__tree.search(val):\n print(\"Element is already exist\") \n else:\n if isinstance(val, TYPES[self.__set_type]):\n self.__tree.insert(val)\n else:\n print(\"TypeError : Wrong Input\")", "def insert(self, val: int) -> bool:\n if val not in self.arr:\n self.arr.append(val)\n index = len(self.arr) - 1\n self.map[val] = index\n return True\n return False", "def insert(self, val: int) -> bool:\n if val in self.hashmap:\n return False\n self.hashmap[val] = len(self.array)\n self.array.append(val)\n return True", "def insert(self, e): \r\n if not e in self.vals:\r\n self.vals.append(e)", "def insert(self, val: int) -> bool:\n \n self.items.append(val)\n self.ind[val].add(len(self.items)-1)\n return len(self.ind[val]) == 1", "def insert(self, val: int) -> bool:\n # print(f\"insert {val}\")\n already = val in self.indexes\n self.items.append(val)\n self.indexes[val].add(len(self.items) - 1)\n return not already", "def insert(self, val: int) -> bool:\n if val not in self.dict:\n self.dict[val] = len(self.arr)\n self.arr.append(val)\n return True\n return False", "def insert(self, val: int) -> bool:\n if val not in self._dict:\n self._dict[val] = len(self._array)\n self._array.append(val)\n return True\n return False", "def insert(self, val: int) -> bool:\n        if val not in self.hashmap:\n            self.list.append(val)\n            self.hashmap[val]=len(self.list)-1\n            return True\n        else:\n            return False\n            ", "def insert(self, val: int) -> bool:\n if val not in self.counts:\n self.counts[val] = 1\n return True\n else:\n self.counts[val] += 1\n return False", "def insert(self, val):\n if val not in self.numSet:\n self.numSet.add(val)\n # add to tail first\n self.nums.append(val)\n # if the last few numbers are invalid, swap the new value to the end of the valid prefix\n self.swap(self.size, -1)\n self.valToIndex[val] = self.size\n self.size += 1\n return True\n else:\n return False", "def insert(self, val: int) -> bool:\n if val in self.randomized_hash:\n self.randomized_hash[val].append(len(self.array))\n self.array.append(val)\n return False\n else:\n self.randomized_hash[val] = [len(self.array)]\n self.array.append(val)\n return True", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def insert(self, e):\n if e not in self.vals:\n self.vals.append(e)", "def insert(self, val: int) -> bool:\n if val in self.map:\n 
return False\n # put in slot\n self.slot.append(val)\n # insert to map\n self.map[val] = len(self.slot) - 1\n return True", "def insert(self, val):\n if val not in self.posFind or self.posFind[val] == -1:\n self.nums.append(val)\n self.posFind[val] = len(self.nums) - 1\n return True\n return False", "def insert(self, val: int) -> bool:\n if val in self.idx:\n return False\n else:\n # append value into data \n self.data.append(val)\n \n # record the idx of the value in data\n self.idx[val] = len(self.data) - 1\n return True", "def add(self, el: T) -> bool:\n if el in self:\n return False\n else:\n self[el] = el\n return True", "def insert(self, val: int) -> bool:\n if val in self.dict: return False\n self.dict[val] = len(self.list)\n self.list.append(val)\n # print(\"insert\",val, \"==>\", self.dict, self.list)\n return True", "def insert(self, val: int) -> bool:", "def insert(self, val: int) -> bool:\n if val in self.val2i: return False\n if self.size == len(self.array): self.array.append(val)\n else: self.array[self.size] = val\n self.val2i[val] = self.size\n self.size += 1\n #print(self.size)\n return True", "def add(self, x):\n if x not in self:\n self._seen.add(x)\n self._list.append(x)\n return True\n return False", "def insert(self, val):\n if val not in self.table.keys():\n self.table[val] = len(self.ls)\n self.ls.append(val)\n return True\n return False", "def insert(self, val):\r\n if len(self.data) != self.len:\r\n self.data[self.len] = val\r\n else:\r\n self.data.append(val)\r\n if val in self.indices:\r\n self.indices[val].append(self.len)\r\n self.len += 1\r\n return False\r\n else:\r\n self.indices[val] = [self.len]\r\n self.len += 1\r\n return True", "def add(self, element) -> bool:\n if self.data == element.data:\n return False\n\n if self.data > element.data:\n if self.left is None:\n self.left = element\n return True\n else:\n return self.left.add(element)\n else:\n if self.right is None:\n self.right = element\n return True\n else:\n return self.right.add(element)", "def insert(self, item):\n for h_num in xrange(self.k):\n val = self.hash_value(item, h_num)\n self.arr[val] = True", "def insert(self, key, value):\n tags = self.__all_tags()\n if value not in tags:\n tags.insert(key, value)\n self.__post_changes(tags)", "def add(self, elem: T):\n if elem not in self._unique_values:\n if len(self._heap) < self.maxsize:\n heapq.heappush(self._heap, HeapObj(elem))\n elif elem < self._heap[0].val:\n heapq.heappushpop(self._heap, HeapObj(elem))\n self._unique_values.add(elem)", "def insertLast(self, value):\n if not self.isFull():\n self._data.append(value)\n return True\n else:\n return False", "def insert(self, val):\n if val not in self.table.keys():\n self.table[val] = 0\n return True\n return False", "def add_new_element_to_store(entry_sequence, element, is_propagated_call=False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tboard[int(entry_sequence)] = element\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def push(self, element):\n if not self.full():\n heapq.heappush(self.queue, element)\n self.size += 1\n return True\n else:\n if element >= self.queue[0]:\n heapq.heapreplace(self.queue, element)\n return True\n else:\n return False", "def __setitem__(self, key, value):\n ndx = self._findPosition(key)\n if ndx:\n self._entryList[ndx].value = value\n return False\n else:\n entry = _MapEntry(key, value)\n self._entryList.append(entry)\n return True", "def appendIntoSet(_session, _segment, _el, _set, _arc_type, 
_duplicate_allow = False):\n if not _duplicate_allow and checkIncToSets(_session, _el, [_set], 0): # check all arc types\n import suit.core.exceptions\n raise suit.core.exceptions.ItemAlreadyExistsError(\"element %s already exist in set %s\" %\n (str(_el), str(_set)))\n \n createPair(_session, _segment, _set, _el, _arc_type)", "def add_value(self, value):\n h = self.hash_value(value)\n self.values[h] = True", "def _insert(self, key, value):\n entry = self._lookup(key)\n if entry.value is None:\n self.used += 1\n if entry.key is not dummy:\n self.filled += 1\n entry.key = key\n entry.hash = self.first_hash(key)\n entry.value = value", "def add(self, item):\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n self.__log__.append(SetAdd(value=item))", "def insert(self, key: int) -> bool:\n if self.empty(): # empty tree, so value becomes the root\n self.root = Node(key)\n return True\n\n current = self.root # start at the root\n while current.key != key:\n\n if key < current.key:\n\n if current.left is None: # if no left child exists, insert element as left child\n self.root = current.add_left(key=key)\n return True\n\n else: # if a left child does exist, traverse left\n current = current.left\n\n elif key > current.key:\n\n if current.right is None: # if no right child exists, insert element as right child\n self.root = current.add_right(key=key)\n return True\n\n else: # if a right child does exist, traverse right\n current = current.right\n\n return False # failure to insert", "def insert(self, value):\n if self._root:\n node = self._root\n child = self._root\n parent = None\n while node and child:\n if node.key == value:\n child = None\n else:\n parent = node\n if value < node.key:\n node = node._left\n else:\n node = node._right\n if child:\n child = Node(value, None, None)\n if value < parent.key:\n parent._left = child\n else:\n parent._right = child\n return True\n else:\n return False\n else:\n self._root = Node(value, None, None)\n return True", "def contains(self, element):\n pass", "def insert(self, key, value):\n\n\t\t# If the key already exists in redis, then return\n\t\tif self.checkIfExists(key):\n\t\t\traise Exception(\"Key/Value pair already exists in Redis\")\n\t\t\n\t\t# Otherwise, insert into Redis\n\t\telse:\n\t\t\tself.db.set(key, value)", "def insert(self, key, values=[]):\n\n return If(\n self.contains(key),\n BadRequest(\"cannot insert: key already exists\"),\n self._put(\"\", key, values))", "def insertFront(self, value):\n if not self.isFull():\n self._data.insert(0,value)\n return True\n else:\n return False", "def test_contains(self):\n s = djset()\n s.add([1, 2, 3])\n s.add([4, 5, 6])\n self.assertTrue(2 in s)\n self.assertTrue(5 in s)", "def insertLast(self, value: int) -> bool:\n if not self.isFull():\n # 后端插入始终是先移动后插入,self.rear始终指向后端最后插入的元素\n self.rear = self.move_forward(self.rear)\n self.q[self.rear] = value\n return True\n else:\n return False", "async def _add(self, key, value, ttl=None):\n\n with await self._connect() as redis:\n was_set = await redis.set(key, value, expire=ttl, exist=redis.SET_IF_NOT_EXIST)\n if not was_set:\n raise ValueError(\n \"Key {} already exists, use .set to update the value\".format(key))\n return was_set", "def insert(self, value):\n if self.root is None:\n self.root = Node(value)\n self.size = 1\n return self.root\n inserted = self._insert(self.root, value)\n # if this does not hold true then the value was already\n # contained within the tree\n if inserted is not 
AVLTree.NULL_NODE:\n self.size += 1\n self.root = inserted\n return inserted", "def add(self, element):\n if not self.contains(element):\n bucket_index = self._bucket_index(element)\n self.buckets[bucket_index].append(element)\n self.size += 1", "def insert(self, value):\n bucketNum = self.__hash(value)\n self.__buckets[bucketNum].append(value)", "def insert(self, row, col, value):\n if self.valid_square(row, col, value) or value == 0:\n self.puzzle[row][col] = value\n return True\n return False", "def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])", "def insert(self, key, value):\n # If the array is full, call rehash function\n\n key = str(key)\n position = self.hash(key)\n probe_count = 0\n insert_done = False\n\n for _ in range(self.table_capacity):\n if self.array[position] is None:\n self.array[position] = (key, value) # if the element at the position is 'None', insert tuple directly.\n self.count += 1\n insert_done = True\n break\n elif self.array[position][0] == key:\n # if the key in the tuple at the position corresponds to the given key; the element is already present\\\n # and only the data of the tuple needs to be updated,\n # in order to do this a new tuple is created as tuples are immutable.\n self.array[position] = (key, value)\n insert_done = True\n break\n else: # Checking next Position\n position = (position + 1) % self.table_capacity\n probe_count += 1\n\n if probe_count > 0 and insert_done:\n self.probe_array.append(probe_count)\n\n if self.is_full() and not insert_done:\n self.rehash()\n self.insert(key, value )", "def contains(self, value):\n if self.root is None:\n return False\n else:\n if type(value) != self.typing: # not an error\n return False\n # TODO allow different yet comparable types\n else: \n hasElement, self.root = self.root.contains(value)\n return hasElement", "def findSetWithElement(self, element, \n is_present=True):\n sets = [s for s in self.sets if element in s]\n if len(sets) > 1:\n raise RuntimeError(\"Should have at most instance of values.\")\n if len(sets) == 0:\n if is_present:\n raise ValueError(\"%s is not present\" % str(element))\n result = cn.NULL_SET \n else:\n result = sets[0]\n return result", "def insert(self, element: Node):\r\n if self._top == None:\r\n self._top = Node(None, element)\r\n return None\r\n new_element = self._add_element(element)\r\n self._correct_tree(new_element)", "def _can_insert(self, index, value):\n return not bool(self._insert_callback(index, value))", "def insert(self, key, value):\n hash_key = hash(key) % self.length\n bucket = self.array[hash_key]\n for idx, key_val_pair in enumerate(bucket):\n k, v = key_val_pair\n if k == key:\n bucket[idx] = [key, value]\n return\n bucket.append([key, value])", "def add(self, item):\n self.update(set([item]))", "def insert(self, key, value):\n\n if None == self.root:\n self.root = BSTNode(key,value)\n return True\n current_node = self.root\n while current_node:\n if key == current_node.key:\n print(\"The key does exist!\")\n return False\n elif key < current_node.key:\n if current_node.left:\n current_node = current_node.left\n else:\n current_node.left = BSTNode(key, value, current_node)\n return True\n else:\n if current_node.right:\n current_node = current_node.right\n else:\n current_node.right = BSTNode(key,value,current_node)\n return 
True", "def insertLast(self, value: int) -> bool:\n if self.isFull():\n return False\n\n self.arr[self.rear] = value\n self.rear = (self.rear + 1) % self.capacity\n return True", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def insert(self, table, element):\n\n update = self.update(table, element)\n if update:\n return update\n\n fields = []\n values = []\n for key in element.keys():\n fields.append(key)\n values.append(element[key])\n result = self.__insert(table, fields, values)\n return result", "def insert(self, key, value):\n if key in self.map:\n return\n\n try:\n tag_key = TagKey(key)\n tag_val = TagValue(value)\n self.map[tag_key] = tag_val\n except ValueError:\n raise", "def insert(self, value):\n bucketNum = self.__hash(value)\n originalBucketNum = bucketNum\n if self.__buckets[bucketNum] is not None:\n bucketNum = self.__rehash(bucketNum)\n while self.__buckets[bucketNum] is not None and bucketNum != originalBucketNum:\n bucketNum = self.__rehash(bucketNum)\n if self.__buckets[bucketNum] is None:\n self.__buckets[bucketNum] = value\n else:\n raise Exception(\"Table Full\")", "def __setitem__(self, key, value):\n self.insert(key, value)", "def insertLast(self, value: int) -> bool:\n if not self.isFull():\n self._rear = (self._rear - 1)%self._k\n self._deque[self._rear] = value\n self._elems += 1\n return True\n \n return False", "def insertFront(self, value: int) -> bool:\n if self.isFull():\n return False\n \n self.front = (self.front - 1 + self.capacity) % self.capacity\n self.arr[self.front] = value\n return True" ]
[ "0.7981645", "0.7609813", "0.75033724", "0.7475974", "0.72454274", "0.72146404", "0.7200908", "0.71701366", "0.7164788", "0.7147172", "0.7050728", "0.69496", "0.69478387", "0.6929294", "0.69116586", "0.6902306", "0.6878695", "0.68752456", "0.68752456", "0.68752456", "0.68752456", "0.68752456", "0.68483794", "0.68282676", "0.6824123", "0.6815112", "0.67934644", "0.67887264", "0.67887264", "0.6772863", "0.676715", "0.67639124", "0.6758365", "0.6746595", "0.6744333", "0.6708911", "0.67029166", "0.66870564", "0.6669649", "0.6644539", "0.6630677", "0.663008", "0.6627293", "0.6620656", "0.6610448", "0.66012", "0.660104", "0.6549685", "0.65345955", "0.65091157", "0.65091044", "0.64907193", "0.63113374", "0.6275656", "0.6238977", "0.61729246", "0.6133655", "0.6124323", "0.6048807", "0.6011311", "0.6010429", "0.5999179", "0.5970628", "0.5931052", "0.5902473", "0.58896124", "0.58414835", "0.58236927", "0.5816867", "0.5800011", "0.57901853", "0.5779278", "0.57512146", "0.57402706", "0.5681101", "0.5658488", "0.56289405", "0.5627451", "0.56255686", "0.56077474", "0.557429", "0.5568321", "0.55583483", "0.5547409", "0.55232865", "0.5519372", "0.5515763", "0.5501178", "0.5476854", "0.54613036", "0.5454871", "0.5436165", "0.5425267", "0.5423334", "0.54130965", "0.54129714", "0.54083884", "0.5399216", "0.53926224", "0.53859794" ]
0.6459848
52
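For this row the gold document's score is 0.6459848 and its rank is 52, which matches the 0-based position that score would take in the descending negative_scores list (52 negatives score strictly higher). Reading document_rank that way is an inference from the numbers, as is treating the first row's rank of -1 (with a score of 0.0) as a sentinel for an unscored document; a small check under that assumption:

def rank_among_negatives(document_score, negative_scores):
    # Count the negatives that score strictly higher than the gold document;
    # that count is the 0-based rank the document would take in the
    # descending negative_scores list.
    return sum(score > document_score for score in negative_scores)

# With the scores listed above, this returns 52 for document_score 0.6459848.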
Removes a value from the set. Returns true if the set contained the specified element.
def remove(self, val):
    # if it doesn't exist return error
    if val not in self.inds:
        return False
    # find the index for the val in list, and take the last element
    ind, temp = self.inds[val], self.nums.pop()
    # if the one to delete is not the last number
    if ind < len(self.nums):
        # place the last element at where the val was
        # and update the index for the last element
        self.nums[ind], self.inds[temp] = temp, ind
    # delete the index for val
    del self.inds[val]
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, val: int) -> bool:\n if val in self.set:\n self.set.remove(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val in self.set:\n self.set.remove(val);\n self.nums.remove(val);\n return True;\n return False;", "def remove(self, val: int) -> bool:\n temp = self.randomSet.pop(val, False)\n return True if temp != False else temp", "def remove(self, val: int) -> bool:\n if val in self.value_set:\n self.value_set.remove(val)\n if val in self.values:\n self.values.remove(val)\n return True\n else:\n return False", "def remove(self, value: object) -> bool:\n for _ in range(self.da.length()):\n if value == self.da[_]:\n self.da.remove_at_index(_)\n return True\n return False", "def remove(self, val: int) -> bool:\n value = val in self.container\n self.container.discard(val)\n return value", "def remove(self, el: T) -> bool:\n if el in self:\n del self[el]\n return True\n else:\n return False", "def remove(self, element):\n\n currentNodePointer = self.head\n # case where the first node has the element as value then erase the value\n if(currentNodePointer.getData() == element):\n self.head = self.head.getNext()\n return True\n \n while(currentNodePointer.getNext() is not None):\n if(currentNodePointer.getNext().getData() == element):\n currentNodePointer.setNext(currentNodePointer.getNext().getNext())\n return True\n else:\n currentNodePointer = currentNodePointer.getNext()\n return False", "def remove_value(self, value: Hashable) -> bool:\n\t\treturn self.remove_values([value])", "def remove(self, val: int) -> bool:\n \n # print(self.ind)\n # no value\n if val not in self.ind or not self.ind[val]:\n return False\n else:\n remove_ind = self.ind[val].pop() # random removal\n if not self.ind[val]:\n self.ind.pop(val)\n \n # set will not add duplicate values. 
So adding first is OK evenif the last elem is the one to delete\n self.ind[self.items[-1]].add(remove_ind)\n self.ind[self.items[-1]].discard(len(self.items)-1)\n self.items[-1], self.items[remove_ind] = self.items[remove_ind], self.items[-1]\n self.items.pop(-1)\n # print(self.ind)\n return True", "def remove(self, val):\n i = self.d.get(val)\n if i is None:\n return False\n assert 0 <= i < len(self.l)\n last_val = self.l[-1]\n if val != last_val:\n self.d[last_val] = i\n self.l[i] = last_val\n del self.d[val]\n _ = self.l.pop()\n return True", "def remove(self, val: int) -> bool:\n if self.d.get(val):\n del self.d[val]\n return True\n else:\n return False", "def remove(self, val: int) -> bool:\n if val in self.l:\n self.l.remove(val)\n return True\n return False", "def remove(self, val):\n if val in self.numSet:\n # remove from numSet\n self.numSet.discard(val)\n # remove from valToIndex\n index = self.valToIndex[val]\n del self.valToIndex[val]\n # remove from nums & update the index of the swapped value\n valToSwap = self.nums[self.size - 1]\n self.swap(index, self.size - 1)\n self.valToIndex[valToSwap] = index\n # don't forget to decrease the size\n self.size -= 1\n return True\n else:\n return False", "def remove(self, val: int) -> bool:\n if val in self.dict:\n last_element, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_element] = last_element, idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val in self.dict:\n last_element, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_element] = last_element, idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val in self.data:\n self.data.pop(val)\n self.total -= 1\n return True\n return False", "def remove(self, val: int) -> bool:\n if val in self.hashmap:\n last_elem, idx = self.array[-1], self.hashmap[val]\n self.array[idx], self.hashmap[last_elem] = last_elem, idx\n self.array.pop()\n self.hashmap.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val not in self.dict:\n return False\n last_ele, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_ele] = last_ele, idx\n\n self.list.pop()\n del self.dict[val]\n return True", "def remove(self, val: int) -> bool:\n if not self.idx[val]:\n return False\n last = self.elements[-1]\n to_remove = self.idx[val].pop()\n self.elements[to_remove] = last\n self.idx[last].add(to_remove)\n self.idx[last].discard(len(self.elements) - 1)\n\n self.elements.pop()\n return True", "def remove(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] != value:\n pass\n else:\n found = True\n self.__delitem__(i)\n break\n if not found:\n raise ValueError", "def remove(self, val: int) -> bool:\n if val in self.dict:\n idx, last_elem = self.dict[val], self.list[-1]\n self.list[idx] = last_elem\n self.dict[last_elem] = idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val in self.dict:\n idx, last_elem = self.dict[val], self.list[-1]\n self.list[idx] = last_elem\n self.dict[last_elem] = idx\n self.list.pop()\n self.dict.pop(val)\n return True\n return False", "def remove(self, val: int) -> bool:\n if val not in self._dict:\n return False\n idx = self._dict[val]\n last_elem = self._array[-1]\n self._array[idx], self._array[-1] = self._array[-1], self._array[idx]\n self._dict[last_elem] = idx\n 
self._dict.pop(val)\n self._array.pop()\n return True", "def remove(self, val):\n temp = self.table.pop(val, None)\n if temp is None:\n return False\n return True", "def remove(self, val: int) -> bool:\n if val in self.arr:\n index, lastVal = self.map[val], self.arr[-1]\n self.arr[index], self.arr[-1] = lastVal, self.arr[index]\n self.map[lastVal] = index\n self.arr.pop()\n self.map.pop(val)\n return True\n return False", "def remove(self, val):\n in_ds = False\n if val in self.ds:\n self.ds.remove(val)\n in_ds = True\n return in_ds", "def remove(self, val: int) -> bool:\n if val not in self.map:\n return False\n rm_idx = self.map[val]\n last_idx = len(self.slot) - 1\n last_val = self.slot[last_idx]\n self.slot[rm_idx] = last_val\n self.map[last_val] = rm_idx\n del self.map[val]\n self.slot.pop()\n return True", "def remove(self, val: int) -> bool:\n if not self.dict[val]: return False\n last_num = self.arr[-1]\n removed_idx = self.dict[val].pop()\n\n self.dict[last_num].add(removed_idx)\n self.arr[removed_idx] = last_num\n\n self.dict[last_num].discard(len(self.arr) - 1)\n self.arr.pop()\n\n return True", "def remove(self, val):\n if val in self.dic:\n i = self.dic[val]\n if i<len(self.data)-1:\n self.data[i]=self.data[-1]\n self.dic[self.data[i]]=i\n self.data.pop()\n self.dic.pop(val,0)\n return True\n else:\n return False", "def remove(self, val):\n if val not in self.map:\n return False\n \n to_remove_idx = self.map[val]\n self.map.pop(val)\n if to_remove_idx != len(self.nums) - 1:\n to_swap = self.nums[-1]\n self.nums[-1], self.nums[to_remove_idx] = self.nums[to_remove_idx], self.nums[-1]\n self.map[to_swap] = to_remove_idx\n self.nums = self.nums[:-1]\n return True", "def remove(self, val: int) -> bool:\n idx = self.store_dict.get(val)\n if idx is None:\n return False\n\n l = len(self.store_list)\n self.store_dict[self.store_list[l - 1]] = idx\n self.store_list[idx], self.store_list[l - 1] = self.store_list[l - 1],self.store_list[idx]\n self.store_list.pop()\n del self.store_dict[val]\n return True", "def remove(self, value: object) -> bool:\n # Loops through the indices of the underlying dynamic array.\n end = self.size()\n for ind in range(end):\n # If the value is found, the value is removed from the dynamic array and True is returned.\n if self.da[ind] == value:\n self.da.remove_at_index(ind)\n return True\n # Else false is returned.\n return False", "def remove(self, val: int) -> bool:\n if val not in self.map:\n return False\n idx = self.map[val]\n tail = self.array[-1]\n self.map[tail] = idx\n self.array[idx] = tail\n self.array.pop()\n del self.map[val]\n return True", "def remove(self, val: int) -> bool:\n if val in self.idx:\n # swap the target value and the last value in the data set\n last_val, val_idx = self.data[-1], self.idx[val]\n self.data[val_idx], self.idx[last_val] = last_val, val_idx\n self.data.pop()\n self.idx.pop(val)\n return True\n else:\n return False", "def remove(self, val: int) -> bool:\n if val in self.dic:\n index = self.dic.pop(val)\n self.lst[index], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[index]\n if self.lst[index] in self.dic:\n self.dic[self.lst[index]] = index\n self.lst.pop()\n return True", "def remove(self, val: int) -> bool:\n if val not in self._dict:\n return False\n \n last_val = self._list[-1]\n idx = self._dict[val]\n\n self._list[-1], self._list[self._dict[val]] = self._list[self._dict[val]], self._list[-1]\n self._dict[last_val] = idx\n \n self._dict.pop(self._list[-1])\n self._list.pop()\n\n return True", 
"def remove(self, val: int) -> bool:", "def remove(self, val: int) -> bool:\n if val not in self.dict: return False\n \n index_of_removing_element = self.dict[val]\n last_element = self.list[-1]\n # put list last element into that index \n self.list[index_of_removing_element] = self.list[-1]\n \n # change index of last element which got swapped\n self.dict[last_element] = index_of_removing_element\n \n self.list.pop()\n del self.dict[val]\n # print(\"remove\",val, \"==>\", self.dict, self.list)\n return True", "def remove(self, val: int) -> bool:\n \n if val not in self.d:\n return False\n \n index = self.d[val]\n \n #swap\n temp = self.arr[-1]\n self.arr[-1] = self.arr[index]\n self.arr[index] = temp \n \n self.d[temp] = index\n \n self.arr.pop()\n del self.d[val]\n return True", "def remove(self, val: int) -> bool:\n        if val in self.hashmap:\n            temp=self.list[-1]\n            self.list[-1],self.list[self.hashmap[val]]=self.list[self.hashmap[val]],self.list[-1]\n            self.hashmap[temp]=self.hashmap[val]\n            self.list.pop()\n            del self.hashmap[val]\n            return True\n        return False", "def remove(self, val: int) -> bool:\n if val in self.dict:\n curr_idx = self.dict[val]\n\n self.dict[self.arr[-1]] = curr_idx\n self.arr[curr_idx] = self.arr[-1]\n\n self.arr.pop()\n del self.dict[val]\n return True\n return False", "def remove(self, val):\n if not val in self.record:\n return False\n index = self.record[val]\n self.data[index], self.data[-1] = self.data[-1], self.data[index]\n self.record[self.data[index]] = index\n self.data.pop()\n self.record.pop(val)\n return True", "def remove(self, val: int) -> bool:\n if val not in self.randomized_hash:\n return False\n else:\n array_for_val = self.randomized_hash[val]\n val_index = array_for_val.pop()\n if len(array_for_val) == 0:\n self.randomized_hash.pop(val)\n if val_index == len(self.array) - 1:\n self.array.pop()\n return True\n self.array[val_index], self.array[-1] = self.array[-1], self.array[val_index]\n self.array.pop()\n self.randomized_hash[self.array[val_index]].remove(len(self.array))\n self.randomized_hash[self.array[val_index]].append(val_index)\n return True", "def remove(self, elem):\n if self.inicio == None:\n raise ValueError(\"{} nao esta na lista\".format(elem))\n elif self.inicio.dado == elem:\n self.inicio = self.inicio.prox\n self._size = self._size - 1\n return True\n else:\n ancestor = self.inicio\n ponteiro = self.inicio.prox\n while ponteiro:\n if ponteiro.dado == elem:\n ancestor.prox = ponteiro.prox\n ponteiro.prox = None\n ancestor = ponteiro\n ponteiro = ponteiro.prox\n self._size = self._size - 1\n return True\n raise ValueError(\"{} nao esta na lista\".format(elem))", "def remove(self, val: int) -> bool:\n if val in self.counts:\n self.counts[val] -= 1\n if self.counts[val] == 0:\n del self.counts[val]\n return True\n return False", "def remove(self, val: int) -> bool:\n retVal = True if val in self.map else False\n if retVal:\n index = self.map.pop(val)\n self.arr[index], self.arr[-1] = self.arr[-1], self.arr[index]\n self.arr.pop(-1)\n if len(self.arr) > 0 and index < len(self.arr):\n self.map[self.arr[index]] = index\n \n\n return retVal", "def remove(self, value):\n if self.root is None:\n return self.NULL_NODE\n removed = self._remove(self.root, value)\n if removed and removed.value:\n self.size -= 1\n self.root = removed\n return True\n else:\n return False", "def remove(self, val: int) -> bool:\n if val not in self.map:\n return False\n index = 
self.map[val]\n del self.map[val]\n \n if index+1 != len(self.keys):\n var = self.keys[-1]\n self.keys[index] = self.keys[-1]\n self.map[var] = index\n self.keys = self.keys[:-1]\n # print('removing. ', self.map)\n return True", "def remove(self, element) -> bool:\n\n target_node = self.__find_node(element)\n\n if target_node is None:\n return False\n\n self.__size -= 1\n\n if target_node.left is None or target_node.right is None:\n self.__remove_node(target_node)\n else:\n successor_node = self.__get_largest_node(target_node.left)\n target_node.data = successor_node.data\n\n self.__remove_node(successor_node)\n\n if AVLTree.__DEBUG and not self.__is_balanced(self.__root):\n raise AssertionError(\"This AVL Tree is not balanced any more.\")\n\n return True", "def remove(self, val):\n res = val in self.map\n if res:\n idx = self.map[val][-1]\n if idx != len(self.vec) - 1:\n num_back = self.vec[-1]\n self.map[num_back].remove(len(self.vec) - 1)\n self.vec[-1], self.vec[idx] = self.vec[idx], self.vec[-1]\n self.map[val].pop()\n if len(self.map[val]) == 0:\n del self.map[val]\n self.vec.pop()\n self.map[num_back].append(idx)\n else:\n self.map[val].pop()\n if len(self.map[val]) == 0:\n del self.map[val]\n self.vec.pop()\n return res", "def remove(self, val):\n ind = self.table.pop(val, None)\n if ind is None:\n return False\n key = self.ls.pop()\n if len(self.ls)!=0 and len(self.ls) != ind:\n self.ls[ind] = key\n self.table[key] = ind\n return True", "def remove(self, val):\n if val in self.posFind and self.posFind[val] != -1:\n delPos = self.posFind[val]\n self.nums[delPos] = self.nums[-1]\n self.posFind[self.nums[-1]] = delPos\n self.posFind[val] = -1\n self.nums.pop()\n return True\n return False", "def remove(self, key: int) -> bool:\n current = self.root.find(key) if not self.empty() else None\n if current is None: # if no such key, failure\n return False\n\n self.root = current.remove() # update root\n return True", "def remove(self, val: int) -> bool:\n # print(f\"remove {val}, indexes={self.indexes}, items={self.items}\")\n if val not in self.indexes:\n return False\n n = len(self.items)\n\n if self.items[-1] == val:\n self.indexes[val].remove(n-1)\n self.items.pop()\n else:\n i = self.indexes[val].pop()\n # swap i, n-1\n tail = self.items[n-1]\n self.items[i] = tail\n self.indexes[tail].remove(n-1)\n self.indexes[tail].add(i)\n # remove from items\n self.items.pop()\n if len(self.indexes[val]) == 0:\n del self.indexes[val]\n\n return True", "def remove(self, val):\n if val in self.dict_val:\n list_index = self.dict_val[val]\n last_ele_index = len(self.list_val) -1\n if list_index == last_ele_index:\n self.dict_val.pop(val)\n self.list_val.pop()\n else:\n self.dict_val[self.list_val[last_ele_index]] = list_index\n self.list_val[list_index], self.list_val[last_ele_index] = self.list_val[last_ele_index], self.list_val[list_index]\n self.dict_val.pop(val)\n self.list_val.pop()\n # for index in range(list_index, len(self.list_val)):\n # self.dict_val[self.list_val[index]] -= 1\n # self.dict_val.pop(val)\n # self.list_val.pop(list_index)\n return True\n else:\n return False", "def remove(self, item):\n try:\n self._data.remove(item)\n except ValueError as exc:\n raise KeyError from exc\n else:\n self.__log__.append(SetRemove(value=item))", "def remove_values(self, values: Collection[Hashable]) -> bool:\n\t\tany_values_removed = False\n\n\t\tfor value in values:\n\t\t\tif value in self._potential_values:\n\t\t\t\tself._potential_values.remove(value)\n\t\t\t\tany_values_removed = 
True\n\n\t\treturn any_values_removed", "def remove(self, val):\r\n\r\n if val in self.indices:\r\n swap = self.data[self.len - 1]\r\n if val == swap:\r\n self.indices[val].remove(self.len - 1)\r\n self.len -= 1\r\n if len(self.indices[val]) == 0:\r\n del self.indices[val]\r\n return True\r\n self.indices[swap].remove(self.len - 1)\r\n self.indices[swap].append(self.indices[val][-1])\r\n self.data[self.indices[val][-1]], self.data[self.len - 1] = self.data[self.len - 1], self.data[self.indices[val][-1]]\r\n self.indices[val] = self.indices[val][:-1]\r\n # need to change the index of the value we swapped with\r\n if len(self.indices[val]) == 0:\r\n del self.indices[val]\r\n self.len -= 1\r\n return True\r\n else:\r\n return False", "def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")", "def remove(self, value):\n # note: we do not consider shrinking the dynamic array in this version\n for k in range(self._n):\n if self._Array[k] == value: # found a match!\n for j in range(k, self._size - 1): # shift others to fill gap\n self._Array[j] = self._Array[j + 1]\n self._Array[self._n - 1] = None # help garbage collection\n self._size -= 1 # we have one less item\n\n return # exit immediately\n raise ValueError( \"value not found\" ) # only reached if no match", "def __delitem__(self, value) -> bool: # True -> if element was deleted else False\n if not self.head:\n return False\n if self.head.value == value:\n if self.head.next_value:\n self.head = self.head.next_value\n else:\n self.head = None\n return True\n link = self.head.next_value\n prev = self.head\n while link:\n if link.value == value:\n prev.next_value = link.next_value\n return True\n prev = link\n link = link.next_value\n return False", "def must_remove(self, tag_name, tag_value):\n return self._ruleset[tag_name][tag_value].get(self.REMOVE_KEY, False)", "def remove(self, value):\n for i, v in enumerate(self):\n if v == value:\n self._table.pop(i); return\n raise ValueError, \"list.remove(x): x not in list\"", "def remove(self, value):\r\n if value not in self:\r\n raise KeyError(value)\r\n self.discard(value)", "def contains(self, val):\n return not not self.search(val)", "def _contains(self, element):\n if not isinstance(element, Tuple) or len(element) != 2:\n return S.false\n\n if not element[1].is_Integer:\n return S.false\n\n if element[1] >= len(self.sets) or element[1] < 0:\n return S.false\n\n return self.sets[element[1]]._contains(element[0])", "def remove(self) -> object:\n return self._contains.pop()", "def removeFromSet(_session, _el, _set):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n _set,\n sc.SC_ARC,\n _el), True)\n while not it.is_over():\n _session.erase_el(it.value(1))\n it.next()", "def remove(self, value):\n\t\tself.__remove(self, value, None)", "def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False", "def remove(self, key: str) -> bool:\n prev, cur = None, self.head\n while cur is not None:\n if cur.key == key:\n if prev:\n prev.next = cur.next\n else:\n self.head = cur.next\n self.size -= 1\n return True\n prev, cur = cur, cur.next\n return False", "def remove(self, val):\n val = self._conversion(val)\n if isinstance(val, TYPES[self.__set_type]):\n self.__tree.delete(val)\n 
else:\n print(\"TypeError : Wrong Input\")", "def findpop(value, lst):\n if value in lst:\n while True: # remove all instances `value` from lst\n try:\n lst.pop(lst.index(value))\n except ValueError:\n break\n return True # and return yes we found the value\n else:\n return False # didn't find the value", "def remove(self, val: int) -> bool:\n if val not in self.val2i: return False\n #print(self.val2i)\n i = self.val2i[val]\n del self.val2i[val]\n #print(self.val2i)\n if i != self.size - 1:\n self.array[i], self.array[self.size - 1] = self.array[self.size - 1], self.array[i]\n self.val2i[self.array[i]] = i\n self.size -= 1\n \n #print(self.size)\n return True", "def find(self, value: int) -> bool:\n hashset = set()\n for num in self._nums :\n if num in hashset : return True\n else : hashset.add(value - num)\n return False", "def remove(self, e):\n try:\n del self.vals[e]\n except:\n return", "def removeItem(self, value):\n\t\tif self._linkHead == None:\n\t\t\treturn False\n\n\t\tif self._linkHead._itemValue == value:\n\t\t\tif self._linkHead == self._linkTail:\n\t\t\t\tself._linkHead = None\n\t\t\t\tself._linkTail = None\n\t\t\telse:\n\t\t\t\tself._linkHead = self._linkHead._itemNext\n\t\t\t\tself._linkHead._itemPre = None\n\n\t\t\treturn True\n\n\t\t_nodeCursor = self._linkHead\n\n\t\twhile _nodeCursor != None and _nodeCursor._itemValue != value:\n\t\t\t_nodeCursor = _nodeCursor._itemNext\n\n\t\tif _nodeCursor != None:\n\t\t\tif _nodeCursor == self._linkTail:\n\t\t\t\tself._linkTail = _nodeCursor._itemPre\n\t\t\t\tself._linkTail._itemNext = None\n\t\t\telse:\n\t\t\t\t_nodeCursor._itemPre._itemNext = _nodeCursor._itemNext\n\t\t\t\t_nodeCursor._itemNext._itemPre = _nodeCursor._itemPre\n\n\t\t\treturn True\n\n\t\treturn False", "def remove(self, e):\r\n try:\r\n self.vals.remove(e)\r\n except:\r\n raise ValueError(str(e) + ' not found')", "def remove(self, key):\n elem = self.find(key)\n if not elem:\n return\n self.remove_elem(elem)", "def remove(self,value):\n if self.is_empty():\n return\n current = self._head\n if current.value == value:\n self._head = self._head.next\n elif current.next is None:\n # Contains one element only, but it is not the one we are looking for.\n return\n else:\n while current.next.value != value:\n current = current.next\n if current.next is None: # Remove value not found.\n return\n\n # Find removed value, remove it.\n current.next = current.next.next\n if current.next is None:\n self._tail = current\n self._size -= 1", "def remove(self, e):\n try:\n self.vals.remove(e)\n except:\n raise ValueError(str(e) + ' not found')", "def remove(self, e):\n try:\n self.vals.remove(e)\n except:\n raise ValueError(str(e) + ' not found')", "def remove(self, e):\n try:\n self.vals.remove(e)\n except:\n raise ValueError(str(e) + ' not found')", "def remove_option_from_value(self, o):\n result = False\n for k in self._options:\n if self._options.get(k) == o:\n self._options.pop(k)\n result = True\n return result", "def _remove(self, key: bytes) -> bool:\n if self._get(key) != None:\n self.db.delete(key)\n return True\n else:\n return False", "def remove(self, e):\n try:\n self.vals.remove(e)\n except:\n raise(ValueError(str(e) + ' not found.'))", "def remove(self, key):\r\n\t\tif self.head is None:\r\n\t\t\treturn False\r\n\t\tif self.head.key == key:\r\n\t\t\tself.head = self.head.next\r\n\t\t\tself.size = self.size - 1\r\n\t\t\treturn True\r\n\t\tcur = self.head.next\r\n\t\tprev = self.head\r\n\t\twhile cur is not None:\r\n\t\t\tif cur.key == key:\r\n\t\t\t\tprev.next = 
cur.next\r\n\t\t\t\tself.size = self.size - 1\r\n\t\t\t\treturn True\r\n\t\t\tprev = cur\r\n\t\t\tcur = cur.next\r\n\t\treturn False", "def remove(self, item):\n item_found = False\n\n try:\n # Traverse through the array to look for the 'item'\n for i in range(len(self)):\n if self.the_array[i] == item:\n # Move every item after the 'item' found to left in order\n # to remove the 'item'\n for j in range(i, self.count - 1):\n self.the_array[j] = self.the_array[j + 1]\n self.count -= 1\n item_found = True\n\n if (self.capacity // 2 >= self.BASE_SIZE) and (self.count < self.capacity / 8):\n self._resize(self.capacity // 2)\n break\n\n if not item_found:\n raise ValueError\n\n except ValueError:\n print(\"Item not found in list.\")\n\n return item_found", "def remove(self, value):\n self.values.remove(value)", "def remove(self, pset):\n self._sets.remove(pset)", "def remove(self, key):\n if self.head is None:\n return False\n if self.head.key == key:\n self.head = self.head.next\n self.size = self.size - 1\n return True\n cur = self.head.next\n prev = self.head\n while cur is not None:\n if cur.key == key:\n prev.next = cur.next\n self.size = self.size - 1\n return True\n prev = cur\n cur = cur.next\n return False", "def remove(self, key):\n if self.head is None:\n return False\n if self.head.key == key:\n self.head = self.head.next\n self.size = self.size - 1\n return True\n cur = self.head.next\n prev = self.head\n while cur is not None:\n if cur.key == key:\n prev.next = cur.next\n self.size = self.size - 1\n return True\n prev = cur\n cur = cur.next\n return False", "def remove(self, value):\n pass", "def remove(self, element):\n for i in range(self._length): # Find index of element in array\n if self._arr[i] == element:\n # Move all elements after index j one forward to \"delete\" element\n for j in range(i, self._length - 1):\n self._arr[j] = self._arr[j + 1]\n self._length -= 1\n self._check_shrink() # Shrink array if length is too small\n return\n raise ValueError(f'{element} not in list') # Raise if element not found", "def delete(self, value):\r\n # Return false if tree was empty\r\n if self.empty():\r\n return False\r\n\r\n # Find the node containing the value\r\n node = self.descend_to_node(value)\r\n # If that node is 'False', value wasn't found. Give error and return False.\r\n if not node:\r\n print(\"Value\", value, \"not found.\")\r\n return False\r\n else:\r\n # If it wasn't False, call on helper function delete_node\r\n self.delete_node(node)", "def contains(self, val):\n return False if not self.search(val) else True", "def remove(self, value): # real signature unknown; restored from __doc__\n pass", "def remove(self, v):\n if v in self.elements:\n del self.elements[self.elements.index(v)]\n self._heapify()", "def remove(self, element: int) -> None:\n self._used.remove(element)\n if element < self.search_pos:\n self.search_pos = element" ]
[ "0.77930737", "0.75553113", "0.7472567", "0.7312134", "0.71163213", "0.70876503", "0.69920707", "0.6944722", "0.6899799", "0.68561065", "0.6852177", "0.68362844", "0.6835518", "0.6825324", "0.6823078", "0.6823078", "0.6814828", "0.6778132", "0.67599577", "0.67399085", "0.67260617", "0.6722024", "0.6722024", "0.6668142", "0.6652983", "0.66081226", "0.66012657", "0.6581387", "0.6524345", "0.65121067", "0.6492137", "0.6481731", "0.6459095", "0.64489406", "0.64353603", "0.6424592", "0.6411584", "0.63979065", "0.6393546", "0.6382703", "0.6370911", "0.6356157", "0.63131404", "0.6310309", "0.6277501", "0.6272212", "0.62658733", "0.6258839", "0.62491447", "0.62475705", "0.62396634", "0.6225557", "0.6221955", "0.6053053", "0.60366654", "0.5987575", "0.59312505", "0.59082794", "0.58819216", "0.58704746", "0.58666354", "0.58657867", "0.5862623", "0.58500886", "0.58394706", "0.57986325", "0.579584", "0.5790805", "0.5778573", "0.57607013", "0.57477427", "0.57477427", "0.57392186", "0.57292587", "0.5694361", "0.5687757", "0.56700456", "0.56539106", "0.5630221", "0.561128", "0.5608833", "0.5605357", "0.5605357", "0.5605357", "0.5594197", "0.5593989", "0.5584661", "0.55821884", "0.5580075", "0.5559886", "0.55414927", "0.5528388", "0.5528388", "0.5525037", "0.5522737", "0.55201983", "0.54887617", "0.54852074", "0.5472125", "0.54710376" ]
0.6174827
53
Get a random element from the set.
def getRandom(self):
        # pick a random number from the list
        return random.choice(self.nums)
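For illustration, a minimal sketch (not part of the stored record) of why random.choice over a plain Python list returns each stored value with equal probability in O(1) time; the list contents below are assumed stand-ins for the structure's backing list:

import random
from collections import Counter

nums = [3, 7, 11]  # stand-in for the set's backing list (assumed contents)
draws = Counter(random.choice(nums) for _ in range(30_000))
print(draws)  # each value shows up roughly 10,000 times, i.e. uniformly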
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRandom(self):\n return self.nums[random.randint(0, len(self.nums) - 1)]\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()", "def getRandom(self):\n n = len(self.keys)\n while n > 0:\n index = random.randint(0, n - 1)\n my_key = self.keys[index]\n if my_key in self.ds:\n return my_key\n else:\n self.keys[index] = self.keys[n - 1]\n self.keys.pop()\n n = n - 1\n\n\n\n\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()", "def getRandom(self) -> int:\n return random.choice(list(self.set))", "def random_element(self) -> 'PFElement':\n return random.choice(list(iter(self)))", "def get_one(_set):\r\n assert _set # _set is not empty\r\n return next(iter(_set))", "def random (self, checkfn=None):\n if len(self) == 0:\n return None\n return self.random_pick(checkfn=checkfn)[1]", "def pick_one(self):\n index = 0\n r = random.random()\n while r >= 0:\n r = r - self.normalised_fitness[index]\n index += 1\n index -= 1\n return self.population[index]", "def _rand_elem(self, iterable):\n\n lst = list(iterable)\n idx = self._rand_int(0, len(lst))\n return lst[idx]", "def getRandom(self):\n random_index = randint(0, len(self.list_val)-1)\n return self.list_val[random_index]", "def getRandom(self) -> int:\n return random.choice(self.elements)", "def get_from_set(set_):\n for e in set_: return e", "def getRandom(self):\n return random.choice(self.vec)", "def choose(self):\n\n i = bisect.bisect(self._p, random.random())\n return self._values[i]", "def choice(L):\r\n LEN = len(L) # Get the length\r\n randomindex = int(LEN*random()) # Get a random index\r\n return L[randomindex] # Return that element\r", "def getRandom(self) -> int:\n size = len(self.value_set)\n if size > 0:\n from random import randint\n x = randint(1, size)\n return self.values[x - 1]", "def getRandom(self) -> int:\n randomArray = []\n i = 0\n for key in self.randomSet:\n randomArray.append(key)\n return randomArray[random.randint(0,len(randomArray)-1)]", "def getRandom(self) -> int:\n from random import choice\n return choice(self.list)", "def pick_one(_lst):\n if len(_lst) == 2:\n return _lst[0] if int(random(2)) else _lst[1]\n elif len(_lst) == 3:\n return _lst[int(random(3))]", "def _random_pick(lst):\n\n choice = random.randint(0, len(lst) - 1)\n return lst[choice]", "def getRandom(self):\n return random.choice(self.data)", "def getRandom(self) -> int:\n some_item = self.container.pop()\n self.container.add(some_item)\n return some_item", "def random(self):\n try:\n return self.order_by('?')[0]\n except IndexError:\n raise self.model.DoesNotExist", "def patch_random_choice(x, element):\r\n if isinstance(x, int): # np.random.choice can accept an int or 1D array-like input\r\n return element\r\n\r\n return x[element]", "def random_select(self, room_set):\n set_size = len(room_set)\n set_keys = list(room_set.keys())\n random_key = set_keys[randint(0, set_size - 1)]\n return random_key", "def __getitem__(self, index):\n assert(isinstance(index,int)), \"Index should be an integer value\"\n assert(0 <= index < len(self.set)), \" Index out of bounds\"\n return self.set[index]", "def getRandom(self):\r\n return self.data[rnd.randrange(self.len)]", "def getRandom(self) -> int:\n return random.choice(self._list)", "def getRandom(self) -> int:\n return 
random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n # print(self.ind)\n return choice(self.items)", "def getRandom(self) -> int:\n n = len(self.list)\n rand = random.randrange(n)\n return self.list[rand]", "def getRandom(self):\n if not self.l:\n return -1\n return random.choice(self.l)", "def pop_random(self):\n\n rand_index = randint(0, len(self._list) - 1)\n item = self._list[rand_index]\n self.remove(item)\n return item", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def getRandom(self):\n import random\n res = -1\n len = 0\n head = self.head\n while head:\n if random.randint(0,len) == 0:\n res = head.val\n head = head.next\n len += 1\n return res", "def random_element(self):\n from sage.graphs.schnyder import minimal_schnyder_wood\n from sage.graphs.generators.random import RandomTriangulation\n n = self._size\n tri = RandomTriangulation(n + 3)\n TIP = TamariIntervalPosets\n schnyder = minimal_schnyder_wood(tri, root_edge=('a', 'b'),\n check=False)\n return TIP.from_minimal_schnyder_wood(schnyder)", "def getRandom(self) -> int:\n return choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.items)", "def _find_element_not_in_set(self, already_used: set) -> int:\n new_element = random.randint(a=self.min_value, b=self.max_value)\n while new_element in already_used:\n new_element = random.randint(a=self.min_value, b=self.max_value)\n return new_element", "def getRandom(self):\n if not self.head:\n return\n candidate, pv = self.head.val, 1\n tmp = self.head\n while tmp:\n if random.randint(1, pv) == 1:\n candidate = tmp.val\n tmp = tmp.next\n pv += 1\n return candidate", "def getRandom(self):\n randomIndex = random.randrange(0, self.size)\n return self.nums[randomIndex]", "def getRandom(self):\n \n return self.data[random.randint(0, len(self.data) - 1)]", "def get(self):\n if len(self._words) < 1:\n raise ValueError(\"You have exhausted the list of words!\")\n index = randint(0, len(self._words) - 1)\n return self._words.pop(index)", "def get(self):\n if len(self._words) < 1:\n raise ValueError(\"You have exhausted the list of words!\")\n index = randint(0, len(self._words) -1)\n return self._words.pop(index)", "def random(self, af=False):\n rank = randrange(self.order())\n return self.coset_unrank(rank, af)", "def getRandom(self) -> int:\n return random.choice(self.keys)", "def pick_random(items: Iterable[T]) -> Optional[T]:\n\n # Choose to remember the `i`th element from the stream with probability\n # `1/i`. Then probability of choosing the `i`th element is\n #\n # P(i) = (1 / i) * (1 - 1 / (i + 1)) * (1 - 1 / (i + 2)) * ...\n # ... * (1 - 1 / n) =\n # = (1 / i) * ((i + 1 - 1) / (i + 1)) * ((i + 2 - 1) / (i + 2) * ...\n # ... * ((n - 1) / n) =\n # = (1 / i) * (i / (i + 1)) * ((i + 1) / (i + 2)) * ...\n # ... 
* ((n - 1) / n) =\n # = 1 / n\n\n cur_item: Optional[T] = None\n\n for i, item in enumerate(items, start=1):\n if random() <= 1 / i:\n cur_item = item\n\n return cur_item", "def getValue(self):\n return random.choices(self.indices, weights=self.weights, k=1)[0]", "def get(self, keep=None):\n result = random.choice(self.pile_list)\n if keep is None:\n self.update(result)\n return str(result)", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def getRandom(self):\n res = self.head.val\n cur = self.head.next\n count = 2\n\n while cur != None:\n if random() <= 1.0 / count:\n res = cur.val\n\n count += 1\n cur = cur.next\n return res", "def random_pop (self, checkfn=None):\n if len(self) == 0:\n return None\n\n index = self.random_pick(checkfn=checkfn)[0]\n\n if index == None:\n return None\n\n return self.pop(index)", "def random_pick(id_list):\n return random.choice(id_list)", "def getRandom(self) -> int:\n return random.choice(self.store_list)", "def getRandom(self) -> int:\n count = 0\n temp = self.head\n while temp:\n if random.randint(0,count)==0:\n res = temp.val\n temp = temp.next\n count+=1\n return res", "def getRandom(self) -> int:\n rand = random.randint(0, self.count - 1)\n worker = self.head\n while rand:\n worker = worker.next\n rand -= 1\n return worker.val", "def getRandom(self) -> int:\n return random.choice(list(self.d.keys()))", "def get_random(self):\n return self._get_random()", "def getRandom(self) -> int:\n index = random.randint(0, len(self.lst) - 1)\n # self.lst[index], self.lst[len(self.lst) - 1] = self.lst[len(self.lst) - 1], self.lst[index]\n # val = self.lst.pop()\n # self.dic.pop(val)\n return self.lst[index]", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n u = self.uniform_user()\n i = random.choice(self.data[u].indices)\n else:\n i = random.randint(0,self.num_items-1)\n return i", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def getRandom(self):\n return random.choice(self.table.keys())", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n # TODO: choose a user randomly\n u = self.uniform_user()\n i = random.choice(self.data[u].indices)\n else:\n i = random.randint(0, self.num_items - 1)\n return i", "def getRandom(self) -> int:\n import random\n return random.choice(self._array)", "def randomRow(self):\r\n l = []\r\n for row in self.data:\r\n l.append(row)\r\n return random.choice(l)", "def choice(seq):\r\n i = int(random() * len(seq))\r\n return seq[i]", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def uniform_select(xs):\n n = len(xs) - 1\n i = randint(0, n)\n return xs[i]", "def getRandom(self) -> int:\n return random.choice(self.arr)", "def Chose_rand():\r\n total_list=list(range(1,467681))\r\n select=13788\r\n random_selected= random.sample(total_list,select)\r\n return (random_selected)", "def popitem(self):\n all_items = self.items()\n removed_item = random.choice(all_items)\n self[removed_item[0]] = None\n return removed_item", "def Choose(self):\n lst = [x for x in self.bag.keys()]\n choice = random.choices(lst, k=1)[0]\n self.bag.update({choice : self.bag.get(choice) - 1})\n return(choice)", "def get(s: Iterable[T]) -> T:\n return next(iter(s))", "def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" 
% self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people", "def getRandom(self):\n return random.choice(self.ls)", "def random_entry(): \n\n files = list_entries()\n return random.choice(files)", "def getRandom(self):\n index = random.randrange(0, self.length)\n node = self.head\n while index:\n node = node.next\n index -= 1\n return node.val", "def getRandom(self) -> int:\n return random.choice(self.array)", "def getRandom(self) -> int:\n return choice(self.array)", "def get_random_object():\n\n return random.choice([\n get_random_alphabetic_string,\n get_random_alphanumeric_string,\n get_random_integer,\n get_random_real_number\n ])()", "def any(t, seed=937162211):\n random.seed(seed)\n return random.choices(t)[0]", "def one(self):\n count = self.aggregate(count=Count('id'))['count']\n\n if count < 2:\n raise ValueError('not enough words')\n\n idx = randint(0, count - 1)\n\n return self.all()[idx]", "def getRandom(self) -> int:\n return random.choice(self.l)", "def random_element(self):\n c = self.original_code().random_element()\n c_list = c.list()\n F = self.base_ring()\n last_element = F.zero()\n for i in c_list:\n last_element += i\n c_list.append(-last_element)\n return vector(F, c_list)", "def get_random_question(self):\n available_qs = self.available_qs\n if available_qs.exists():\n return random.choice(available_qs)", "def random_item(self):\n if self.sample_negative_items_empirically:\n # just pick something someone rated!\n u = self.uniform_user()\n i = random.choice(self.dataModel.getItemIDsFromUid(u))\n else:\n i = random.randint(0,self.num_items-1)\n return i", "def get_random_song(self):\n return random.choice(self.song_list)", "def select_item(items, weights, k):\n x = random.choices(items, weights=weights, k=k)\n return x", "def randomly_pick_recipe(cls):\n return random.choice(cls._recipes)", "def _get_random_pool(pool_list):\n if not pool_list:\n return None\n if len(pool_list) == 1:\n return pool_list[0]\n\n last = len(pool_list) - 1\n index = random.randint(0, last)\n return pool_list[index]", "def get_random(self):\n if len(self.quotes_list) >= 1:\n num = random.choice(list(self.quotes_list.keys()))\n return self.get_quote(num)\n else:\n return -1", "def getRandom(self) -> int:\n count = len(self.arr)\n return self.arr[randint(0, count-1)]", "def _get_dataset_node(self, nodes):\n if not nodes:\n raise WNoNodesFound()\n return random.choice(nodes)", "def getRandomFromList(self, l):\n if (len(l) == 0):\n return -1\n return l[randint(0, len(l) - 1)]", "def peek(set_):\n ensure_set(set_)\n if not set_:\n raise KeyError(\"peek into an empty set\")\n return next(iter(set_))", "def rand_elem(seq, n=None):\n return map(random.choice, repeat(seq, n) if n is not None else repeat(seq))" ]
[ "0.77060276", "0.738916", "0.7296881", "0.72187954", "0.6871744", "0.68453854", "0.68330806", "0.67740154", "0.67226046", "0.6703282", "0.6665089", "0.66356236", "0.66217655", "0.6612035", "0.66000897", "0.6567767", "0.6477719", "0.6462421", "0.64428514", "0.64162946", "0.6400411", "0.63985956", "0.63965166", "0.6384556", "0.63507736", "0.63358206", "0.6322437", "0.6318925", "0.6318925", "0.6318925", "0.6318925", "0.63087296", "0.63049567", "0.62901545", "0.62845665", "0.6278972", "0.62767875", "0.6270726", "0.62435496", "0.624169", "0.62364674", "0.62320834", "0.621031", "0.620111", "0.618882", "0.6180096", "0.6165314", "0.61588156", "0.6144825", "0.61311704", "0.6127774", "0.6122921", "0.6120845", "0.61199105", "0.60798764", "0.6064852", "0.6055682", "0.60449225", "0.60399026", "0.601593", "0.60073423", "0.59963316", "0.59960943", "0.59945565", "0.59898704", "0.59890753", "0.59879035", "0.5981835", "0.5981498", "0.5981498", "0.5981498", "0.5970117", "0.5954455", "0.59525657", "0.5939653", "0.59190553", "0.5915506", "0.59126323", "0.59072405", "0.59054476", "0.59004784", "0.5895898", "0.5895298", "0.589289", "0.5886267", "0.5881088", "0.58790445", "0.5878437", "0.5870677", "0.58698833", "0.5866814", "0.58649206", "0.58647233", "0.5861778", "0.5846389", "0.5845808", "0.583735", "0.5816446", "0.58082026", "0.5802635" ]
0.6388269
23
Accumulate observed stars on the same dates.
def accumulate_dates(dates, stars):
    start = min(dates)
    stop = max(dates)
    t_range = (stop - start).days
    a_dates = [start + timedelta(days = n) for n in range(t_range + 1)]
    a_stars = [0 for n in range(t_range + 1)]
    for i in range(len(dates)):
        idx = (dates[i] - start).days
        a_stars[idx] = a_stars[idx] + stars[i]
    return a_dates, a_stars
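A small usage sketch (assumed, not part of the stored record): accumulate_dates expects datetime.date values and sums stars that fall on the same day into one bin, filling gap days with zeros; it relies on timedelta being imported where it is defined.

from datetime import date, timedelta  # timedelta is needed by accumulate_dates itself

dates = [date(2021, 1, 1), date(2021, 1, 1), date(2021, 1, 3)]  # assumed inputs
stars = [2, 3, 5]
a_dates, a_stars = accumulate_dates(dates, stars)
print(a_dates)  # [date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3)]
print(a_stars)  # [5, 0, 5] -- the two Jan 1 observations are summed into one bin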
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_rating_history(self, rating: float, date: Union[str, float]):\n self.rating_history.append((date, rating))", "def _increment_num_user_stars(user_id, match, now):\n\tassert match.is_streamed\n\n\tmissing = session.query(CalendarEntry)\\\n\t\t\t.filter(\n\t\t\t\tCalendarEntry.user_id == user_id,\n\t\t\t\tCalendarEntry.match_id == match.id)\\\n\t\t\t.count() == 0\n\tif missing:\n\t\t# No existing CalendarEntry; create a new one.\n\t\tentry = _get_calendar_entry(user_id, match)\n\t\tsession.add(entry)\n\telse:\n\t\t# Increment the count of stars for an existing CalendarEntry.\n\t\tsession.execute(CalendarEntries.update()\n\t\t\t\t.where(sa.and_(\n\t\t\t\t\tCalendarEntry.user_id == user_id,\n\t\t\t\t\tCalendarEntry.match_id == match.id))\n\t\t\t\t.values({CalendarEntry.num_user_stars: CalendarEntry.num_user_stars + 1}))", "def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()", "def running_total(date_list):\n return sum(d.price for d in date_list)", "def SumSpectra(A, Rates, Times, offset=0.0):\n\n #print '*** in SumSpectra: ***'\n result = np.zeros( Times.shape )\n for i in range(len(Rates)):\n #print '***', Rates[i]\n result += A[i]*np.exp( -1.0*Rates[i]*Times) \n return result", "def update(self, delta_time):\n for b in self.star_list:\n b.update()", "def daily_motion(cls, date):\n mean_motion = 360 / cls.SIDEREAL_YEAR\n anomaly = cls.mean_position(date, cls.ANOMALISTIC_YEAR)\n epicycle = 14/360 - abs(cls.sine(anomaly)) / 1080\n entry = quotient(float(anomaly), angle(0, 225, 0))\n sine_table_step = cls.sine_table(entry + 1) - cls.sine_table(entry)\n factor = -3438/225 * sine_table_step * epicycle\n return mean_motion * (factor + 1)", "def new_entry_update(cls, summary):\n totaltimes = [x.totaltime for x in summary.entries]\n total = sum(totaltimes, timedelta())\n average = total / len(totaltimes)\n summary.total_time = total\n summary.daily_average = average", "def _accumulate_rewards(self) -> None:\n for agent, reward in self.rewards.items():\n self._cumulative_rewards[agent] += reward", "def apply(self, obs, fcst):\n D = obs.shape[0]\n LT = obs.shape[1]\n L = obs.shape[2]\n efcst = copy.deepcopy(fcst)\n for lt in range(LT):\n day = int(np.floor(lt / 24.0)) + 1\n for d in range(day, D):\n recent = obs[d, 0, :] - fcst[d, 0, :]\n yesterday = obs[d - day, lt, :] - fcst[d - day, lt, :]\n efcst[d, lt, :] = efcst[d, lt, :] + self.weight_recent[lt] * recent\n + self.weight_yesterday[lt] * yesterday\n\n return efcst", "def visitCalculated(self, date):\n raise NotImplementedError()", "def add_star(array, star_data, disk_star_ratio=0.001):\n left_bound = np.shape(star_data)[1]//2 - np.shape(array)[1]//2\n right_bound = np.shape(star_data)[1]//2 + np.shape(array)[1]//2\n\n # Cutting star data into the shape of the model\n star_data = star_data[:, left_bound:right_bound, left_bound:right_bound]\n star_data /= np.amax(star_data)\n\n star_addition = array * (disk_star_ratio) + star_data * (1-disk_star_ratio)\n\n return star_addition", "def rate(self, rating, series, is_gs=False, counts=False):\n k = 
self.calculate_k(rating, counts)*1.1 if is_gs else self.calculate_k(rating, counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating", "def __simulate_one_day__(self):\n self.compute()\n self.days.append(next(self.index))", "def average(self, returns):\r\n return returns.mean() * self.day", "def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating,counts)*1.1 if is_gs else self.calculate_k(rating,counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating", "def refresh_accumulated_point(self, school_year=str(timezone.now().year - 1) + '-' + str(timezone.now().year), school_semester=1):\n total = 0\n for activity in self.activities.all():\n if activity.school_year == school_year and activity.semester == school_semester:\n total += activity.point\n total = total if total <= 30 else 30\n self.accumulated_point += total", "def add_star_team(client_id, team_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the indexed name of the team.\n\t\tteam_indexed_name = session.query(Team.indexed_name)\\\n\t\t\t\t.filter(Team.id == team_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the team.\n\t\tstarred_team = StarredTeam(user_id=client_id,\n\t\t\t\tteam_id=team_id,\n\t\t\t\tindexed_name=team_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_team)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this team.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the team.\n\tsession.execute(Teams.update()\n\t\t\t.where(Team.id == team_id)\n\t\t\t.values({Team.num_stars: Team.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(MatchOpponent.match_id, Match)\\\n\t\t\t.join(Match, MatchOpponent.match_id == Match.id)\\\n\t\t\t.filter(MatchOpponent.team_id == team_id, MatchOpponent.is_streamed == True)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\t\n\tsession.commit()", "def _discounted_cumsum(self, rewards, rate=None):\n # HINT1: note that each entry of the output should now be unique,\n # because the summation happens over [t, T] instead of [0, T]\n # HINT2: it is possible to write a vectorized solution, but a solution\n # using a for loop is also fine\n rate = self.gamma if rate is None else rate\n\n rewards = np.array(rewards)\n disounted_return = list(\n accumulate(rewards[::-1], lambda ret, rew: rate * ret + rew))\n disounted_return = np.array(disounted_return)[::-1]\n return disounted_return", "def accumulate(self,tod,weights,pixels):\n binFuncs.binValues(self.sigwei, pixels, weights=tod*weights)\n binFuncs.binValues(self.wei , pixels, weights=weights )\n if self.storehits:\n binFuncs.binValues(self.hits, pixels,mask=weights)", "def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()", "def avg_ttm_2y(df):\n return 0.5 * (df + df.shift(4))", "def _update_current_ratings(self, pid, obs_time, rating, variance):\n\n\t\tself.current_player_ratings[pid]['rating'] = rating\n\t\tself.current_player_ratings[pid]['variance'] = variance\n\t\tself.current_player_ratings[pid]['last_obs'] = obs_time", "def 
accumulate(self, days: int, dt: float, plot=True):\r\n self.floatCheck([days, dt])\r\n self.negValCheck([days, dt])\r\n t = np.linspace(0, days, int(days / dt) + 1)\r\n S, E, I, R = self._simulate(days, dt)\r\n # create a numpy array that will hold all of the values\r\n cases = np.zeros(len(I))\r\n # add up the total infected and removed at given time to account for everyone with the virus\r\n for i in range(len(I)):\r\n cases[i] = I[i] + R[i]\r\n # create a dictionary that holds the data for easy conversion to dataframe\r\n data1 = {\r\n \"Days\": t,\r\n \"Susceptible\": S,\r\n \"Exposed\": E,\r\n \"Infected\": I,\r\n \"Removed\": R,\r\n \"Total Cases\": cases,\r\n }\r\n # create the column labels\r\n labels = [\r\n \"Days\",\r\n \"Susceptible\",\r\n \"Exposed\",\r\n \"Infected\",\r\n \"Removed\",\r\n \"Total Cases\",\r\n ]\r\n # convert to dataframe\r\n df = pd.DataFrame(data=data1, columns=labels)\r\n if plot:\r\n # do some plotting\r\n df.plot(x=\"Days\", y=[\"Total Cases\"])\r\n plt.xlabel(\"Days\")\r\n plt.ylabel(\"Total Cases\")\r\n plt.show()\r\n # return dataframe\r\n return df", "def _increment_date_data(klass, series, date_data):\n\n # delta is the timedelta in between events\n delta = timedelta(days=7 * series.every)\n date_data['start_date'] = date_data['start_date'] + delta\n date_data['end_date'] = date_data['end_date'] + delta", "def get_track_rating_from_history(user_track_timestamp_MSD):\n time_format = \"%Y-%m-%dT%H:%M:%SZ\"\n user_rate_dict = dict()\n for user in user_track_timestamp_MSD:\n user_rate_dict[user] = dict()\n for key in user_track_timestamp_MSD[user]:\n length = len(user_track_timestamp_MSD[user][key])\n if length == 1:\n user_rate_dict[user][key] = 3\n continue\n\n # if a track played more than 10 times, 5 star rating\n if length > 10:\n user_rate_dict[user][key] = 5\n continue\n\n if length > 1:\n user_rate_dict[user][key] = 4\n\n # if a track played more than once in a single day, 5 star rating\n for i in range(0, length-1):\n diff_time = abs(time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i], time_format)) \\\n - time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i+1], time_format))) /3600\n if diff_time < 24:\n user_rate_dict[user][key] = 5\n break\n if user_rate_dict[user][key] == 5:\n continue\n\n # if a track played more than 4 times per month, 5 star rating\n if length > 4:\n for i in range(0, length-4):\n diff_time = abs(time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i], time_format)) \\\n - time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i+3], time_format))) /3600/24\n if diff_time < 30:\n user_rate_dict[user][key] = 5\n break\n if user_rate_dict[user][key] == 5:\n continue\n\n return user_rate_dict", "def update_mean_user_rating(self):\n self.mean_user_rating = self.ratings.groupby(['user_id'])['rating'].mean().reset_index()", "def accumulate_privacy_spending(self, sigma=1, num_examples=5040):\n q = tf.cast(num_examples, tf.float64) * 1.0 / self._total_examples\n\n moments_accum_ops = []\n for i in range(len(self._log_moments)):\n moment = self._compute_log_moment()\n moments_accum_ops.append(tf.compat.v1.assign_add(self._log_moments[i], moment[i]))\n #print(moments_accum_ops)\n return tf.group(*moments_accum_ops)", "def sum(self):\n\n return time_stat(self, stat=\"sum\")", "def updateAllShifts(shiftList):\n \n for shift in shiftList.measurements:\n averageShiftValue(shift)", "def accumulation(intensities, forecast_interval_mins):\n return sum(map(lambda mm_h: 
forecast_interval_mins*(mm_h/60), intensities))", "def add_star_streamer(client_id, streamer_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the indexed name of the streaming user.\n\t\tstreamer_indexed_name = session.query(User.indexed_name)\\\n\t\t\t\t.filter(User.id == streamer_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the streaming user.\n\t\tstarred_streamer = StarredStreamer(user_id=client_id,\n\t\t\t\tstreamer_id=streamer_id,\n\t\t\t\tindexed_name=streamer_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_streamer)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this streaming user.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the streaming user.\n\tsession.execute(Users.update()\n\t\t\t.where(User.id == streamer_id)\n\t\t\t.values({User.num_stars: User.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(StreamedMatch.match_id, Match)\\\n\t\t\t.join(Match, StreamedMatch.match_id == Match.id)\\\n\t\t\t.filter(StreamedMatch.streamer_id == streamer_id)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\n\tsession.commit()", "def sum(self):\n return sum(self.times)", "def sum(self):\n return sum(self.times)", "def sum(self):\n return sum(self.times)", "def calc_temps_2(start_date):\r\n print(\"one date\\n\")\r\n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).all()", "def daily_retained_list(self, fday, tday):\n dayList = self._list_day(fday,tday)\n return zip(dayList, \n self.make_bitmap(dayList[0],'dau').retained_count(\n (self.make_bitmap(day, 'dau') for day in dayList)\n )\n )", "def compute_EMA(self, series, num_days=50):\n temp = series.copy().reset_index(drop=True) # DO NOT MODIFY THE ORIGINAL DATAFRAME!\n smoothing_factor = 2/(num_days+1)\n EMA_prev = 0.0\n for idx in range(len(temp)):\n EMA_current = (temp[idx]*smoothing_factor)+EMA_prev*(1-smoothing_factor)\n # update values for next iteration\n temp[idx] = EMA_current\n EMA_prev = EMA_current \n return temp", "def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! 
You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0", "def rollingAvg( lag, oldSet ):\r\n\r\n newSet = []\r\n\r\n # insert lag-1 number of nans at beginning of list\r\n for i in range(0, lag - 1):\r\n newSet.append(Decimal('nan'))\r\n\r\n # calculate new values for list\r\n for i in range((lag - 1), len(oldSet)):\r\n sum = 0\r\n for j in range(lag):\r\n sum += oldSet[i - j]\r\n\r\n avg = sum / Decimal(lag)\r\n newSet.append(Decimal(avg))\r\n\r\n return newSet", "def impute_previous_cases(events, rate, delta_t=1.0):\n prev_case_distn = distribute_geom(events, rate, delta_t)\n prev_cases = reduce_diagonals(prev_case_distn)\n\n # Trim preceding zero days\n total_events = tf.reduce_sum(prev_cases, axis=-2)\n num_zero_days = total_events.shape[-1] - tf.math.count_nonzero(\n tf.cumsum(total_events, axis=-1)\n )\n return (\n prev_cases[..., num_zero_days:],\n prev_case_distn.shape[-2] - num_zero_days,\n )", "def increment_number_served(self, numbers):\n\t\tself.number_served += numbers", "def calculate_ma(rates: [[]], size: int, current: int = 0):\n total = current\n stop = size if current == 0 else size - 1\n i = 0\n while i < stop:\n total += rates[i][0]\n i += 1\n return total / size", "def update(self, val, n=1):\n self.sum += val * n\n self.cnt += n\n self.avg = self.sum / self.cnt", "def daily_avg(self, run_id):\n time_series = self.get_data(run_id=run_id,\n metric_ids=['00003', '00060', '00001'])\n if len(time_series) == 0:\n return None\n\n precip = time_series[time_series.metric_id == '00003']\n precip['date_time'] = pd.to_datetime(precip['date_time'], utc=True)\n precip.index = precip['date_time']\n precip_daily = precip.resample('D').sum()\n\n flow = time_series[time_series.metric_id == '00060']\n flow['date_time'] = pd.to_datetime(flow['date_time'], utc=True)\n flow.index = flow['date_time']\n flow_daily = flow.resample('D').mean()\n\n temp = time_series[time_series.metric_id == '00001']\n temp['date_time'] = pd.to_datetime(temp['date_time'], utc=True)\n temp.index = temp['date_time']\n temp_daily = temp.resample('D').mean()\n\n time_series_daily = temp_daily\\\n .merge(flow_daily,\n how='inner',\n left_index=True,\n right_index=True) \\\n .merge(precip_daily,\n how='inner',\n left_index=True,\n right_index=True)\n time_series_daily.columns = ['temp', 'flow', 'precip']\n time_series_daily = time_series_daily.dropna()\n return time_series_daily", "def update(self, dt):\n\n self.collecting(dt)", "def cum_avg(mylist):\n cumsum, cum_aves = [0], []\n \n for i, x in enumerate(mylist, 1):\n cumsum.append(cumsum[i-1] + x)\n cum_ave = (cumsum[i])/(i)\n cum_aves.append(cum_ave)\n \n return cum_aves", "def visitEstimated(self, date):\n raise NotImplementedError()", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def daily_cards_min_comparison(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n yesterday = today - timedelta(days=1)\n todays_per_min = []\n yesterday_per_min = []\n today_viewed = []\n yesterday_viewed = []\n # this iterates over each row in the dataframe, applying the logic and adding the cards_per_min value to the\n # appropriate 
list\n for index, row in df.iterrows():\n if row['session_start'].date() == today:\n per_min = get_cards_per_min(row)\n todays_per_min.append(per_min)\n today_viewed.append(row['total_looked_at'])\n if row['session_start'].date() == yesterday:\n per_min = get_cards_per_min(row)\n yesterday_per_min.append(per_min)\n yesterday_viewed.append(row['total_looked_at'])\n today_average = 0\n yesterday_average = 0\n if len(todays_per_min) > 0 and len(yesterday_per_min) > 0:\n # if both days have data, then calculate the average of the list\n today_average = sum(todays_per_min) / len(todays_per_min)\n yesterday_average = sum(yesterday_per_min) / len(yesterday_per_min)\n elif len(todays_per_min) == 0:\n # if no cards viewed today, cards per min average is 0\n today_average = 0\n elif len(yesterday_per_min) == 0:\n yesterday_average = 0\n try:\n difference = abs((today_average - yesterday_average) / yesterday_average) * 100\n except ZeroDivisionError:\n # if no cards viewed yesterday, cards per min up 100% today\n # if both averages are zero, this will display '0 100% =' in black\n difference = 100\n if today_average > yesterday_average:\n color_code = \"09B109\"\n # hex color code for green\n arrow = \"\\u2191\"\n # unicode for upward arrow\n elif today_average < yesterday_average:\n color_code = \"CE2929\"\n # hex color code for red\n arrow = \"\\u2193\"\n # unicode for downward arrow\n else:\n color_code = \"000000\"\n # hex color code for black\n arrow = \"\\u003D\"\n # unicode for equal sign\n result = make_results_dict(today_average, difference, color_code, arrow)\n result['daily_cards_min'] = result.pop('metric')\n return result", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def 
update_by_day(self, date):\n print 'UPDATE EXCHANGE RATE for day: %s' % date\n currencies = self.get_currencies()\n for code, name in currencies:\n if code in self.base_curr:\n _, created = Currency.objects.get_or_create(\n code=code, defaults={'name': name})\n if created:\n print('currency: %s created', code)\n\n for source in Currency.objects.filter(code__in=self.base_curr).all():\n exchange_rates = self.get_exchangerates_by_day(source.code, date)\n if exchange_rates:\n exchange_rates.pop(source.code)\n for code, rate in exchange_rates.iteritems():\n try:\n target = Currency.objects.get(code=code)\n exchange_rate = ExchangeRate.objects.get(date=date, source=source, target=target)\n exchange_rate.rate = rate\n exchange_rate.save()\n print('exchange rate updated %s, %s/%s=%s' % (date, source, target, rate))\n except ExchangeRate.DoesNotExist:\n exchange_rate = ExchangeRate.objects.create(date=date, source=source, target=target, rate=rate)\n print('exchange rate created %s, %s/%s=%s' % (date, source, target, rate))\n else:\n print('There is no rate for the current day')\n mail_admins('Exchange Rates Warning', 'There is no today exchange rate')\n break", "def daily_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n yesterday = today - timedelta(days=1)\n todays_per_min = []\n yesterday_per_min = []\n today_viewed = []\n yesterday_viewed = []\n # this iterates over each row in the dataframe, applying the logic and adding the cards_per_min value to the\n # appropriate list\n for index, row in df.iterrows():\n if row['session_start'].date() == today:\n per_min = get_cards_per_min(row)\n todays_per_min.append(per_min)\n today_viewed.append(row['total_looked_at'])\n if row['session_start'].date() == yesterday:\n per_min = get_cards_per_min(row)\n yesterday_per_min.append(per_min)\n yesterday_viewed.append(row['total_looked_at'])\n today_viewed_result = total_viewed(today_viewed, yesterday_viewed)\n today_viewed_result['total_viewed_daily'] = today_viewed_result.pop('total_viewed')\n return today_viewed_result", "def _update_accumulation(self, index, grad):\n self.accumulation[index] = self.accumulation[index] + grad**2", "def update(self,x): #update the estimate of rewards and number of esteps run\n\t\tself.N += 1\n\t\tself.estimate_mean = (1.0-1.0/self.N)*self.estimate_mean + (1.0/self.N)*x #recurence relation for averages", "def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return", "def trips_per_day(self):\n dfs = []\n for _, row in self.tables.iterrows():\n tbl, yr, col = row[\"tables\"], row[\"year\"], row[\"color\"]\n LOGGER.info(\"Processing %s\", tbl)\n query = (read_sql(\"total_trips.sql\")\n .format(TABLE=tbl, YEAR=yr, COLOR=col))\n query = query.replace(\"\\n\", \" \")\n dfs.append(get_dataframe_from_bigquery(query))\n (pd.concat(dfs)\n .loc[:, ['date', 'total_trips']]\n .groupby(['date']).sum()\n .reset_index() # .head())\n .pipe(save_to_gcs, settings.ASSETS.FILES.TOTAL_TRIPS))", "def impute_dates(tables, dates):\n new_fights = []\n for idx, date in enumerate(dates):\n if date == 'FUTURE EVENTS':\n break\n tables[idx]['Date'] = date\n for table in tables[:-1]:\n fights = [table[x:x+2] for x in range(0, len(table), 2)] \n for idxf, fight in enumerate(fights):\n fight.reset_index(drop=True, inplace=True)\n fight['Time'] = 
fight['Time'][0]\n new_fights.append(fight) \n return new_fights", "def _cumulative_sum(xs):\r\n cumsum = 0\r\n for x in xs:\r\n cumsum += x\r\n yield cumsum", "def cum_sum_strain_isotype(cls):\n df = pd.read_sql_table(cls.__tablename__, db_2.engine)\n cumulative_isotype = df[['isotype', 'isolation_date']].sort_values(['isolation_date'], axis=0) \\\n .drop_duplicates(['isotype']) \\\n .groupby(['isolation_date'], as_index=True) \\\n .count() \\\n .cumsum() \\\n .reset_index()\n cumulative_isotype = cumulative_isotype.append({'isolation_date': np.datetime64(datetime.datetime.today().strftime(\"%Y-%m-%d\")),\n 'isotype': len(df['isotype'].unique())}, ignore_index=True)\n cumulative_strain = df[['strain', 'isolation_date']].sort_values(['isolation_date'], axis=0) \\\n .drop_duplicates(['strain']) \\\n .dropna(how='any') \\\n .groupby(['isolation_date']) \\\n .count() \\\n .cumsum() \\\n .reset_index()\n cumulative_strain = cumulative_strain.append({'isolation_date': np.datetime64(datetime.datetime.today().strftime(\"%Y-%m-%d\")),\n 'strain': len(df['strain'].unique())}, ignore_index=True)\n df = cumulative_isotype.set_index('isolation_date') \\\n .join(cumulative_strain.set_index('isolation_date')) \\\n .reset_index()\n return df", "def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()", "def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})", "def performStats(dataArray):\n yearArray = [[0,0] for i in range(20)]\n for entry in dataArray:\n oSum = 0\n nSum = 0\n for k, v in entry.old.items():\n # print(k,v)\n oSum += v\n for k,v in entry.new.items():\n # print(k,v)\n nSum += v\n entry.oldSum = oSum\n entry.newSum = nSum\n idx = int(entry.year)%20 #0-19 index\n yearArray[idx][0] += entry.oldSum\n yearArray[idx][1] += entry.newSum\n return yearArray", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), 
str(err))\n \n return newPrice", "def daily_returns(self, df):\n daily_returns = df.copy()\n daily_returns[1:] = (df[1:] / df[:-1].values) - 1\n daily_returns.ix[0] = 0\n return daily_returns", "def calculateDataRate(self):\n pass", "def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True", "def get_freqs_by_date(self, entries):\n\n prev_date = ''\n curr_date = ''\n\n entries_by_date = {}\n\n curr_date_entries = []\n\n for entry in entries:\n\n curr_date = entry['date']\n\n if prev_date == '':\n curr_date_entries.append(entry)\n else:\n if curr_date != prev_date:\n entries_by_date[prev_date] = self.get_m_freqs(curr_date_entries)\n curr_date_entries = []\n curr_date_entries.append(entry)\n\n prev_date = curr_date\n\n entries_by_date[prev_date] = self.get_m_freqs(curr_date_entries)\n\n return entries_by_date", "def increment(self, stats, sample_rate=1):\n self.update_stats(stats, 1, sample_rate=sample_rate)", "def success(frame, frequency = 'M'):\n\n result = frame.set_index(DatetimeIndex(frame.inserted))\n if frequency=='M':\n result = result.groupby(['user',lambda x: x.year,lambda x: x.month])\n elif frequency == 'W':\n result = result.groupby(['user',lambda x: x.year,lambda x: x.week])\n else:\n result = result.groupby(['user',lambda x: x.year,lambda x: x.day])\n result = result.apply(lambda x: Series({'success_rate':_success(x,frequency), 'date':x.inserted.values[0]}))\n result = result.set_index(DatetimeIndex(result['date']))\n result = result.resample(frequency, how='mean').success_rate\n result.index = result.index - DateOffset(days=1)\n return result", "def increment(self):\n self._deltas += 1", "def increment_number_served(self, increment):\n self.number_served += increment", "def increment_number_served(self, increment):\n self.number_served += increment", "def points_earned(self):\n delta_counts = self.alive_counts - self.initial_counts\n points = self.points_table * delta_counts\n points = points.reshape(-1,72) # unravel the points for easier sum\n return np.sum(points, axis=1) + super().current_points()", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n 
self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def morning_star(self):\n self.data['morning_star'] = ((self.data['Open'].shift(2) > self.data['Close'].shift(2)) & \\\n ((self.data['Open'].shift(2) - self.data['Close'].shift(2))/\\\n (0.001+self.data['High'].shift(2)-self.data['Low'].shift(2))>.6) &\\\n (self.data['Close'].shift(2) > self.data['Open'].shift(1)) & \\\n (self.data['Open'].shift(1)>self.data['Close'].shift(1)) & \\\n ((self.data['High'].shift(1)-self.data['Low'].shift(1)) > \\\n (3*(self.data['Close'].shift(1)-self.data['Open'].shift(1))))&\\\n (self.data['Close']>self.data['Open'])&\\\n (self.data['Open']>self.data['Open'].shift(1)))", "def running_avg (mylist, N):\n import numpy as np\n \n cumsum = np.cumsum(np.insert(mylist, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)", "def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):\n return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)", "def update_average(self,result):\n a = 1/self.iters\n b = 1 - a\n self.average = a * result + b * self.average\n self.iters += 1", "def rating_date(self, rating_date):\n\n self._rating_date = rating_date", "def visitInterpreted(self, date):\n raise NotImplementedError()", "def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)", "def add_observations(self, json_data):\n record = self.record\n update_id = uuid.uuid4().hex\n self.add_pending_update(update_id)\n\n new_data = json.loads(json_data)\n calculator = Calculator(self)\n\n new_dframe_raw = calculator.dframe_from_update(\n new_data, self.schema.labels_to_slugs)\n calculator._check_update_is_valid(new_dframe_raw)\n\n call_async(calculator.calculate_updates, calculator, new_data,\n new_dframe_raw=new_dframe_raw, update_id=update_id)", "def update_attendance_rate(self):\n\n total_attendees = self.attendee_set.all().count()\n attended = self.attendee_set\\\n .filter(presented=True)\\\n .count()\n self.attendance_rate = attended / total_attendees\n assert(self.attendance_rate != None)\n self.save()", "def accumulate(self,tod,weights,chunk):\n binFuncs.binValues(self.sigwei, self.offsetpixels[chunk[0]:chunk[1]], weights=tod*weights )\n binFuncs.binValues(self.wei , self.offsetpixels[chunk[0]:chunk[1]], weights=weights )", "def daily_speed_sum_reduce(key, values):\n\tyield \"%s: %s, %s\\n\" % (key, sum([int(value) for value in values]), len(values))", "def update(self, arm, reward):\n\n # Increate pulls by one\n self.pulls[arm] += 1\n\n # New number of pull\n n = self.pulls[arm]\n\n # Old value\n old_val = self.values[arm]\n\n # New value (online weighted average)\n new_val = ((n - 1) / n) * old_val + (1 / n) * reward\n\n # Update value\n self.values[arm] = new_val\n\n # Update epsilon\n 
self.t += 1\n self.epsilon = self.calc_dynamic_epsilon()", "def update(self, result):\n self.visits += 1\n self.wins += result", "def accumulate(self, scopes=None):\n scopes = scopes if scopes is not None else self.scopes\n for scope in scopes:\n for k, v in self.partials.items():\n self.metrics[scope][k] += v\n self.metric_counts[scope][k] += self.partial_counts.get(k, 1)\n\n self.partials.clear()\n self.partial_counts.clear()", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def _series_date_value_iter(data_points: List[dict]) -> Generator:\n for data_point in data_points:\n yield data_point[\"generic:ObsDimension\"][\"@value\"], data_point[\"generic:ObsValue\"][\"@value\"]", "def add_disc_sum_rew(trajectories, gamma):\n for trajectory in trajectories:\n if gamma < 0.999: # don't scale for gamma ~= 1\n rewards = trajectory['rewards'] * (1 - gamma)\n else:\n rewards = trajectory['rewards']\n disc_sum_rew = discount(rewards, gamma)\n trajectory['disc_sum_rew'] = disc_sum_rew", "def add_ratings(self, ratings):\n # Convert ratings to an RDD\n new_ratings_RDD = self.sc.parallelize(ratings)\n # Add new ratings to the existing ones\n self.ratings_RDD = self.ratings_RDD.union(new_ratings_RDD)\n # Re-compute movie ratings count\n self.__count_and_average_ratings()\n # Re-train the ALS model with the new ratings\n self.__train_model()\n \n return ratings", "def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count", "def accumulate(values):\n # TODO: replace usage with numpy.cumsum(values) after adding numpy\n accumulation = 0\n for value in values:\n accumulation += value\n yield accumulation", "def get_diffs_of_ratings(l):\n if len(l) <= 1:\n return str(None)\n total = 0.0\n for v in range(0, len(l)-1):\n total = total + float((l[v+1][\"date\"] - l[v][\"date\"]).days)\n return str(float(total / (len(l)-1)))", "def calcStartlingRates(startles):\n startle_rates = np.sum(startles, axis=-1)\n return startle_rates", "def add_disc_sum_rew(trajectories, gamma):\n\n for trajectory in trajectories:\n if gamma < 0.999: # don't scale for gamma ~= 1\n rewards = trajectory['rewards'] * (1 - gamma)\n else:\n rewards = trajectory['rewards']\n disc_sum_rew = discount(rewards, gamma)\n trajectory['disc_sum_rew'] = disc_sum_rew" ]
[ "0.5819865", "0.562832", "0.5501563", "0.5379064", "0.52238375", "0.5169488", "0.5166707", "0.5135347", "0.50705355", "0.50331134", "0.50316", "0.50230944", "0.5007228", "0.4953354", "0.49355468", "0.4928901", "0.49267507", "0.49242172", "0.49139744", "0.49059406", "0.48995715", "0.48837632", "0.48814723", "0.48799047", "0.48688054", "0.48593923", "0.48348004", "0.48159477", "0.48131004", "0.47940797", "0.4789789", "0.47888863", "0.4784692", "0.4784692", "0.4784692", "0.4783297", "0.47720468", "0.47594327", "0.47589305", "0.47564507", "0.4754154", "0.47538087", "0.4739922", "0.4739695", "0.47346106", "0.47302905", "0.4720969", "0.46937457", "0.46880314", "0.46873376", "0.468706", "0.46867767", "0.4686283", "0.46862194", "0.46804", "0.46575576", "0.46502814", "0.46474", "0.4644616", "0.46431962", "0.4640151", "0.46380997", "0.4636106", "0.46282458", "0.46239448", "0.46200535", "0.46166658", "0.46108934", "0.46098584", "0.46088922", "0.4607504", "0.4607295", "0.4607295", "0.46067095", "0.46067077", "0.46049327", "0.46039972", "0.4600172", "0.45972687", "0.45959908", "0.45948422", "0.4589346", "0.4582812", "0.4577934", "0.45774445", "0.45763978", "0.45762467", "0.45703772", "0.456887", "0.45589703", "0.45589703", "0.45589703", "0.45563284", "0.45512605", "0.4549756", "0.45463163", "0.4542788", "0.45345435", "0.4534411", "0.45324913" ]
0.7347785
0
Calculate estimated number of stars observed during VLASS observation. Assume 4.2 sec per pointing as estimated by Paul.
def vlass_stars(duration, n_beams):
    n_pointings = duration//4.2
    n_observed = n_pointings*n_beams
    return n_observed
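A worked example with made-up inputs (one hour of observing and 36 beams are illustrative assumptions, not values taken from the record); note that floor division by the float 4.2 returns a float:

duration = 3600   # seconds of observing time (assumed)
n_beams = 36      # beams per pointing (assumed)
# 3600 // 4.2 = 857.0 pointings, times 36 beams = 30852.0 stars observed
print(vlass_stars(duration, n_beams))  # 30852.0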
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in self.air_quality_measures)\n + self.outdoor_air_ventilation\n )", "def getStarRating(waveHeight, windDir, avgWind, tideHeight):\n\n starRating = 0\n\n # wave height\n if waveHeight > 2:\n starRating += 4\n elif waveHeight > 1.6:\n starRating += 3\n elif waveHeight > 1.4:\n starRating += 2\n elif waveHeight > 1.2:\n starRating += 1\n\n # wind direction\n if windDir >= 270 or windDir <= 30:\n starRating += 1\n\n # wind strength\n if avgWind < 15:\n starRating += 1\n\n # tide\n if tideHeight < 1.2:\n starRating += 1\n elif tideHeight > 2.2:\n starRating = 1\n\n # check upper bound of 5 stars\n if starRating > 5:\n starRating = 5\n elif waveHeight < 1:\n starRating = 0\n\n return starRating", "def supernovae_rate(self, time, timestep, metallicity):\n # get the mass limits of the timesteps the user passed in. The\n # lower time corresponds to the higher stellar mass\n m_low = self.lifetimes.turnoff_mass(time + timestep, metallicity)\n m_high = self.lifetimes.turnoff_mass(time, metallicity)\n\n # check the bounds, since supernovae can only happen for certain\n # mass stars\n min_mass = self.sn_ii_model.sn.mass_boundary_low\n max_mass = self.sn_ii_model.sn.mass_boundary_high\n m_low = max(m_low, min_mass)\n m_high = min(m_high, max_mass)\n if m_low > max_mass or m_high < min_mass:\n return 0\n\n # Here we just integrate over the IMF to count the stars in this mass\n # range that die\n number = self._integrate_mass_smart(self.imf.normalized_dn_dm,\n m_low, m_high, source=\"massive\")\n return number / timestep", "def ventilation_rate_per_second(self):\n return self.volume * self.outdoor_air_ventilation * 1000 / 3600", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios", "def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)", "def ndpm(self):\n\n merged = pd.merge(left=self.test, right=self.predict, on=['user', 'item'], how='inner')[\n ['user', 'rating_x', 'rating_y']]\n ndpms = []\n for user in merged.user.unique():\n frame = merged[merged.user == user]\n if frame.shape[0] <= 1:\n continue\n C_plus = self.num_of_ordered_positive(frame, 'rating_x', 'rating_y')\n C_minus = self.num_of_ordered_negative(frame, 'rating_x', 'rating_y')\n C_u = self.num_of_ordered(frame, 'rating_x')\n if C_u == 0:\n continue\n C_s = self.num_of_ordered(frame, 'rating_y')\n C_u0 = C_u - (C_plus + C_minus)\n ndpms.append(1 - (C_minus + 0.5 * C_u0) / C_u)\n\n return sum(ndpms) / len(ndpms)", "def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P", "def estimate_arpu(x):\n arpu = 0\n if x['mean_luminosity_km2'] > 5:\n # #10 year time horizon\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (20*12) / (1 + 0.03) ** i\n # )\n return 20 * 12 * 10#arpu\n elif x['mean_luminosity_km2'] > 1:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (5*12) / (1 + 0.03) ** i\n # )\n return 5 * 12 * 10#arpu\n else:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # 
arpu += (\n # (2*12) / (1 + 0.03) ** i\n # )\n return 2 * 12 * 10#arpu", "def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def _estimate(self, env):\n env_variables = env.env_variables\n obs = env_variables[0]\n current_player = env_variables[1]\n\n estimate = 0\n for player in range(len(obs)):\n # multiplier is +1 if player is same as current_player\n # multiplier is -1 if player is different from current_player\n multiplier = 2 * abs(player - current_player) - 1\n\n for row in range(len(obs[0])):\n for col in range(len(obs[0][0])):\n if obs[player, row, col] == 1:\n estimate += multiplier * (10 ** self._num_tokens_left_diagonally(obs, player, row, col))\n estimate += multiplier * (10 ** self._num_tokens_vertically(obs, player, row, col))\n estimate += multiplier * (10 ** self._num_tokens_right_diagonally(obs, player, row, col))\n estimate += multiplier * (10 ** self._num_tokens_horizontally(obs, player, row, col))\n\n return estimate", "def score(cur_ven, ven):\r\n try:\r\n alpha = 750\r\n numerator = (ven[\"rating\"] * 0.75) + (2.5 * (1- eulers**(-ven[\"ratingSignals\"]/144)))\r\n cur_coord = (cur_ven[\"location\"][\"lat\"], cur_ven[\"location\"][\"lng\"])\r\n ven_coord = (ven[\"location\"][\"lat\"], ven[\"location\"][\"lng\"])\r\n denominator = vincenty(cur_coord, ven_coord).meters + alpha\r\n except Exception as e:\r\n print \"{}, \\n has produced an error from {}\".format(ven[\"name\"], e)\r\n return float(\"-inf\")\r\n return numerator / denominator", "def get_rating(self):\n self.total = sum(int(review['stars']) for review in self.reviews.values())\n if self.total > 0:\n return round(self.total / self.reviews.count(), 1)\n else:\n return self.total", "def getActualNumObs(avgNumObs, proportion):\n result = round(avgNumObs*proportion)\n return result", "def count_star(self, tokens):\n return self.counts[tokens] - self.beta", "def dishlist_avg_cal(n:list)->float:\r\n 
all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)", "def numberOfSamples (self) :\n S = self.mdp.S\n A = self.mdp.A\n gamma = self.mdp.gamma\n\n factor = 1 / (self.epsilon ** 2 * (1 - gamma) ** 4)\n term2 = np.log((S * A) / (self.epsilon * (1 - gamma) ** self.delta))\n return (S + term2) * factor", "def get_correct_lap_count(self):", "def nyquist(self):\n return 1 / (2 * np.median(np.diff(self.lc.time)))", "def calculate_text_stars(word_counts) -> int:\n if word_counts == []:\n return 3\n words_per_slide = sum(word_counts) / len(word_counts)\n stars = 5 - abs(words_per_slide - 35) / 8\n # print(stars)\n return max(0, min(5, int(stars + 0.5)))", "def NBIAS(self):\n return len(self.STARS[\"dist\"])", "def starsize(self, hipid):\n #if hipid<0 or len(self.hip_stars)<=hipid: return 0\n s = self.hip_stars[hipid]\n if s==None: return 0\n #return self.zerosize*(.8**(s[1]))\n #return self.zerosize-s[1]-2\n return self.dimmest_mag-s[1]+1", "def molar_mass_dry_air():\n return 28.9647", "def avg_num_visits_patient(self):\n pass", "def get_voltage_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? (.*?) .*? .*? .*? \\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating", "def emission_rate_per_aerosol_per_person_when_present(self) -> _VectorisedFloat:\n return self.known_individual_emission_rate", "def get_rate(timestamps):\n return (timestamps[1, 1] - timestamps[0, 1]) / (timestamps[1, 0])", "def average_rating(self):\n return ( self.rating_1 + self.rating_2 + self.rating_3) / 3", "def emission_rate_per_person_when_present(self) -> _VectorisedFloat:\n return (self.emission_rate_per_aerosol_per_person_when_present() *\n self.aerosols())", "def estimate(self, reps):\n return self.onerm / MaxCalc.coefficients[reps - 1]", "def getScore(self):\n tempscore = 1000 - 0.01*self.timeDriving \n tempscore -= 0.1*getDist(self.maze.checkpoints[self.checkpoint].getMid(),self.pos)\n tempscore += self.checkpoint *1000\n tempscore += self.laps * 1000 * len(self.maze.checkpoints)\n return tempscore", "def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)", "def n(self):\n if not hasattr(self, \"_n\"):\n self._n = np.sqrt(self.frame.center.body.µ / self.sma ** 3)\n return self._n", "def avgtr(self):\n return np.diff(self.trtimes).mean()", "def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)", "def emission_rate_per_aerosol_per_person_when_present(self) -> _VectorisedFloat:\n # Note on units: exhalation rate is in m^3/h -> 1e6 conversion factor\n # Returns the emission rate times the number of infected hosts in the room\n\n ER = (self.virus.viral_load_in_sputum *\n self.activity.exhalation_rate *\n 10 ** 6)\n return ER", "def viterbiMeansEstimate(self):\n for i in range(self.noOfEmmittingStates):\n self.outputProbabilities[i,0] = \\\n self.observationSequence[0, nonzero(self.mostLikelyPath ==\n i+1)[1]-1].mean()", "def __calculate_estimation(self):\r\n estimation = 0.0\r\n for index_cluster in range(0, len(self.__clusters)):\r\n cluster = self.__clusters[index_cluster]\r\n index_medoid = self.__current[index_cluster]\r\n for index_point in cluster:\r\n estimation += euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[index_medoid])\r\n\r\n return estimation", "def ams_estimate(self):\n return int(_mean([x ** 2 for x in self.ams_estimates]))", "def suspected_per_hour(self):\r\n return (3600.*(self.circ_suspected+self.strm_suspected\r\n 
+self.circ_failed+self.strm_failed))/self.current_uptime()", "def averageTime(self):\n \n pass", "def _get_n(self):#@Needs to fix it for general case\n n_60 = 0.55 * 1 * 1 * 0.75 * self._data[SoilProperty.SPT_N] /0.6\n if not self.is_clayey() and n_60>15: #apply dilitracy correction\n n_60 = 15 + 0.5 * (n_60 - 15)\n return n_60", "def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0", "def intensity(self) -> int:", "def pc_nproduced_avg(self) -> \"float\":\n return _beamforming_swig.doaesprit_sptr_pc_nproduced_avg(self)", "def _calc_ap(self, mol):\n matches = mol.GetSubstructMatches(self.aromatic_query)\n return len(matches) / mol.GetNumAtoms()", "def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()", "def density(self):\n return self.num_arcs() / (self.nframes / FRATE)", "def taper_ratio(self) -> float:\n return self.xsecs[-1].chord / self.xsecs[0].chord", "def dishlist_avg(n:list)->float:\r\n all_prices = dishlist_prices(n)\r\n return sum(all_prices)/len(all_prices)", "def mean_deviation(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: abs(x - _mean), self.sample))/len(self.sample)", "def __calculate_estimation(self):\n estimation = 0.0\n for index_cluster in range(0, len(self.__clusters)):\n cluster = self.__clusters[index_cluster]\n index_medoid = self.__current[index_cluster]\n for index_point in cluster:\n estimation += euclidean_distance_square(\n self.__pointer_data[index_point],\n self.__pointer_data[index_medoid],\n )\n\n return estimation", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def cps(self):\n return self.datacounts / self.exptime", "def S(self):\n Ae = 1.0/float(len(self.K)) \n return (self.avg_Ao() - Ae)/(1.0 - Ae)", "def __calc_s(self, df):\n df.loc[:, \"avg_num_drivers\"] = df.idle + df.incoming\n s = df.total / df.avg_num_drivers # df.total := amount of demand\n s[s > 1] = 1\n s[np.isnan(s)] = 0.0001\n s[np.isinf(s)] = 1\n\n df.loc[:, \"prob_of_s\"] = s\n df = df[[\"zone_id\", \"prob_of_s\"]]\n return df", "def rate(self) -> float:\n return self.success_cnt / self.total_cnt if self.total_cnt > 0 else 1.0", "def updateVisitStatistics(self, s, a, s_) :\n self.N[s, a, s_] += 1\n self.Ntotal[s, a] += 1\n self.PHat[s, a] = self.N[s, a] / self.Ntotal[s, a]\n self.omega[s, a] = confidenceRadius(self.mdp, self.Ntotal[s, a], self.delta_)", "def trial_atd(trial, omit_missing_frames=True):\n frames = trial.HMM_MLE\n if omit_missing_frames:\n frames = frames[frames >= 0]\n total_frames = len(frames)\n num_runs = len([run for run in calc_run_lengths(frames)])\n if num_runs == 0:\n return float('nan')\n return (total_frames/num_runs)/60", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def avg(self):\n return sum(self.times) / len(self.times)", "def em_var(self) -> float:\n if self.__total_pulls == 0:\n raise Exception('Number of pulls is 0. 
No empirical variance.')\n return (self.__sum_of_square_reward -\n self.__total_rewards**2 / self.__total_pulls) / self.__total_pulls", "def pearson_sim( self, n_common, rest1_reviews, rest2_reviews ):\n\t\tif n_common == 0:\n\t\t\trho = 0\n\t\telse: \n\t\t\t# subtract user ratings if their averages \n\t\t\t# This makes the above ratings by the two users more comparable\n\t\t\tdiff1 = rest1_reviews['stars'] - rest1_reviews['user_avg']\n\t\t\tdiff2 = rest2_reviews['stars'] - rest2_reviews['user_avg']\n\t\t\trho = pearsonr( diff1, diff2 )[0]\n\n\t\t\tif np.isnan(rho):\n\t\t\t\trho = 0\n\t\t\t\t\n\t\treturn rho", "def findVWSP(self):\n num=0\n den=0\n ban=False\n for el in self.TL:\n if datetime.fromtimestamp(el.TS) > (datetime.now()-timedelta(minutes = 15)):\n ban=True\n num+=el.Price * el.NoSh\n den+= el.NoSh \n if ban:\n if den!=0:\n return num/den\n else:\n raise BaseException(\"Oops! the vwsp cannot be computed.\")\n else:\n return 0", "def numberOfPoints(self):\n return 20000", "def vector_strength(spikes, freq):\n \n per = 1e3/freq # convert from Hz to period in msec\n ph = 2*np.pi*np.fmod(spikes, per)/(per) # convert to radians within a cycle\n c = np.sum(np.cos(ph))**2\n s = np.sum(np.sin(ph))**2\n vs = (1./len(ph))*np.sqrt(c+s) # standard vector strength computation\n n = len(spikes)\n R = n*vs # Raleigh coefficient\n Rp = np.exp(-n*vs*vs) # p value for n > 50 (see Ashida et al. 2010).\n d = np.sqrt(2.*(1-vs))/(2*np.pi*freq)\n return{'r': vs, 'n': n, 'R': R, 'p': Rp, 'ph': ph, 'd': d}", "def dif_avg(u_beam):\n u = np.sort(u_beam)[::-1]\n# print(u)\n ind = u.shape[0]//100*5\n top5 = np.mean(u[:ind])\n# bottom5 = np.mean(u[-ind:])\n mean_wo_top5 = np.mean(u[ind:])\n return top5/mean_wo_top5", "def getLiters(self):\n return self._count/self._ppl", "def cronbach_alpha(self) -> float:\n itemscores = np.stack([self.true, self.predicted])\n itemvars = itemscores.var(axis=1, ddof=1)\n tscores = itemscores.sum(axis=0)\n nitems = len(itemscores)\n return float(nitems / (nitems - 1.) * (1 - itemvars.sum() / tscores.var(ddof=1)))", "def get_nsing(self,epsilon=1.0e-4):\n mx = self.xtqx.shape[0]\n nsing = mx - np.searchsorted(\n np.sort((self.xtqx.s.x / self.xtqx.s.x.max())[:,0]),epsilon)\n if nsing == mx:\n self.logger.warn(\"optimal nsing=npar\")\n nsing = None\n return nsing", "def pc_nproduced_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_nproduced_avg(self)", "def nze(self) -> int:", "def nze(self) -> int:", "def calculate_ndvi(self):\n self.ndvi = (self.bands[\"n\"].astype(float) - self.bands[\"r\"].astype(float)) \\\n / (self.bands[\"n\"].astype(float) + self.bands[\"r\"].astype(float))", "def update(self,x): #update the estimate of rewards and number of esteps run\n\t\tself.N += 1\n\t\tself.estimate_mean = (1.0-1.0/self.N)*self.estimate_mean + (1.0/self.N)*x #recurence relation for averages", "def get_current_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? .*? (.*?) .*? .*? 
\\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating", "def pv(rate, n_years):\n return 1 / fv(rate, n_years)", "def emission_rate_per_aerosol_per_person_when_present(self) -> _VectorisedFloat:\n raise NotImplementedError(\"Subclass must implement\")", "def avg_inference_time(self):\n return self._avg_inference_time", "def CalcStar(self, starnum, imagerating, userrating):\r\n\t\ttype = 0\r\n\t\tif imagerating >= starnum: type += 1\r\n\t\tif userrating >= starnum: type += 2\r\n\t\treturn type", "def PValue(self, iters=1000):\n self.test_stats = np.array([self.TestStatistic(self.RunModel()) \n for _ in range(iters)])\n\n count = sum(self.test_stats >= self.actual)\n return count / iters", "def _compute_rating(self, cand):\n p = cand.info['bary_period']\n dm = cand.info['dm']\n ra = cand.info['raj_deg']\n decl = cand.info['decj_deg']\n pdiff_min = 0.0\n\n diff_ra = np.abs(self.known_ras - ra)\n diff_dec = np.abs(self.known_decls - decl)\n\n ii_nearby = (diff_ra < 0.2) & (diff_dec < 0.2)\n periods = self.known_periods[ii_nearby]\n dms = self.known_dms[ii_nearby]\n\n for b in range(1, M):\n pdiff = (2.0*np.abs(p*b-periods)/(p*b+periods))\n\n if np.any((pdiff < 0.002)):\n for dispm in dms:\n pdiff_dm=1.0/(2.0*np.abs(((dispm)-dm)/((dispm)+dm)))\n pdiff_min=np.min(pdiff_dm,pdiff_min)\n if pdiff_min == 0.0:\n for rat in self.ratios:\n pdiff = 2.0*np.abs(((p*rat)-periods)/((p*rat)+periods))\n if np.any((pdiff < 0.02)):\n for dispm in dms:\n pdiff_dm=1.0/(2.0*np.abs(((dispm)-dm)/((dispm)+dm)))\n pdiff_min=np.min(pdiff_dm,pdiff_min)\n return pdiff_min", "def count_rate(M, B, energy):\n pm_results = ga.peak_measurement(M, energy, sub_regions='none')\n bm_results = ga.peak_measurement(B, energy, sub_regions='none')\n sub_peak = ga.background_subtract(pm_results, bm_results, M.livetime,\n B.livetime)\n net_area = sub_peak[0]\n count_rate = net_area/M.livetime\n \n return(count_rate)", "def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def __tstar_calc(self, mu_pi, dist):\n return (dist**3 / (mu_pi[0] + mu_pi[1])) ** 0.5", "def progress(self) -> int:\n return int(round(100 * self.somme() / self.finances))", "def calc_score(pins_stats):\n count = 0\n new = pins_stats[:, :2] - ORIG_PINS_LOC\n for p in new:\n if np.linalg.norm(p) > R_PIN / 2:\n count += 1\n return count", "def obscurity(user):\n reader = open(\"similarities.txt\", \"r\")\n lines = reader.readlines()\n obscurity = 0.0\n count = 0\n for line in lines:\n a, b, sim = line.split(\"\\t\")\n if user == a:\n count += 1\n obscurity += float(sim)\n\n obscurity /= count\n return obscurity", "def width_calc(sigma_v):\n\t\t\n\t\tsigma_lambda = sigma_v/c*(lambda0*(1 + z)) #in observing frame\n\t\n\t\treturn np.sqrt(sigma_lambda**2 + sigma_slit**2)", "def calculateR(sapienses: list) -> float:\n r = 0\n for i in sapienses:\n r = r + i.numberInfected\n r=r/I0\n r = r*S/(S+R+D)\n return r", "def epsilon_delta(self):", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)" ]
[ "0.61229646", "0.6093639", "0.59843814", "0.59413254", "0.5775997", "0.5775997", "0.5775997", "0.5775997", "0.57482773", "0.57340753", "0.5731418", "0.57153237", "0.5698593", "0.56730896", "0.5668431", "0.5663986", "0.5649949", "0.5639038", "0.5635766", "0.5622494", "0.561866", "0.5612916", "0.5608629", "0.5587175", "0.55677193", "0.55670786", "0.55634433", "0.5555399", "0.5553607", "0.55502486", "0.5545206", "0.55411625", "0.55409527", "0.5539517", "0.5516542", "0.5507252", "0.550395", "0.5496466", "0.5486344", "0.5485264", "0.5473958", "0.54735506", "0.54694724", "0.5468244", "0.54430306", "0.5442266", "0.54386413", "0.54386324", "0.54374534", "0.5433268", "0.5432025", "0.54310846", "0.54226434", "0.54178447", "0.54148614", "0.54093444", "0.54006624", "0.53963196", "0.53956383", "0.5392722", "0.5377853", "0.5374805", "0.5369862", "0.53682065", "0.5354057", "0.5352085", "0.5352085", "0.5352085", "0.53520644", "0.5348053", "0.5340313", "0.5339907", "0.53381836", "0.5334352", "0.5332601", "0.5331399", "0.53263164", "0.53222567", "0.531587", "0.531587", "0.53128433", "0.53103715", "0.5303832", "0.5303422", "0.52973235", "0.52957696", "0.5295213", "0.5291907", "0.529136", "0.5291166", "0.5290762", "0.5287095", "0.5286946", "0.5283808", "0.5283111", "0.52830434", "0.5274933", "0.52732867", "0.5271672", "0.5270922" ]
0.73987895
0
Calculate the prize based on the points
def which_prize(points):
    prize = None
    if 0 <= points <= 50:
        prize = 'wooden rabbit'
    elif 51 <= points <= 150:
        prize = None
    elif 151 <= points <= 180:
        prize = 'wafer-thin mint'
    elif 181 <= points <= 200:
        prize = 'penguin'
    else:
        prize = None
    if prize:
        return 'Congratulations! You have won a {}!'.format(prize)
    else:
        return 'Oh dear, no prize this time.'
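A brief usage sketch for the function above; the sample point values are chosen for illustration and are not part of the stored row:

    # Illustrative calls against the which_prize definition above
    which_prize(25)    # 'Congratulations! You have won a wooden rabbit!'
    which_prize(100)   # 'Oh dear, no prize this time.' (51-150 intentionally yields no prize)
    which_prize(160)   # 'Congratulations! You have won a wafer-thin mint!'
    which_prize(190)   # 'Congratulations! You have won a penguin!'
    which_prize(250)   # 'Oh dear, no prize this time.'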
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, points):\n denom = sum([self.weights[i]/(points - self.xint[i]) for i in range(self.n)])\n numer = sum([self.yint[i]*self.weights[i]/(points - self.xint[i]) for i in range(self.n)])\n return numer/denom", "def numberOfPoints(self):\n return 20000", "def __evaluate(self, point):\n assert len(point) == len(self.weight)-1\n result = self.weight[0]\n for i in range(0,len(point)):\n result += self.weight[i+1] * point[i]\n return result", "def num_quadrature_points(self) -> int:", "def to_points(self, divisions=100):", "def AUC(points):\n\tauc = 0.0\n\tfor point2, point1 in zip(points[1:], points[:-1]):\n\t\t#print(point2, point1)\n\t\tbase = (point2[0] - point1[0]) / 100.0\n\t\theight = ( (point2[1] - point1[1])/2.0 + point1[1] ) / 100.0\n\t\tauc += (base*height)\n\treturn auc", "def total_points(self):\n total_points = 0.0\n for ingredient in self.ingredients:\n if (ingredient.has_property('ppg')):\n # Use given value if specified\n total_points += ingredient.property('ppg').to('ppg') * ingredient.quantity.to('lb')\n else:\n total_points += EXTRACTS[ingredient.type] * ingredient.quantity.to('lb')\n return(Quantity(total_points, 'points'))", "def total_points(self, **kwargs):\n points = 0.0\n for key, value in self.stat_data.items():\n points = points + STATS[key][1](value)\n return round(points, self.__class__.default_round)", "def cost(self) -> float:", "def points_per_dollar(self):\n if float(self.draftkings_salary) == 0.0:\n return 0.0\n\n return float(self.predicted_draftkings_points) / float(self.draftkings_salary)", "def propabilityLVQ(self):\n self.labels = self.labelingLVQ()\n for i in range(self.labels.shape[0]):\n for j in range(self.labels.shape[1]):\n for k in range(self.labels.shape[2]):\n total = sum(self.labels[i, j, k] for i in range(self.labels.shape[0]))\n if total == 0. 
:\n continue\n else:\n self.propa[i, j, k] = self.labels[i, j, k] / total\n self.propa[i, j, k] = round(self.propa[i, j, k], 2)\n return self.propa", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def calculate_profit(self):", "def distPlusProche(p,pts):\r\n\tpoints=pts[::]\r\n\r\n\t#on enleve p de la liste des points en cas de répétition\r\n\tif p in points:\r\n\t\tpoints.remove(p)\r\n\t#on initialise mini avec la distance au premier point de la liste des points\r\n\tmini=sqrt((p[0]-points[0][0])**2+(p[1]-points[0][1])**2)\r\n\t#on compare chaque point avec p pour trouver la plus petite distance\r\n\tfor p2 in points:\r\n\t\tdist=sqrt((p2[0]-p[0])**2+(p2[1]-p[1])**2)\r\n\t\tif dist<mini:\r\n\t\t\tmini=dist\r\n\r\n\treturn round(mini)", "def calculate_points(self):\n points = 0\n for power in self.stats['powers']:\n points += self.stats['powers'][power]\n return points", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def calc_points_shop(self):\n rem_pop = self.popula - self.popula_used\n points = min(self.cnt_shop, rem_pop // 5) * 11\n rem_shop = self.cnt_shop - rem_pop // 5\n vptab_shop = (0, 1, 2, 4, 7)\n if rem_shop > 0:\n points += vptab_shop[rem_pop % 5]\n penalty_popula = max(rem_pop - self.cnt_shop * 5, 0)\n points -= penalty_popula\n return points", "def calc_points_park(self):\n be = ['_'] * 8\n be += self.b[ 0: 5]\n be += ['_'] * 2\n be += self.b[ 5:10]\n be += ['_'] * 2\n be += self.b[10:15]\n be += ['_'] * 2\n be += self.b[15:20]\n be += ['_'] * 8\n cnt_PG = 0\n cnt_P = 0\n points = 0\n vptab_park = (0, 2, 4, 7, 11)\n for i in range(8, 34):\n if be[i] == 'P' or be[i] == 'G':\n cnt_PG += 1\n if be[i] == 'P':\n cnt_P += 1\n neigh_tower_office = 0\n if be[i - 1] == 'T' or be[i - 1] == 'O':\n neigh_tower_office += 1\n if be[i + 1] == 'T' or be[i + 1] == 'O':\n neigh_tower_office += 1\n if be[i - 7] == 'T' or be[i - 7] == 'O':\n neigh_tower_office += 1\n if be[i + 7] == 'T' or be[i + 7] == 'O':\n neigh_tower_office += 1\n points += vptab_park[neigh_tower_office]\n if 'park' in args.exp:\n points += cnt_PG\n if 'repr' in args.exp:\n recycle_energy = max(self.energy - self.energy_used, 0)\n points += recycle_energy\n else:\n penalty_energy = max(self.energy - self.energy_used - cnt_P, 0)\n points -= penalty_energy\n return points", "def cal_pt(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for pt routine)')\n\n self.pt =math.sqrt(self.px**2+self.py**2)", "def pointlength(x):\n return 0.0", "def precisions(self):\n raise NotImplementedError", "def update_points(self):\n #Calculate Upper Section\n total = 0\n for box in self._upper_section:\n total += box.points\n self._upperSum = total\n\n if total >= 63:\n self._bonus = 35\n total += 35\n self._upperTotal = total\n\n # Calculate Lower Section\n total = 0\n for box in self._lower_section:\n total += box.points\n\n if self.get_box(\"Yahtzee\").points > 0:\n total = total + 
(self._yahtzee_count - 1) * 100 # Yahtzee Bonus\n\n self._lowerTotal = total\n\n #Total Points\n self._grandTotal = self._upperTotal + self._lowerTotal", "def pointsize(self):\n\treturn self.m_pointsize", "def calculate_prp(self, ref_point: np.ndarray, f_current: np.ndarray) -> np.ndarray:\n\n # distance\n d = np.linalg.norm(np.atleast_2d(ref_point - f_current))\n\n # unit vectors\n ei = np.array([np.zeros(len(ref_point))])\n es = np.repeat(ei, len(ref_point), axis=0)\n\n for i, j in enumerate(es):\n for ind, _ in enumerate(j):\n if ind == i:\n j[ind] = 1\n\n return ref_point + (d * es)", "def solution_score(points, point_iscore, test_pc):\n IDOP, iscore_sum = calculate_dop(points, point_iscore)\n DOP, _ = calculate_dop(points)\n sol_iscore = DOP/IDOP\n return sol_iscore, iscore_sum", "def quadrature_calculator(x_points: list, y_points: list) -> float:\n # sorted_y = [p for _, p in sorted(zip(x_points, y_points))]\n sorted_y = [p for _, p in\n sorted(list(zip(x_points, y_points)), key=lambda x: x[0])]\n n = len(y_points)\n sorted_x = sorted(x_points)\n\n trapezoidal_rule = [\n 0.5 * (sorted_x[n + 1] - sorted_x[n]) * (sorted_y[n + 1] + sorted_y[n])\n for n in range(n - 1)]\n\n return float(np.sum(trapezoidal_rule))", "def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0", "def calculate_pts(crv, *args):\t\n mode = cmds.radioButtonGrp(widgets[\"methodRBG\"], q=True, sl=True)\n\n if mode == 1:\n cLen = cmds.arclen(crv, ch=False)\n perUnit = cmds.floatFieldGrp(widgets[\"recoFFG\"], q=True, v1=True)\n total = int(cLen * perUnit)\n if mode == 2:\n total = cmds.intFieldGrp(widgets[\"totalIFBG\"], q=True, v1=True)\n\n # print \"curve = {0}, total = {1}\".format(crv, total)\n return total", "def _cal_pr_curve(self, labelAndVectorisedScores):\n rawPredictionCol = self.rawPredictionColValue\n labelCol = self.labelColValue\n curve = precision_recall_curve(labelAndVectorisedScores, rawPredictionCol, labelCol).select(\"precision\",\"recall\")\n \n return curve", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def pdf(self, point: np.ndarray) -> float:\n return self._probs.dot([rv.pdf(point) for rv in self._rvs])", "def cal_pn(grams_set, grams, candidate, reference):\n count = 0\n for gram in grams_set:\n # print(gram)\n count += count_clip(gram, grams, reference)\n # calculate log() for p, so '+10**-8' avoid 'p==0'\n p = count / len(grams) + 10**-8 \n return p", "def points_percentage(plane, p, points, total):\n match = 0\n for point in points:\n if distance_to_plane(plane, point) <= p:\n match += 1\n\n return match / total", "def pss(self):\n return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \\\n ((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))", "def m2pt(x):\n return x / pt_size", "def check_prize(correct_num):", "def required_points(self):\n req_points = self.min_performance * self.initial_available_points()\n return np.maximum(0, np.int64(np.ceil(req_points)))", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (1030.0, 525.0, \"straight\"),\n (1030.0, 475.0, \"straight\"),\n (970.0, 475.0, \"straight\"),\n 
(970.0, 525.0, \"straight\"),\n (1030.0, 525.0, \"straight\"),\n ]", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def pr_auc_score(precision: list, recall: list) -> float:\n return quadrature_calculator(recall, precision)", "def evaluate(self,p):\n if not self.initialized: self.__initialize__()\n if self.vp0: p_ = 1-p\n else: p_ = p\n if self.ids_to_consider is None:\n #sum on all parametrized cell\n cf = np.sum(self.V[self.p_ids-1]*p_)/self.V_tot - self.max_v_frac\n else:\n cf = np.sum((self.V[self.ids_to_consider-1]*p_))/self.V_tot - self.max_v_frac\n return cf", "def test_Pt(self):\n\n test_value = self.portfolio.calculate_asset_performance(\n *self.boarder)[self.test_row_number]\n calculated_value = self.manual_cumprod(\n self.portfolio._get_asset_portfolio)\n self.assertAlmostEqual(test_value, calculated_value)", "def _proportionalTerm(self):\n\n\t\treturn self._getErrorFunction() * self._Kp", "def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def prescaler(self) -> int:", "def computePRMeasures(self, targetLabels, actualLabels):\r\n if self.basicMeasures is None:\r\n self.basicMeasures = self.computeBasicStatistics(targetLabels, actualLabels)\r\n if self.basicMeasures[0] == 0:\r\n self.prMeasures = (0,0)\r\n else:\r\n self.prMeasures = ((0.0 + self.basicMeasures[0]) / (self.basicMeasures[0] + self.basicMeasures[1]),\r\n (0.0 + self.basicMeasures[0]) / (self.basicMeasures[0] + self.basicMeasures[3]))\r\n return self.prMeasures", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def P_total(pressures=[]):\n total = 0.0\n for pressure in pressures:\n total += pressure\n return float(total)", "def z_score(self, x):\n return (x - self.n) / self.p", "def postage_needed(self):\n return self.weight * self.postage_coefficient", "def calculate(self) -> float:", "def pr_at_k(rels, expected_count, k):\n k = min(k, len(rels))\n TP = sum(rels[:k])\n FP = k - TP\n FN = expected_count - TP\n TN = len(rels[k:]) - sum(rels[k:])\n assert TN >= 0.0\n return TP / (TP + FP), TP / (TP + FN), TP / (TP + TN) if TP + TN > 0 else 0", "def score_ap_from_ranks_1(ranks, nres):\n\n # accumulate trapezoids in PR-plot\n ap = 0.0\n\n # All have an x-size of:\n recall_step = 1.0 / nres\n\n for ntp, rank in enumerate(ranks):\n\n # y-size on left side of trapezoid:\n # ntp = nb of true positives so far\n # rank = nb of retrieved items so far\n if rank == 0:\n precision_0 = 1.0\n else:\n precision_0 = ntp / float(rank)\n\n # y-size on 
right side of trapezoid:\n # ntp and rank are increased by one\n precision_1 = (ntp + 1) / float(rank + 1)\n\n ap += (precision_1 + precision_0) * recall_step / 2.0\n\n return ap", "def get_precision(self):\n ...", "def eucdist3d(self,point1,point2):\n#\t\tif not isinstance(point1,np.ndarray):\n#\t\t\tpoint1 = np.array(point1)\n#\t\t\tpoint2 = np.array(point2)\n\t\t\n\t\treturn(((point2[0]-point1[0])**2 + (point2[1]-point1[1])**2 + (point2[2]-point1[2])**2)**0.5)", "def _compute_variance_of_points(self, points_to_sample):\n\n\n\n _hyperparameters = self._covariance.get_hyperparameters()\n del_idx = [idx +1 for idx in self.idx]\n covariance = SquareExponential(numpy.delete(_hyperparameters, del_idx, 0))\n var_star = numpy.empty((points_to_sample.shape[0],points_to_sample.shape[0]))\n marginal = self._get_variance_aij_marginal()\n for i, point_one in enumerate(points_to_sample):\n for j, point_two in enumerate(points_to_sample):\n tmp_point_two = numpy.delete(point_two, self.idx, 0)\n tmp_point_one = numpy.delete(point_one, self.idx, 0)\n var_star[i,j] = covariance.covariance(tmp_point_one, tmp_point_two) * marginal\n\n K_star = self._build_integrated_covariance_maxtrix_variance(\n self._covariance,\n self._points_sampled,\n points_to_sample,\n )\n K_star_K_C_Inv_K_star = numpy.dot(numpy.dot(K_star.T, self._K_C), K_star)\n tmp = var_star - K_star_K_C_Inv_K_star *self._get_variance_bij_marginal()\n return tmp * self._get_average()", "def __float__(self) -> float:\n return float(self.p)", "def precision(y_true, y_pred):\n true_positives = bk.sum(bk.round(bk.clip(y_true * y_pred, 0, 1)))\n predicted_positives = bk.sum(bk.round(bk.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + bk.epsilon())\n return precision", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]", "def getsize_pt(self):\n # The factor 16L/16777216L=2**(-20) converts a fix_word (here self.q)\n # to the corresponding float. 
Furthermore, we have to convert from TeX\n # points to points, hence the factor 72/72.27.\n return 72/72.27 * 16*self.q/16777216", "def get_scale():\r\n\r\n \r\n return 0.5", "def barycenter(self):\n _value = (sum((v[0] for v in self.objects.values())),sum((v[1] for v in self.objects.values())))\n if self.objects:\n _value = (_value[0]/len(self.objects), _value[1]/len(self.objects))\n self.bc=_value\n return _value", "def totalScore(x, air = 5, safety = 5, job = 5, green = 5, price = 5):\n #\"price\", \"contamination\", \"distanceToDangerZone\", \"distanceToMedianJobLocation\",\"GreenConc\"\n return -x[0]*air + x[1]*safety - x[2]*job + x[3]*green - x[4]*price", "def plotprice(self):\n plt.figure()\n plt.hist( self.pricetree[-1,:] )\n plt.title(\"price Distribution\") \n plt.show()", "def intra_cost(points, cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in points:\n if point != pt and pt not in cluster.points:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n return int(sum(map(_p2p, cluster.points)))", "def Precision(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_Precision(self, *args)", "def calc_points_tower(self):\n points = 0\n cnt_tower = 0\n vptab_tower = (0, 1, 3, 6, 10, 15)\n for i in range(20):\n if self.b[i] == 'T':\n points += vptab_tower[self.f[i]]\n cnt_tower += 1\n if 'poli' in args.exp:\n points += max(self.f)\n if 'scho' in args.exp:\n points += cnt_tower\n return points", "def sum_points(self) -> int:\n return sum([card.rank_value for card in self.deck.cards])", "def rmse4 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = sum(map(lambda (x, y) : sqre_diff(x, y), z), 0.0)\n return math.sqrt(v / s)", "def get_points(self):\n self.round_points = 0\n for die in self.dice:\n if die == 1:\n self.round_points += 100\n elif die == 5:\n self.round_points += 50\n return self.round_points", "def dollars_per_point(self):\n if float(self.predicted_draftkings_points) == 0.0:\n return 0.0\n\n return float(self.draftkings_salary) / float(self.predicted_draftkings_points)", "def compute_ap(ranks, nres):\n\n # number of images ranked by the system\n nimgranks = len(ranks)\n\n # accumulate trapezoids in PR-plot\n ap = 0\n\n recall_step = 1. 
/ nres\n\n for j in np.arange(nimgranks):\n rank = ranks[j]\n\n if rank == 0:\n precision_0 = 1.\n else:\n precision_0 = float(j) / rank\n\n precision_1 = float(j + 1) / (rank + 1)\n\n ap += (precision_0 + precision_1) * recall_step / 2.\n\n return ap", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calculateP(SD, numDiff):\n return numDiff/SD", "def calculate_score(self):\n\n correct_award = 150\n turns_total = self.turns.count()\n turns_correct = self.turns.filter(is_match=True).count()\n seconds_left = (60.0 - (self.turns.last().created - self.turns.first().created).total_seconds()) or 0\n maxpoints = turns_correct * correct_award\n deduction_for_errors = correct_award * 0.11123\n\n maxpoints -= ((turns_total - turns_correct) * 2 * deduction_for_errors)\n maxpoints += seconds_left * 5.123214\n\n return Decimal(maxpoints)", "def premium(self):\n premium = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n shift = (self.data['riskfree'] - mean) * self.data['maturity']\n moneyness = np.array(self.data['moneyness']) + shift\n premium += weight * blackscholes_norm(moneyness,\n self.data['maturity'],\n std, self.data['call'])\n return premium", "def chauvenet_criterion(npoints, p=0.5):\n \n \n return np.abs(stats.norm.ppf(p/(2.*npoints), loc=0., scale=1.))", "def nachalnye_dannie(pkx, size):\r\n return pkx.sum(axis=0) / size", "def evaluate(self, pts):\n pts = np.atleast_2d(pts)\n assert pts.ndim == 2, 'points must be two-dimensional'\n\n x, y = pts.T\n pdf = super(Bounded_2d_kde, self).evaluate(pts.T)\n if self.xlow is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([2*self.xlow - x, y])\n\n if self.xhigh is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([2*self.xhigh - x, y])\n\n if self.ylow is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([x, 2*self.ylow - y])\n\n if self.yhigh is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([x, 2*self.yhigh - y])\n\n if self.xlow is not None:\n if self.ylow is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([2*self.xlow - x, 2*self.ylow - y])\n\n if self.yhigh is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([2*self.xlow - x, 2*self.yhigh - y])\n\n if self.xhigh is not None:\n if self.ylow is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([2*self.xhigh - x, 2*self.ylow - y])\n if self.yhigh is not None:\n pdf += super(Bounded_2d_kde, self).evaluate([2*self.xhigh - x, 2*self.yhigh - y])\n\n return pdf", "def __call__(self, points):\n return self.cdf(points)", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def 
precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "def getPoints(self):\n count = 0\n for card in self.cards:\n if card.rank > 9:\n count += 10\n elif card.rank == 1:\n count += 11\n else:\n count += card.rank\n # Deduct 10 if Ace is available and needed as 1\n for card in self.cards:\n if count <= 21:\n break\n elif card.rank == 1:\n count -= 10\n return count", "def ppd(self):\n return math.sqrt(np.dot(self.v, self.v) / np.dot(self.w, self.w) )", "def l1(self, points):\n new_points = []\n sum = []\n for point in points:\n for i in range(len(point.coordinates)):\n if (i < len(sum)):\n sum[i] += abs(point.coordinates[i])\n else:\n sum.append(abs(point.coordinates[i]))\n for point in points:\n new_coordinates = point.coordinates\n new_coordinates = [(new_coordinates[i]/ sum[i]) for i in range(len(point.coordinates))]\n new_points.append(Point(point.name, new_coordinates, point.label))\n return new_points", "def rapoint(rpoint):\r\n return [rpoint[0]*gv[\"globalscale\"]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0]),\r\n rpoint[1]*gv[\"globalscale\"]*(gv[\"fixedUR\"][1]-gv[\"fixedLL\"][1])]", "def point_to_param(self, pt):\n r = self.p2 - self.p1\n return (pt - self.p1).dot(r) / r.square()", "def average(cls, points):\n return Point.sum(points) / len(points)", "def calculate_precision(targets, preds):\n intersection_foreground = targets * preds\n n_intersection_foreground = float(np.sum(intersection_foreground))\n n_preds = float(np.sum(preds))\n\n return n_intersection_foreground / (n_preds + 1e-7)", "def ppf(self,x):\n if self.base == 'natural':\n ppfValue = math.exp((self.upperBound-self.lowerBound)*x + self.lowerBound)\n else:\n ppfValue = 10.**((self.upperBound-self.lowerBound)*x + self.lowerBound)\n return ppfValue", "def prf_cal(y_pred,y_true,k):\r\n GT=np.sum(y_true[y_true==1.])\r\n instance_num=y_true.shape[0]\r\n prediction_num=instance_num*k\r\n\r\n sort_indices = np.argsort(y_pred)\r\n sort_indices=sort_indices[:,::-1]\r\n static_indices = np.indices(sort_indices.shape)\r\n sorted_annotation= y_true[static_indices[0],sort_indices]\r\n top_k_annotation=sorted_annotation[:,0:k]\r\n TP=np.sum(top_k_annotation[top_k_annotation==1.])\r\n recall=TP/GT\r\n precision=TP/prediction_num\r\n f1=2.*recall*precision/(recall+precision)\r\n return precision, recall, f1", "def pontos(self):\n \n self.sc = 1. 
\n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. + self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def calculateUSky(self):\n skyline = []\n for p in self.pruned:\n pastart = [self.drange[0] for i in range(self.dim)]\n pamax = p.getLocationMax()\n pdom = list(self.index.intersection(tuple(pastart+pamax),objects=True))\n if len(pdom) == 1 and pdom[0].object == p:\n skyline.append([p, 1.0])\n else:\n finalp = 0.0\n for i in range(p.getPCount()):\n base = p.getProb(i)\n loc = p.getLocation(i)\n intersec = list(self.index.intersection(tuple(pastart+loc),objects=True))\n for d in intersec:\n dobj = d.object\n if dobj != p:\n tprob = 0.0\n for idx in range(dobj.getPCount()):\n if dominateStat(dobj.getLocation(idx),loc) == True:\n tprob += dobj.getProb(idx)\n tprob = 1.0 - tprob\n base *= tprob\n finalp += base\n skyline.append([p, finalp])\n for p in skyline:\n print(p[0])\n print(p[1])\n print(\"\")\n # print(skyline)" ]
[ "0.6893368", "0.6084819", "0.6049355", "0.6028179", "0.59974515", "0.5966032", "0.5889782", "0.58744955", "0.58731395", "0.5870193", "0.5849474", "0.58456403", "0.5832309", "0.58152395", "0.5789245", "0.5786074", "0.578353", "0.577726", "0.5765119", "0.57123584", "0.5707989", "0.5683858", "0.5671604", "0.5649551", "0.5647254", "0.5638782", "0.56334186", "0.56193465", "0.56082535", "0.56061685", "0.5603413", "0.5597595", "0.5570291", "0.55689377", "0.5537652", "0.55314195", "0.55299646", "0.552357", "0.5520684", "0.55012834", "0.54932284", "0.5493009", "0.5490867", "0.54881066", "0.54666084", "0.5466155", "0.5465616", "0.54523325", "0.54409933", "0.54274666", "0.541933", "0.5397974", "0.5393169", "0.5393005", "0.5386877", "0.53855854", "0.5375246", "0.5368699", "0.53632885", "0.53571093", "0.5356218", "0.53556293", "0.5354947", "0.53544223", "0.5353512", "0.53477913", "0.53436184", "0.5341243", "0.5335535", "0.5333708", "0.53318965", "0.53238016", "0.5318603", "0.531827", "0.53171897", "0.5314048", "0.5314048", "0.53129894", "0.53076464", "0.53044415", "0.53008467", "0.52974045", "0.52972776", "0.52966696", "0.52966696", "0.52966696", "0.52966696", "0.52966696", "0.52966696", "0.52950794", "0.52897984", "0.52890503", "0.5285721", "0.52841675", "0.52819544", "0.5281045", "0.52809846", "0.52806276", "0.52793646", "0.5277748" ]
0.56365174
26
Queries a nearby Weather Underground station for temperature and rainfall data
def get_weather_data(weather_station):
    now = datetime.datetime.now()
    then = now - datetime.timedelta(days=7)
    query_date_start = ("%d%02d%02d" % (then.year, then.month, then.day))
    query_date_end = ("%d%02d%02d" % (now.year, now.month, now.day))
    api_key = '/api/%s' % WUNDERGROUND_KEY
    history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)
    query = '/q/%s.json?showObs=0&ttl=120' % weather_station
    weather_url = ("%s%s%s%s" % (WUNDERGROUND_HOST, api_key, history_key, query))
    logger.info('Weather URL: %s', weather_url)
    response = requests.get(weather_url).text
    max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']
    sum_precip = json.loads(response)['history']['summary']['precip_sum']
    return max_temp_avg, sum_precip
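The stored function above relies on module-level imports and constants that are not included in the row; a minimal setup sketch under that assumption, with hypothetical placeholder values:

    # Assumed module-level setup for get_weather_data (not part of the stored document)
    import datetime
    import json
    import logging
    import requests

    logger = logging.getLogger(__name__)
    WUNDERGROUND_HOST = 'http://api.wunderground.com'   # hypothetical host value
    WUNDERGROUND_KEY = 'your-api-key'                    # hypothetical key value

    # Example call with a hypothetical personal-weather-station identifier:
    # max_temp_avg, sum_precip = get_weather_data('pws:KCASANFR70')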
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTodaysWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tweather = {} \n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\t\t\n\t\t# Getting todays weather data and populating the dictionary\n\t\tif fio.has_daily() is True and fio.has_hourly() is True:\n\t\t daily = FIODaily.FIODaily(fio)\n\t\t hourly = FIOHourly.FIOHourly(fio)\n\t\t for day in xrange(0, 1):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(daily.get_day(day)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\tif item == \"sunsetTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"sunriseTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\tfor item in hourly.get_hour(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[\"current\"] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"town\"] = self.helper.getCoords(keyword)[2]\n\t\telse:\n\t\t\treturn 'No Todays data'\n\n\t\treturn weather", "def temperatures():\n\n return station_9281", "def query(self, lon, lat):\n def distance(lon1, lat1, lon2, lat2):\n return (lon2 - lon1) ** 2 + (lat2 - lat1) ** 2\n\n min_distance = sys.maxint\n weather = {}\n for w in self._weather:\n d = distance(lon, lat, w['lon'], w['lat'])\n if d < min_distance:\n min_distance = d\n weather = w\n\n return dict(temp=weather['temp'],\n humidity=weather['humidity'],\n weather_code=weather['weather_code'])", "def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x", "def getDailyWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tdaily_weather = []\n\t\tweather = {}\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\t# Getting 4-day forecast, storing each day's data in a dictionary and\n\t\t# storing each dictionary in an array\n\t\tif 
fio.has_daily() is True:\n\t\t\tdaily = FIODaily.FIODaily(fio)\n\t\t\tfor day in xrange(0, 4):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\t\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tdaily_weather.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No Daily data'\n\t\treturn daily_weather", "def getHourlyWeather(self, keyword, temp, last_hour):\n\n\t\t# Variables\n\t\tconditions = []\n\t\tweather = {}\n\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\tif fio.has_hourly() is True:\n\t\t\thourly = FIOHourly.FIOHourly(fio)\n\n\t\t\t# Getting weather forecast for next 12 hours\n\t\t\tfor hour in xrange(1, last_hour):\n\t\t\t\tfor item in hourly.get_hour(hour).keys():\n\t\t\t\t\t# Parsing data from hourly fio object and adding it to weather dictionary\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tif temp == \"f\":\n\t\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item]).split(\".\")[0] + \"° F\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item]).split(\".\")[0] + \"° C\"\n\t\t\t\t\tif item == \"humidity\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item] * 100).split(\".\")[0] + \"%\"\n\n\t\t\t\t# Populating conditions array with weather dicitonary\n\t\t\t\tconditions.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No hourly data'\n\t\treturn conditions", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def forecast_weather(self):\n pass", "def update_rain_temp(self, day_of_week, departure_time_seconds):\n\n current_time = t.time()\n 
today = datetime.today().weekday()\n\n if (departure_time_seconds < (current_time + 3600) \\\n and day_of_week == today):\n\n self.temp = self.current_temperature\n self.rain = self.current_rainfall\n\n elif (day_of_week == today):\n for i in range(24):\n if (departure_time_seconds > self.weather_forecast_json \\\n [\"hourly\"][\"data\"][i][\"time\"] and departure_time_seconds \\\n < self.weather_forecast_json[\"hourly\"][\"data\"][i + 1][\"time\"]):\n\n self.temp = self.weather_forecast_json \\\n ['hourly']['data'][i]['temperature']\n\n self.rain = self.weather_forecast_json['hourly'] \\\n ['data'][i]['precipIntensity']\n break\n else:\n continue\n else:\n day_difference = int((departure_time_seconds - current_time) / 86400)\n\n self.temp = (self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMax'] + \\\n self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMin']) / 2\n\n self.rain = self.weather_forecast_json['daily'] \\\n ['data'][day_difference]['precipIntensity']", "def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')", "def get_weather(days, hours, db):\n days = format_list_for_db(days)\n hours = format_list_for_db(hours)\n sql = f\"SELECT * FROM weather WHERE day in {days} AND HOUR in {hours}\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n cursor.close()\n\n weathers = []\n if len(data) > 0:\n for weather in data:\n weather = {\"hour\": weather[1],\n \"day\": day_absolute_to_relative(weather[2]),\n \"temperature\": weather[3],\n \"apparenttemperature\": weather[4],\n \"precipitationintensity\": weather[5],\n \"precipitationprobability\": weather[6],\n \"humidity\": weather[7],\n \"dewpoint\": weather[8],\n \"windspeed\": weather[9],\n \"windbearing\": weather[10],\n \"windgust\": weather[11],\n \"pressure\": weather[12],\n \"cloudcover\": weather[13],\n \"uvindex\": weather[14],\n \"visibility\": weather[15]}\n weathers.append(weather)\n return weathers", "def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' 
% (query)", "def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist", "def combine_weather(weather):\n\n weather1 = weather[weather[\"Station\"] == 1]\n weather2 = weather[weather[\"Station\"] == 2]\n\n\n pass", "def get_weather(phenny, input):\n import wunderground\n \n report_type = 'conditions'\n\n unicode_input = unicode(input)\n if unicode_input[1:8] == 'weather':\n location_str = unicode_input[9:]\n elif unicode_input[1:3] == 'w ':\n location_str = unicode_input[3:]\n try:\n json_data = wunderground.format_json(location_str, input.weather_API, report_type)\n output_results(phenny, json_data)\n except Exception, e:\n print e\n phenny.say('Could not find results for \"%s\", please reword the search and try again.' % location_str)", "def get_weather_data(lat, lon):\n\n # Get weather\n filedata = pvtoolslib.get_s3_filename_df()\n filedata_closest = nsrdbtools.find_closest_datafiles(float(lat), float(lon),\n filedata)\n\n filename = filedata_closest['filename'].iloc[0]\n\n if filename == '124250_37.93_-122.3.npz':\n weather, info = nsrdbtools.get_local_weather_data(filename)\n else:\n weather, info = pvtoolslib.get_s3_weather_data(filename)\n\n return weather, info", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n 
detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate 
(mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def getHourlyWind(self, keyword):\n\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, \"f\", \"wind\")\n\t\twind_values = [] # Array that will contain all the wind data\n\t\twind_data = {} # Dictionary of wind data\n\n\t\t# Getting humidity data\n\t\tfor data in weather_data:\n\t\t\twind_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\twind_data[\"y\"] = float(data[\"wind\"].split(\" \")[1])\n\t\t\twind_values.append(wind_data)\n\t\t\twind_data = {}\n\n\t\treturn wind_values", "def get_temperature_data(zone):\n\n zone = zone[1:len(zone)-1]\n temp_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get temperatures data\n query = \"Select 
temp_date, temp_max From temperature Left join fire_danger_zone on temperature.temp_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and temperature.temp_date >= date('2010-01-01') Order by temperature.temp_date;\"\n dataframe = pd.read_sql_query(query, conn) \n temperatures = dataframe['temp_max'].values.tolist()\n\n # get dates\n dates = dataframe['temp_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'temp_'+zone\n temp_response[data_name] = temperatures\n temp_response['labels'] = dates\n \n # return data\n response = jsonify(temp_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response", "def _do_checkWeather(self, mjd, w, config):\n # Convert mjd to the relevant time units of the weather dates.\n time = (mjd - config['sim_start'] + config['%s_start' %(w)]) * _day2sec\n # And wrap the time, if we need to. \n time = time % self.maxtime[w]\n # Find the observations which are closest in time to our requested time.\n time_order = (abs(self.dates[w] - time)).argsort()\n date1 = self.dates[w][time_order[0]]\n date2 = self.dates[w][time_order[1]]\n weather1 = self.weather[w][time_order[0]]\n weather2 = self.weather[w][time_order[1]]\n # Do interpolation for weather at this particular time.\n weather = (weather2 - weather1) / (date2 - date1) * (time - date1) + weather1\n return weather, weather1", "def get_weather(html):\n\tcheck_page_type(html)\n\tget_temp(html)\n\tget_table(html)\n\treturn weather_dict", "def get_fire_weather_stations(session: Session) -> CursorResult:\n return session.query(PlanningWeatherStation, FuelType, PlanningArea, FireCentre)\\\n .join(FuelType, FuelType.id == PlanningWeatherStation.fuel_type_id)\\\n .join(PlanningArea, PlanningArea.id == PlanningWeatherStation.planning_area_id)\\\n .join(FireCentre, FireCentre.id == PlanningArea.fire_centre_id)\\\n .filter(PlanningWeatherStation.is_deleted == False)", "def get_hourly(location_list):\n location, human_location = location_list\n query = location\n url = \"http://api.wunderground.com/auto/wui/geo/WXCurrentObXML/index.xml?query=%s\" % query\n f = urllib2.urlopen(url)\n xml = f.read()\n root = ET.XML(xml)\n \n current = {'location': location, 'human_location': human_location}\n current['observation_time'] = parser.parse(root.find('observation_time').text.replace('Last Updated on',''))\n current['temperature'] = root.find('temp_f').text\n current['humidity'] = root.find('relative_humidity').text.strip('%') #Remove %\n current['wind_speed'] = root.find('wind_mph').text\n current['wind_direction'] = root.find('wind_dir').text\n current['icon'] = root.find('icon').text\n current['conditions'] = root.find('weather').text\n try:\n f = Forecast(**current)\n f.save()\n except:\n logging.info(\"Hourly Forecast Data missing or no new data available\")", "def Kweather():\n while True:\n hr = int(datetime.datetime.now().strftime(\"%H\"))\n if hr == 23:\n from weather import Weather, Unit\n weather = Weather(unit=Unit.CELSIUS)\n lookup = weather.lookup_by_location('Taipei')\n condition = lookup.print_obj\n code = condition[\"item\"][\"forecast\"][1][\"text\"]\n hightemp = condition[\"item\"][\"forecast\"][1][\"high\"]\n lowtemp = condition[\"item\"][\"forecast\"][1][\"low\"]\n \n print(hightemp,lowtemp,code)\n #Warning\n msg = \"\"\n if int(hightemp) > 32:\n msg = msg + \"明天溫度: \" + hightemp + \" 早上可能會很熱哦, 敲鼻可以穿少一點 \"\n if int(lowtemp) < 15:\n msg = msg + \"明天溫度: \" + lowtemp + \" 會很冷哦, 
敲鼻要記得多穿一點\"\n if \"Rain\" in code or \"Thunder\" in code or \"Showers\" in code:\n msg = msg + \"明天會下雨, 敲鼻記得帶傘\"\n if msg != \"\":\n print(msg)\n SendMsg(msg)\n time.sleep(60*60)", "def get_rainfall_data(zone):\n zone = zone[1:len(zone)-1]\n rain_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get rainfall data\n query = \"Select rain_date, rain_rainfall From rainfall Left join fire_danger_zone on rainfall.rain_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and rainfall.rain_date >= date('2010-01-01') Order by rainfall.rain_date;\"\n dataframe = pd.read_sql_query(query, conn) \n rainfall = dataframe['rain_rainfall'].values.tolist()\n\n # get dates\n dates = dataframe['rain_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'rain_'+zone\n rain_response[data_name] = rainfall\n rain_response['labels'] = dates\n \n # return data\n response = jsonify(rain_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response", "def get_weather(self):\n with urllib.request.urlopen(self.url) as response:\n json_data = response.read().decode('utf-8')\n\n data = json.loads(json_data)\n\n weather = {}\n weather['current'] = {\n 'temp': round(data['current']['temp_f']),\n 'humidity': round(data['current']['humidity']),\n 'summary': data['current']['condition']['text']\n }\n today = data['forecast']['forecastday'][0]['day']\n weather['today'] = {\n 'temp': round(today['maxtemp_f']),\n 'summary': today['condition']['text']\n }\n \n return weather", "def _get_dict_weather_data(self, weather_current):\n\n returned_dict = dict()\n returned_dict[\"weather_status\"] = weather_current.get_detailed_status()\n\n time_format = '%H:%M'\n if self.am_pm_time:\n time_format = '%I:%M %p'\n\n returned_dict[\"sunset\"] = datetime.fromtimestamp(weather_current.get_sunset_time()).strftime(time_format)\n returned_dict[\"sunrise\"] = datetime.fromtimestamp(weather_current.get_sunrise_time()).strftime(time_format)\n\n returned_dict[\"temperature\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp\"]))\n returned_dict[\"temperature_min\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp_min\"]))\n returned_dict[\"temperature_max\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp_max\"]))\n\n returned_dict[\"pressure\"] = weather_current.get_pressure()[\"press\"]\n returned_dict[\"sea_level_pressure\"] = weather_current.get_pressure()[\"sea_level\"]\n\n returned_dict[\"humidity\"] = weather_current.get_humidity()\n\n wind = weather_current.get_wind()\n wind_deg = wind.get(\"deg\", None)\n wind_speed = wind.get(\"speed\", None)\n returned_dict[\"wind_deg\"] = wind_deg\n returned_dict[\"wind_speed\"] = wind_speed\n\n snow_current = weather_current.get_snow()\n snow_current = snow_current.get('all', None)\n rain_current = weather_current.get_rain()\n rain_current = rain_current.get('all', None)\n returned_dict[\"rainfall\"] = rain_current\n returned_dict[\"snow\"] = snow_current\n\n returned_dict[\"clouds_coverage\"] = weather_current.get_clouds()\n\n return returned_dict", "def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already 
populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n 
VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')", "def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')", "def get_weather_data() -> dict:\n # Creating the url for the api call\n api_key = \"96bba64ba34672da132c1a987ad2fee6\"\n lat = 49.24\n long = -123.15\n config = '&units=metric'\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={long}&appid={api_key}{config}'\n\n # Querying and JSON parsing\n api_return = requests.get(url)\n weather_data = api_return.json()\n return weather_data", "def get_weather(self):\n\n city = self.user_data[\"weatherSettings\"][\"weatherCity\"]\n country = self.user_data[\"weatherSettings\"][\"weatherCountry\"]\n\n host = \"weather.mios.com\"\n temp_scale = \"C\"\n url = \"http://%s/?tempFormat=%s&cityWeather=%s&countryWeather=%s\" % \\\n (host, temp_scale, Vera.urlencode(city), Vera.urlencode(country))\n\n weather = self.proxy_get(url)\n\n return (float(weather[\"temp\"]), weather[\"text\"])", "def get_external_temp():\n baseurl = \"http://api.openweathermap.org/data/2.5/weather\"\n query = \"?q=salhouse&mode=xml\"\n url = baseurl + query\n r = requests.get(url)\n root = ET.fromstring(r.text)\n kelvin = float(root[1].attrib.get('value'))\n celcius = kelvin - 272.15\n return celcius", "def get_weather(lat, lon):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.WEATHER_KEY\r\n\r\n 
# API endpoint\r\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&appid={api_key}'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n weather = response.json()\r\n\r\n # Interpret Current Weather\r\n current_weather = weather['current']\r\n\r\n # By default, the API returns all requested times in unix format\r\n current_weather['dt'] = epoch_to_human_readable_date(current_weather['dt'])\r\n current_weather['sunrise'] = epoch_to_human_readable_date(current_weather['sunrise'])\r\n current_weather['sunset'] = epoch_to_human_readable_date(current_weather['sunset'])\r\n\r\n # By default, the API returns all temperature values in Kelvin\r\n current_weather['dew_point'] = {'kelvin': current_weather['dew_point'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(current_weather['dew_point']), 2),\r\n 'celsius': round(kelvin_to_celsius(current_weather['dew_point']), 2)}\r\n\r\n current_weather['feels_like'] = {'kelvin': current_weather['feels_like'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(current_weather['feels_like']), 2),\r\n 'celsius': round(kelvin_to_celsius(current_weather['feels_like']), 2)}\r\n\r\n current_weather['temp'] = {'kelvin': current_weather['temp'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(current_weather['temp']), 2),\r\n 'celsius': round(kelvin_to_celsius(current_weather['temp']), 2)}\r\n\r\n # Change icon value to image url to be used in html img tag as src\r\n current_weather['weather'][0]['icon'] = 'http://openweathermap.org/img/wn/' + current_weather['weather'][0]['icon'] + '@2x.png'\r\n\r\n # Interpret Daily Weather\r\n daily_forcast = weather['daily']\r\n\r\n for day in daily_forcast:\r\n # Get readable dates and times\r\n day['dt'] = epoch_to_human_readable_date(day['dt'])\r\n day['sunrise'] = epoch_to_human_readable_date(day['sunrise'])\r\n day['sunset'] = epoch_to_human_readable_date(day['sunset'])\r\n\r\n # Change icon value to image url to be used in html img tag as src\r\n day['weather'][0]['icon'] = 'http://openweathermap.org/img/wn/' + day['weather'][0]['icon'] + '@2x.png'\r\n\r\n\r\n # Convert temperatures in 'feels_like' dictionary from Kelvin to Fahrenheit and Celsius\r\n\r\n for temp in day['feels_like']:\r\n day['feels_like'][temp] = {'kelvin': day['feels_like'][temp], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(day['feels_like'][temp]), 2),\r\n 'celsius': round(kelvin_to_celsius(day['feels_like'][temp]), 2)}\r\n\r\n\r\n # Convert temperatures in 'temp' dictionary from Kelvin to Fahrenheit\r\n\r\n for temp in day['temp']:\r\n day['temp'][temp] = {'kelvin': day['temp'][temp], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(day['temp'][temp]), 2),\r\n 'celsius': round(kelvin_to_celsius(day['temp'][temp]), 2)}\r\n\r\n # Interpret Hourly Weather\r\n hourly_weather = weather['hourly']\r\n\r\n # Only manipulating data for hours of the current date, rest will be ommitted\r\n\r\n curr_date = epoch_to_human_readable_date(hourly_weather[0]['dt']).split(\",\", 1)[1][:3]\r\n\r\n last_hour = 0\r\n\r\n for index, hour in enumerate(hourly_weather):\r\n # Get date in relation to the hour\r\n date = epoch_to_human_readable_date(hour['dt']).split(\",\", 1)[1][:3]\r\n if date != curr_date:\r\n last_hour = index\r\n break\r\n \r\n # Convert temperatures in 'dew_point' dictionary from Kelvin to Fahrenheit and Celsius\r\n hour['dew_point'] = {'Kelvin':hour['dew_point'],\r\n 'fahrenheit': round(kelvin_to_fahrenheit(hour['dew_point']), 2),\r\n 'celsius': round(kelvin_to_celsius(hour['dew_point']), 
2)}\r\n\r\n # Get readable dates and times\r\n hour['dt'] = epoch_to_human_readable_date(hour['dt'])\r\n\r\n # Convert temperatures in 'feels_like' dictionary from Kelvin to Fahrenheit and Celsius\r\n hour['feels_like'] = {'kelvin': hour['feels_like'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(hour['feels_like']), 2),\r\n 'celsius': round(kelvin_to_celsius(hour['feels_like']), 2)}\r\n\r\n # Convert temperatures in 'temp' dictionary from Kelvin to Fahrenheit\r\n hour['temp'] = {'kelvin': hour['temp'], \r\n 'fahrenheit': round(kelvin_to_fahrenheit(hour['temp']), 2),\r\n 'celsius': round(kelvin_to_celsius(hour['temp']), 2)}\r\n\r\n hour['weather'][0]['icon'] = 'http://openweathermap.org/img/wn/' + hour['weather'][0]['icon'] + '@2x.png'\r\n\r\n\r\n return current_weather, daily_forcast, hourly_weather[:last_hour]", "def findWetWeatherDays(self, dbsession, today):\n wetDays = dbsession.query(self.dt).filter(or_(self.weather_description == \"light rain\", self.weather_description == \"moderate rain\")).all()\n # if one of those days is today return it.\n # else just return a wet day.\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]", "def get_weather(latitude, longitude, units):\n global API_KEY\n qs = {\n 'lat': latitude,\n 'lon': longitude,\n 'APPID': API_KEY,\n 'units': units\n }\n qs = urllib.parse.urlencode(qs)\n url = \"http://api.openweathermap.org/data/2.5/weather?{}\".format(qs)\n weather = requests.get(url)\n weather_json = json.loads(weather.content)\n category = weather_json['weather'][0]['id']\n temp = weather_json['main']['temp']\n wind_speed = weather_json['wind']['speed']\n\n return category, temp, wind_speed", "def get_weather(xml_data):\n\n import elementtree.ElementTree as ET\n \n page = ET.fromstring(unicode(xml_data, errors=\"ignore\"))\n\n weather = page.find( \"weather/current_conditions\" )\n\n return {\n 'f' : weather.find( \"temp_f\" ).get( \"data\" ),\n 'c' : weather.find( \"temp_c\" ).get( \"data\" ),\n 'humidity' : weather.find( \"humidity\" ).get( \"data\" ),\n 'wind' : weather.find( \"wind_condition\" ).get( \"data\" )\n }", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital 
elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n 
overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n 
dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations", "def _get_weather_data(self, lat, long):\n return {}\n try:\n # get the data\n forecast = self.ds.get_forecast(\n lat, long,\n exclude=[weather.HOURLY, weather.MINUTELY,\n weather.DAILY, weather.ALERTS, weather.FLAGS])\n\n # add lat & long to the hourly weather data for composite key in db\n data = forecast.currently\n data.latitude = lat\n data.longitude = long\n data = data.__dict__\n data.pop(\"time\")\n return data\n except Exception as e:\n print(e)\n return None", "def temperatures():\n hi_act= session.query(measurements.tobs,measurements.date,measurements.station).\\\n filter(measurements.station == 'USC00519281').\\\n filter(measurements.date >last_12).\\\n order_by(measurements.date).all()\n hi_act_df=pd.DataFrame(hi_act).set_index('date')\n hi_act_dict=hi_act_df.to_dict()\n return jsonify(hi_act_dict)", "def internetWeather(command,special_data,userInput):\n # openweatherapi key - 47c0157cd7e7c5cf1f19a97abc04edbc\n try:\n result = \"\"\n otherdays = {\"tomorrow\",\"week\",\"sunday\",\"monday\",\"tuesday\",\"wednesday\",\"thursday\",\"friday\",\"saturday\"}\n for time in otherdays:\n if time in userInput:\n result += \"I can only tell the weather now...\"\n weather_data = requests.get('http://api.openweathermap.org/data/2.5/weather?q=Israel&APPID=47c0157cd7e7c5cf1f19a97abc04edbc').json()\n temp = str(weather_data[\"main\"]['temp'] - 273.15)\n pressure = str(weather_data[\"main\"]['temp'])\n general = 
str(weather_data[\"weather\"][0]['description'])\n wind = str(weather_data[\"wind\"]['speed'])\n humidity = str(weather_data[\"main\"]['humidity'])\n if \"wear\" in special_data:\n clothing = [(-50,0,\"something very warm\"),(1,14,\"something warm\"),(15,19,\"a sweatshirt\"),(20,24,\"something light\"),(25,100,\"something short\")]\n decision = clothing[0][1]\n for cloth in clothing:\n if cloth[0]<=int(float(temp)) and cloth[1]>=int(float(temp)):\n decision = cloth[2]\n break\n if int(float(temp)) < 15 and int(float(humidity)) > 90:\n decision += \" and take an umbrella.\"\n return result + \"with this weather, I'd go with \"+decision\n else:\n return result + \"Weather: \" + str(general)+ \"\\nTemp: \" + str(temp) + \" deg C\" + \"\\nPressure: \" + str(pressure) + \" Hg\"+ \"\\nWind: \"+ str(wind )+ \" Kmh\" + \"\\nHumidity: \"+ str(humidity) + \"%\"\n except Exception as e:\n return \"Failed \" + e # interconnection problems e.g.?", "def GetWeatherByLocation():\n Location = GetLocation()\n WeatherUrl =\"http://api.openweathermap.org/data/2.5/weather?\"+ Location +\"&appid=b4bacbe2dc824431289800439f1ec3df&units=metric\"\n WeatherRequest = requests.get(WeatherUrl)\n WeatherInfo = WeatherRequest.json()\n pprint(WeatherInfo)\n WindSpeed = WeatherInfo['wind']['speed']\n pprint(WindSpeed)\n Temp = WeatherInfo['main']['temp']\n Humidity = WeatherInfo['main']['humidity']\n Description = WeatherInfo['weather'][0]['description']\n print(type(Humidity))\n return(Temp, Humidity, Description)", "def find_weather(city):\n\n\ttry:\n\t\thttp = urllib3.PoolManager()\n\t\tresponse = http.request('GET', \n\t\t\t'http://api.openweathermap.org/data/2.5/weather', \n\t\t\tfields ={\n\t\t\t'q':city, \n\t\t\t'units':'metric', \n\t\t\t\"appid\": \"2bc3e79bb974a007818864813f53fd35\"\n\t\t\t}) \n\t\tparsed_data = json.loads(response.data.decode('utf-8'))\n\t\t\n\t\t\n\t\treturn (\"\\t{}\\t{}\\t{}\").format((parsed_data['name']).ljust(10),(str(parsed_data[\"main\"][\"temp\"])).ljust(10), parsed_data[\"weather\"][0][\"description\"])\n\n\texcept Exception as e:\n\t\tprint (e)", "def weather():\r\n def weather_api_call():\r\n with open('config.json', 'r') as conf:\r\n conf = json.load(conf)\r\n # Gets the API key from the config.json file\r\n weather_api_key = conf[\"weather_api_key\"]\r\n weather_city_name = conf['weather_city_name']\r\n response = requests.get(\r\n 'http://api.openweathermap.org/data/2.5/weather?'\r\n 'q=' + weather_city_name + '&units=metric&appid=' + weather_api_key)\r\n resp_json = response.json()\r\n with open('weather.json', 'w') as outfile:\r\n # Uses the data from the API to overwrite the weather data\r\n json.dump(resp_json, outfile)\r\n outfile.close()\r\n\r\n def weather_data_extractor():\r\n with open('weather.json', 'r') as weather_json:\r\n weather_json = json.load(weather_json)\r\n temp = weather_json[\"main\"]\r\n weather_item = weather_json[\"weather\"]\r\n desc = weather_item[0]\r\n current_temperature = \"The current temperature is: \" + \\\r\n str(int(temp[\"temp\"])) + \"C\"\r\n current_feels_like = \"Feels like: \" + \\\r\n str(int(temp[\"feels_like\"])) + \"C\"\r\n forecast = desc[\"main\"]\r\n return current_feels_like, current_temperature, forecast\r\n\r\n weather_api_call()\r\n return weather_data_extractor()", "def get_operational_forecasts(self):\n\n # Real time ensemble data:\n # https://www.ftp.ncep.noaa.gov/data/nccf/com/ens_tracker/prod/\n\n # If forecasts dict already exist, simply return the dict\n try:\n self.forecast_dict\n return self.forecast_dict\n except:\n 
pass\n\n # Follow HURDAT procedure\n if self.source == \"hurdat\":\n\n # Get storm ID & corresponding data URL\n storm_id = self.dict['operational_id']\n storm_year = self.dict['year']\n if storm_year <= 2006:\n storm_id = self.dict['id']\n if storm_year < 1954:\n msg = \"Forecast data is unavailable for storms prior to 1954.\"\n raise RuntimeError(msg)\n\n # Error check\n if storm_id == '':\n msg = \"No NHC operational data is available for this storm.\"\n raise RuntimeError(msg)\n\n # Check if archive directory exists for requested year, if not redirect to realtime directory\n url_models = f\"https://ftp.nhc.noaa.gov/atcf/archive/{storm_year}/a{storm_id.lower()}.dat.gz\"\n if requests.get(url_models).status_code != 200:\n url_models = f\"https://ftp.nhc.noaa.gov/atcf/aid_public/a{storm_id.lower()}.dat.gz\"\n\n # Retrieve model data text\n if requests.get(url_models).status_code == 200:\n request = urllib.request.Request(url_models)\n response = urllib.request.urlopen(request)\n sio_buffer = BytesIO(response.read())\n gzf = gzip.GzipFile(fileobj=sio_buffer)\n data = gzf.read()\n content = data.splitlines()\n content = [(i.decode()).split(\",\") for i in content]\n content = [i for i in content if len(i) > 10]\n response.close()\n else:\n raise RuntimeError(\n \"No operational model data is available for this storm.\")\n\n # Follow JTWC procedure\n else:\n\n url_models_noaa = f\"https://www.ssd.noaa.gov/PS/TROP/DATA/ATCF/JTWC/a{self.id.lower()}.dat\"\n url_models_ucar = f\"http://hurricanes.ral.ucar.edu/repository/data/adecks_open/{self.year}/a{self.id.lower()}.dat\"\n\n # Retrieve model data text\n try:\n content = read_url(url_models_noaa, split=True, subsplit=False)\n except:\n try:\n content = read_url(\n url_models_ucar, split=True, subsplit=False)\n except:\n raise RuntimeError(\n \"No operational model data is available for this storm.\")\n content = [i.split(\",\") for i in content]\n content = [i for i in content if len(i) > 10]\n\n # Iterate through every line in content:\n forecasts = {}\n for line in content:\n\n # Get basic components\n lineArray = [i.replace(\" \", \"\") for i in line]\n try:\n basin, number, run_init, n_a, model, fhr, lat, lon, vmax, mslp, stype, rad, windcode, neq, seq, swq, nwq = lineArray[\n :17]\n use_wind = True\n except:\n basin, number, run_init, n_a, model, fhr, lat, lon, vmax, mslp, stype = lineArray[\n :11]\n use_wind = False\n\n # Check init time is within storm time range\n run_init_dt = dt.strptime(run_init, '%Y%m%d%H')\n if run_init_dt < self.dict['time'][0] - timedelta(hours=6) or run_init_dt > self.dict['time'][-1] + timedelta(hours=6):\n continue\n \n # Skip erroneous lines\n try:\n if int(fhr) > 240:\n continue\n except:\n continue\n\n # Enter into forecast dict\n if model not in forecasts.keys():\n forecasts[model] = {}\n if run_init not in forecasts[model].keys():\n forecasts[model][run_init] = {\n 'init': run_init_dt, 'fhr': [], 'lat': [], 'lon': [], 'vmax': [], 'mslp': [], 'type': [], 'windrad': []\n }\n\n # Format lat & lon\n fhr = int(fhr)\n if \"N\" in lat:\n lat_temp = lat.split(\"N\")[0]\n lat = round(float(lat_temp) * 0.1, 1)\n elif \"S\" in lat:\n lat_temp = lat.split(\"S\")[0]\n lat = round(float(lat_temp) * -0.1, 1)\n if \"W\" in lon:\n lon_temp = lon.split(\"W\")[0]\n lon = round(float(lon_temp) * -0.1, 1)\n elif \"E\" in lon:\n lon_temp = lon.split(\"E\")[0]\n lon = round(float(lon_temp) * 0.1, 1)\n\n # Format vmax & MSLP\n if vmax == '':\n vmax = np.nan\n else:\n vmax = int(vmax)\n if vmax < 10 or vmax > 300:\n vmax = 
np.nan\n if mslp == '':\n mslp = np.nan\n else:\n mslp = int(mslp)\n if mslp < 1:\n mslp = np.nan\n\n # Format wind radii\n if use_wind:\n try:\n rad = int(rad)\n if rad in [0, 35]:\n rad = 34\n neq = np.nan if windcode == '' else int(neq)\n seq = np.nan if windcode in ['', 'AAA'] else int(seq)\n swq = np.nan if windcode in ['', 'AAA'] else int(swq)\n nwq = np.nan if windcode in ['', 'AAA'] else int(nwq)\n except:\n rad = 34\n neq = np.nan\n seq = np.nan\n swq = np.nan\n nwq = np.nan\n else:\n rad = 34\n neq = np.nan\n seq = np.nan\n swq = np.nan\n nwq = np.nan\n\n # Add forecast data to dict if forecast hour isn't already there\n if fhr not in forecasts[model][run_init]['fhr']:\n if model in ['OFCL', 'OFCI'] and fhr > 120:\n pass\n else:\n if lat == 0.0 and lon == 0.0:\n continue\n forecasts[model][run_init]['fhr'].append(fhr)\n forecasts[model][run_init]['lat'].append(lat)\n forecasts[model][run_init]['lon'].append(lon)\n forecasts[model][run_init]['vmax'].append(vmax)\n forecasts[model][run_init]['mslp'].append(mslp)\n forecasts[model][run_init]['windrad'].append(\n {rad: [neq, seq, swq, nwq]})\n\n # Get storm type, if it can be determined\n if stype in ['', 'DB'] and vmax != 0 and not np.isnan(vmax):\n stype = get_storm_type(vmax, False)\n forecasts[model][run_init]['type'].append(stype)\n else:\n ifhr = forecasts[model][run_init]['fhr'].index(fhr)\n forecasts[model][run_init]['windrad'][ifhr][rad] = [\n neq, seq, swq, nwq]\n\n # Save dict locally\n self.forecast_dict = forecasts\n\n # Return dict\n return forecasts", "def get_weather(station_id):\n latitude, longitude = helper.get_station_coordinate(db, station_id)\n return jsonify(scrape(latitude, longitude))", "def process_weather(forecast_file):\n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n\n min_temp_store = {}\n max_temp_store = {}\n weather_results = str()\n header_results = str()\n\n for day_in_forecast in json_data['DailyForecasts']:\n day_date = day_in_forecast['Date']\n min_temp = day_in_forecast['Temperature']['Minimum'][\"Value\"]\n min_temp_c = convert_f_to_c(min_temp)\n min_temp_store[day_date] = min_temp_c\n max_temp = day_in_forecast['Temperature']['Maximum'][\"Value\"]\n max_temp_c = convert_f_to_c(max_temp)\n max_temp_store[day_date] = max_temp_c\n\n day_time_phrase = day_in_forecast['Day']['LongPhrase']\n rain_chance_day = day_in_forecast['Day']['RainProbability']\n night_time_phrase = day_in_forecast['Night']['LongPhrase']\n rain_chance_night = day_in_forecast['Night']['RainProbability']\n weather_results = weather_results + (f\"-------- {convert_date(day_date)} --------\\nMinimum Temperature: {format_temperature(round(min_temp_c,1))}\\nMaximum Temperature: {format_temperature(round(max_temp_c,1))}\\nDaytime: {day_time_phrase}\\n Chance of rain: {rain_chance_day}%\\nNighttime: {night_time_phrase}\\n Chance of rain: {rain_chance_night}%\\n\")+ \"\\n\"\n\n\n max_day = max(max_temp_store, key=max_temp_store.get)\n max_value = max_temp_store[max_day]\n min_day = min(min_temp_store, key=min_temp_store.get)\n min_value = min_temp_store[min_day]\n max_totals = (sum(max_temp_store.values()))\n min_totals = (sum(min_temp_store.values()))\n num_items = len(min_temp_store)\n mean_min = round(calculate_mean(min_totals,num_items),1)\n mean_max = round(calculate_mean(max_totals,num_items),1)\n\n save_header = (f\"{len(json_data['DailyForecasts'])} Day Overview\\n The lowest temperature will be {format_temperature(round((min_value),1))}, and will occur on {convert_date(min_day)}.\\n The highest 
temperature will be {format_temperature(round((max_value),1))}, and will occur on {convert_date(max_day)}.\\n The average low this week is {format_temperature(mean_min)}.\\n The average high this week is {format_temperature(mean_max)}.\\n\")\n\n header_results = save_header + \"\\n\"+ weather_results\n \n return(header_results)", "def get_wrf_stations(pool):\n\n wrfv3_stations = {}\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"SELECT `id`, `name` FROM `station` WHERE `id` like %s\"\n row_count = cursor.execute(sql_statement, \"11_____\")\n if row_count > 0:\n results = cursor.fetchall()\n for dict in results:\n wrfv3_stations[dict.get(\"name\")] = dict.get(\"id\")\n return wrfv3_stations\n else:\n return None\n except Exception as exception:\n error_message = \"Retrieving wrf stations failed\"\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()", "def get_weather_details(self, days: int = None):\n forecast = super().get_weather_forecast(self.BASE_URL)\n headers = [\n \"date\",\n \"min_temp\",\n \"max_temp\",\n \"phrase\",\n \"probability\",\n \"wind_speed\"]\n if days is None:\n days = 5\n for number in range(days):\n data = []\n date = forecast[\"DailyForecasts\"][number]['Date']\n date = date[:10]\n data.append(date)\n min_temp = round((int(\n (forecast[\"DailyForecasts\"][number][\"Temperature\"]\n [\"Minimum\"][\"Value\"])) - 32) / 1.8)\n data.append(min_temp)\n max_temp = round((int(\n (forecast[\"DailyForecasts\"][number][\"Temperature\"]\n [\"Maximum\"][\"Value\"])) - 32) / 1.8)\n data.append(max_temp)\n phrase = forecast[\"DailyForecasts\"][number][\"Day\"][\"LongPhrase\"]\n data.append(phrase)\n probability = (forecast[\"DailyForecasts\"][number][\"Day\"]\n [\"RainProbability\"])\n data.append(probability)\n wind_speed = round(int(\n (forecast[\"DailyForecasts\"][number][\"Day\"][\"Wind\"][\"Speed\"]\n [\"Value\"]) / 1.6), 1)\n data.append(wind_speed)\n yield dict(zip(headers, data))", "def weather( self, mess, args):\n conn=sqlite3.connect(dbname)\n curs=conn.cursor()\n curs.execute(\"SELECT temperature, humidity,datetime(timestamp, 'localtime') FROM weatherdata order by timestamp desc limit 1\")\n rowcur=curs.fetchone()\n rowstrcur=\"Recorded at: {2} | Temperature: {0:0.1f} *C | Humidity : {1:0.1f}%\".format(rowcur[0],rowcur[1],str(rowcur[2]))\n return rowstrcur", "def get_wind_data(start, end):\n\n con = connect_to_db()\n query = f'SELECT rssi FROM rala WHERE id > \"{start}\" AND id <= \"{end}\";'\n df = pd.read_sql_query(query, con)\n return df", "def get_weather(self, time=None, location=None):\n req = requests.get(self.source_url)\n text = req.text\n moment = self.extract_datetime(text)\n met_data = self.parse_hms_data(text)\n met_data['time'] = moment\n met_data['text'] = text\n return self.source_label, met_data", "def collect(self, start_date=None, end_date=None):\n if start_date is None:\n start_date = self.default_start\n if end_date is None:\n end_date = self.default_end\n\n cur = self.conn.cursor()\n\n # Maximum return is 1000 entries\n num_days = 1000 // len(self.stations)\n # Maximum date-range is 1 year\n if num_days > 365:\n num_days = 365\n\n for interval in netzero.util.time_intervals(\n start_date, end_date, days=num_days\n ):\n netzero.util.print_status(\n \"Weather\",\n \"Collecting: {} to {}\".format(\n interval[0].strftime(\"%Y-%m-%d\"), interval[1].strftime(\"%Y-%m-%d\")\n ),\n )\n\n # TODO -- REMOVE ASSUMPTION THAT 
LEN(DATA) < LIMIT\n raw_data = self.query_api(interval[0], interval[1])\n\n if raw_data is None:\n print(\"ERROR QUERYING API\") # TODO exception here?\n continue\n\n for entry in raw_data.get(\"results\", []):\n # Insert the weather data to the table, to be averaged later\n date = datetime.datetime.strptime(\n entry[\"date\"], \"%Y-%m-%dT%H:%M:%S\"\n ).date()\n value = entry[\"value\"]\n station = entry[\"station\"]\n\n cur.execute(\n \"INSERT OR IGNORE INTO weather VALUES (?, ?, ?)\", (date, value, station)\n )\n\n self.conn.commit()\n\n cur.close()\n\n netzero.util.print_status(\"Weather\", \"Complete\", newline=True)", "def Fetch_station(long, lat, y):\r\n global ddf\r\n dmin = 1000000\r\n rs = 0\r\n i=0\r\n for i in range(len(ddf[y])):\r\n #Calculate the distance between zip code location and weather station location\r\n dnew = Distance_orthonormique(ddf[y]['LON'][i], ddf[y]['LAT'][i], long, lat)\r\n\r\n if(dmin > dnew):\r\n #If the last smaller distance is superior than the current distance :\r\n #the new smaller distance is the current distance\r\n dmin = dnew\r\n rs = i\r\n\r\n #rs = index dataframe weather station\r\n #ddf[y]['STATION NAME'][rs] = Weather station name\r\n #round(dmin, 2) = Distance between weather station and zip code\r\n \r\n return rs, ddf[y]['STATION NAME'][rs], round(dmin,2)", "def weather():\n latlong = request.form.get(\"latlong\")\n latlong = latlong.split(\",\")\n data = lookup_weather(latlong[0],latlong[1])\n return render_template(\"weather.html\", data = data)", "def read_weather(self):\n print \"Reading weather data from file\",self.datafile\n tab = ascii.read(self.datafile)\n \n # Fix 'T' values in precipitation column, which represent tiny\n # amounts of rain (not measurable)\n TINY_VALUE = '.005' # 0.005 is half the smallest measurable value\n rain = tab['PrecipitationIn']\n wbad = (rain == 'T')\n rain[wbad] = TINY_VALUE\n rain = numpy.array(rain).astype(\"float\")\n\n # Replace string version of precip with float version\n tab['PrecipIn'] = rain\n tab.remove_column('PrecipitationIn')\n\n self.table = tab", "def get_all_stations(engine): \n # Query db\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.latitude, \"\n \" d.longitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def print_weather(self, days):\n if days == 1:\n open_weather = urlopen(self.full_url).read().decode(\"utf8\")\n read_json = json.loads(open_weather)\n outside = self.get_outside_outlook(read_json[\"weather\"])\n wind_speed = read_json[\"wind\"][\"speed\"]\n wind_direction = self.deg_to_compass(read_json[\"wind\"][\"deg\"])\n current_temp = self.convert_temp(read_json[\"main\"][\"temp\"])\n print(\"Current Temperature: {:.2f}\\xb0\\n\"\n \"Sky: {}\\n\"\n \"Wind speed: {} MPH\\n\"\n \"Wind direction: {}\".format(current_temp, outside, wind_speed, wind_direction))\n else:\n open_weather = urlopen(self.full_url).read().decode(\"utf8\")\n read_json = json.loads(open_weather)\n outside = read_json[\"list\"]\n \"\"\"\n Should be:\n for temp in outside:\n stuff = temp[\"weather\"]\n for i in stuff:\n print(i['description'])\n\n Each of these will need to be added to a list or a dictionary to 
print relationally\n \"\"\"\n print(outside)", "async def get_temperatures(self, **kwargs: Any) -> Dict[str, float]:\n ...", "def get_temp():\n epts = [\"cage_coldPlate_temp\", \"cage_pressure\"]\n # t_earlier_aug = '2019-10-02T00:00'\n # t_later_aug = datetime.utcnow().isoformat()\n t_earlier_aug = '2019-09-27T13:00'\n t_later_aug = '2019-09-28T19:49'\n dfs = pandas_db_query(epts, t_earlier_aug, t_later_aug)\n print(dfs[epts[0]].tail())\n\n exit()\n\n xv = dfs[epts[0]][\"timestamp\"]\n yv = dfs[epts[0]][epts[0]]\n plt.plot(xv, yv, '-b')\n plt.ylabel(epts[0], ha='right', y=1)\n\n p1a = plt.gca().twinx()\n xv = dfs[epts[1]][\"timestamp\"]\n yv = dfs[epts[1]][epts[1]]\n p1a.set_ylabel(epts[1], color='r', ha='right', y=1)\n p1a.tick_params('y', colors='r')\n p1a.semilogy(xv, yv, '-r')\n\n plt.gcf().autofmt_xdate()\n plt.tight_layout()\n plt.show()", "def get(self, city: str):\n # Make a call to the OpenWeatherMap API and check the units inserted at the query parameter.\n units = request.args.get('unit', '').casefold()\n weather_data, query_units = self.get_weather(city, units)\n temp = self.check_unit(query_units)\n\n # Get the date from the request if no date is provided use the current date and time.\n date_raw = request.args.get('at')\n self.timezone = datetime.now().astimezone().tzinfo\n\n if date_raw:\n # Two date formats are allow an aware and naive date. If no time info has been given use the current time.\n try:\n date = isoparse(date_raw.replace(' ', '+'))\n except ValueError:\n now = datetime.now()\n date = datetime.strptime(date_raw, '%Y-%m-%d').replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond,\n tzinfo=self.timezone\n )\n else:\n now = datetime.now()\n date = datetime.now().replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond, tzinfo=self.timezone\n )\n\n # Prepare the error response.\n self.error = {\n 'error': '',\n 'error_code': ''\n }\n\n if self.check_past_date(date):\n return self.error, 400\n\n if type(weather_data) == dict:\n # Based on the date check the index of the weather that corresponds with the date in the weather response.\n index = self.find_index(weather_data, date)\n weather_dict = {\n f'{weather_data[\"list\"][index][\"weather\"][0][\"main\"].lower()}':\n f'{weather_data[\"list\"][index][\"weather\"][0][\"description\"]}',\n 'humidity': f'{weather_data[\"list\"][index][\"main\"][\"humidity\"]}%',\n 'pressure': f'{weather_data[\"list\"][index][\"main\"][\"pressure\"]} hPa',\n 'temperature': f'{str(weather_data[\"list\"][index][\"main\"][\"temp\"]) + temp}',\n }\n return weather_dict, 200\n\n elif '404' in str(weather_data):\n self.error['error'] = f'cannot find the city\"{city}\"'\n self.error['error_code'] = 'city_not_found'\n return self.error, 404\n\n else:\n self.error['error'] = 'Something went wrong'\n self.error['error_code'] = 'internal_server_error'\n return self.error, 500", "def main(temp, humid):\n user = 'root'\n password = 'root'\n dbname = 'iot'\n dbuser = 'raspberry'\n dbuser_password = 'password'\n query = 'select temp_value,humid_value from temp_humid;'\n json_body = [\n {\n \"measurement\": \"temp_humid\",\n \"fields\": {\n \"temp_value\": temp,\n \"humid_value\":humid \n\t}\n }\n ]\n\n client = InfluxDBClient('localhost', 8086, user, password, dbname)\n\n #client.create_database(dbname)\n\n print(\"Write points: {0}\".format(json_body))\n client.write_points(json_body)\n\n #print(\"Querying data: \" + query)\n #result = client.query(query)\n\n #print(\"Result: 
{0}\".format(result))\n\n #client.drop_database(dbname)", "def get_humidity_data(zone):\n\n zone = zone[1:len(zone)-1]\n humidity_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get humidity data\n query = \"Select humidity_date, humidity_relative From humidity Left join fire_danger_zone on humidity.humidity_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and humidity.humidity_date >= date('2010-01-01') Order by humidity.humidity_date;\"\n dataframe = pd.read_sql_query(query, conn) \n humidity = dataframe['humidity_relative'].values.tolist()\n\n # get dates\n dates = dataframe['humidity_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'humidity_'+zone\n humidity_response[data_name] = humidity\n humidity_response['labels'] = dates\n \n # return data\n response = jsonify(humidity_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response", "def show_current(self):\n print 'Querying the station for current weather data...'\n for packet in self.station.genLoopPackets():\n print packet\n break", "def get_rain():\n global rain\n\n # Report rain only if the condition is 'rainy' (and not always).\n if weather_condition == CONDITION_RAINY and random.random() > 0.7:\n rain += round(random.random(), 2)\n return rain", "def storeHourlyWeatherInCSV(self, keyword, temp):\n\t\tconditions = self.getHourlyWeather(keyword, temp, 25)\n\t\t\n\t\tif keyword[0] == \"0\":\n\t\t\tkeyword = self.helper.convertZipcodeToTown(keyword)\n\t\t# keyword = self.helper.getCorrectTownName(keyword)\n\t\tif temp == 'f':\n\t\t\tfilename = \"data/weather/\"+keyword+\"_f.csv\"\n\t\telse:\n\t\t\tfilename = \"data/weather/\"+keyword+\"_c.csv\"\n\t\tf = csv.writer(open(filename, \"wb+\"))\n\t\tf.writerow([\"date\", \"description\", \"precipitation\", \"cloudCover\", \"temperature\", \"humidity\", \"wind\"])\n\t\tfor condition in conditions:\n\t\t\ttemp = condition['temperature'].split(\" \")[0][:-2] + \" \" + condition['temperature'].split(\" \")[1]\n\t\t\ttry:\n\t\t\t\tf.writerow([condition['time'],\n\t\t\t\t\tcondition['summary'],\n\t\t\t\t\tcondition['precipProbability'],\n\t\t\t\t\tcondition['cloudCover'],\n\t\t\t\t\ttemp,\n\t\t\t\t\tcondition['humidity'],\n\t\t\t\t\tcondition['wind']])\n\t\t\texcept:\n\t\t\t\tpass", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def update_weather(location_request, db):\n with open(expanduser(\"~/bin/my_utilities/config/darksky-key\")) as f:\n ds_key = f.readline().strip()\n current = []\n current_day = 0\n with forecast(ds_key, *location_request, units=\"uk2\") as location:\n raw = location['hourly']['data'][0]\n current.append(datetime.datetime.now().hour)\n current.append(day_relative_to_absolute(current_day))\n current.append(raw[\"temperature\"])\n current.append(raw[\"apparentTemperature\"])\n current.append(raw[\"precipIntensity\"])\n current.append(raw[\"precipProbability\"] * 100)\n current.append(raw[\"humidity\"] * 100)\n 
current.append(raw[\"dewPoint\"])\n current.append(raw[\"windSpeed\"])\n current.append(raw[\"windBearing\"])\n current.append(raw[\"windGust\"])\n current.append(raw[\"pressure\"])\n current.append(raw[\"cloudCover\"] * 100)\n current.append(raw[\"uvIndex\"])\n current.append(raw[\"visibility\"])\n current = format_list_for_db(current)\n\n columns = [\"hour\", \"day\", \"temp\", \"apptemp\", \"precipint\", \"precipprob\",\n \"humidity\", \"dewpoint\", \"windspeed\", \"windbearing\",\n \"windgust\", \"pressure\", \"cloudcover\", \"uvindex\", \"visibility\"]\n columns = format_list_for_db(columns)\n statement = f\"INSERT INTO WEATHER {columns} VALUES {current}\"\n print(statement)\n cursor = db.cursor()\n cursor.execute(statement)\n cursor.close()", "def update(self):\n if self.last_update and (\n self.last_update + timedelta(hours=1)\n > datetime.utcnow().replace(tzinfo=dt_util.UTC)\n ):\n return # Not time to update yet; data is only hourly\n\n for row in self.current_observations():\n if row.get(\"Station\") == self._station_id:\n api_fields = {\n col_heading: (standard_name, dtype)\n for standard_name, (\n _,\n _,\n _,\n col_heading,\n dtype,\n ) in SENSOR_TYPES.items()\n }\n self.data = {\n api_fields.get(col_heading)[0]: api_fields.get(col_heading)[1](\n v.replace(\",\", \".\")\n )\n for col_heading, v in row.items()\n if col_heading in api_fields and v\n }\n break\n else:\n raise ValueError(f\"No weather data for station {self._station_id}\")", "def get_env_data(inp_path):\n rain1_str = []\n rain2_str = []\n tide_str = []\n with open(inp_path, 'r') as tmp_file:\n lines = tmp_file.readlines()\n for i, l in enumerate(lines):\n if l.startswith(\"[TIMESERIES]\"): # find time series section\n start = i + 3\n for i, l in enumerate(lines[start:]):\n if l.startswith('Rain1'):\n rain1_str.append(l)\n if l.startswith('Rain2'):\n rain2_str.append(l)\n if l.startswith('Tide1'):\n tide_str.append(l)\n\n rain1_data = []\n rain1_time = []\n rain2_data = []\n rain2_time = []\n tide_data = []\n tide_time = []\n for i in rain1_str:\n rain1_data.append(i.split(' ')[3].rstrip())\n rain1_time.append(i.split(' ')[1] + \" \" + i.split(' ')[2])\n\n for i in rain2_str:\n rain2_data.append(i.split(' ')[3].rstrip())\n rain2_time.append(i.split(' ')[1] + \" \" + i.split(' ')[2])\n\n for i in tide_str:\n tide_data.append(i.split(' ')[3].rstrip())\n tide_time.append(i.split(' ')[1] + \" \" + i.split(' ')[2])\n\n rain1_df = pd.DataFrame([rain1_time, rain1_data]).transpose()\n rain1_df.columns = ['datetime1', 'rain1']\n rain1_df['datetime1'] = pd.to_datetime(rain1_df['datetime1'], infer_datetime_format=True)\n rain1_df.set_index(pd.DatetimeIndex(rain1_df['datetime1']), inplace=True)\n rain1_df['rain1'] = rain1_df['rain1'].astype('float')\n rain1_df = rain1_df.resample('H').sum()\n\n rain2_df = pd.DataFrame([rain2_time, rain2_data]).transpose()\n rain2_df.columns = ['datetime2', 'rain2']\n rain2_df['datetime2'] = pd.to_datetime(rain2_df['datetime2'], infer_datetime_format=True)\n rain2_df.set_index(pd.DatetimeIndex(rain2_df['datetime2']), inplace=True)\n rain2_df['rain2'] = rain2_df['rain2'].astype('float')\n rain2_df = rain2_df.resample('H').sum()\n\n tide_df = pd.DataFrame([tide_time, tide_data], dtype='float64').transpose()\n tide_df.columns = ['datetime', 'tide']\n tide_df['datetime'] = pd.to_datetime(tide_df['datetime'], infer_datetime_format=True)\n tide_df.set_index(pd.DatetimeIndex(tide_df['datetime']), inplace=True)\n tide_df['tide'] = tide_df['tide'].astype('float')\n\n df = pd.concat([rain1_df['rain1'], 
rain2_df['rain2'], tide_df['tide']], axis=1)\n df[['rain1', 'rain2']].fillna(0, inplace=True)\n df.reset_index(inplace=True)\n\n return df", "def weather(user):\r\n user = user.lower()\r\n\r\n if user == \"stiv\":\r\n return _stiv_bullshit()\r\n location = USER_LOOKUP.get(user)\r\n\r\n if not location:\r\n return \"Berg was too busy sucking dongs to add your location.\"\r\n\r\n url = ROOT_URL + location\r\n\r\n resp = requests.get(url).json()\r\n weather_data = resp.get(\"observations\", {}).get(\"data\")[0]\r\n temp_f = _get(weather_data, \"air_temp\")\r\n\r\n output = {k: _get(weather_data, k) for k, v in weather_data.items() if k in FIELDS}\r\n output[\"degree\"] = \"\\N{DEGREE SIGN}\"\r\n output[\"temp_f\"] = \"%.2f\" % (temp_f * 9 / 5 + 32)\r\n output[\"username\"] = user if not user == 'mcspud' else 'macspud'\r\n\r\n return _format_output(**output)", "def get_temp():\n count = 0\n while True:\n # Temp\n output = subprocess.check_output(\n [\"/home/andy/python/bitbucket/pitemp/Adafruit_DHT\", \"2302\", \"4\"])\n count += 1\n print (\"Attempt %s: %s\") % (count, output)\n temp_match = re.search(\"Temp =\\s+([0-9.]+)\", output)\n humid_match = re.search(\"Hum =\\s+([0-9.]+)\", output)\n\n # if the beginning of output contains temp and numbers,\n # we can assume we are getting valid data\n if temp_match:\n temp = float(temp_match.group(1))\n humidity = float(humid_match.group(1))\n break\n\n return (temp, humidity)", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def process_weather(forecast_file):\n # Load json data file\n \n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n \n # Set Variables, Dictionaries and Lists\n days_list = []\n temp_dict = {}\n daily_dict = {}\n\n num_items = 0\n total_sum_min = 0\n total_sum_max = 0\n days = len(json_data['DailyForecasts'])\n days_list = days_in_data(days)\n\n t_temp_min = 100\n t_temp_max = 0\n\n # Pull through the data\n\n for day in days_list:\n num_items += 1\n date = convert_date(json_data['DailyForecasts'][day]['Date'])\n min_temp = convert_f_to_c(json_data['DailyForecasts'][day]['Temperature']['Minimum']['Value'])\n total_sum_min += min_temp\n max_temp = convert_f_to_c(json_data['DailyForecasts'][day]['Temperature']['Maximum']['Value'])\n total_sum_max += max_temp\n day_desc = json_data['DailyForecasts'][day]['Day']['LongPhrase']\n chance_rain_day = json_data['DailyForecasts'][day]['Day']['RainProbability']\n night_desc = json_data['DailyForecasts'][day]['Night']['LongPhrase']\n chance_rain_night = json_data['DailyForecasts'][day]['Night']['RainProbability']\n \n if min_temp < t_temp_min:\n t_temp_min = min_temp\n t_temp_mindate = date\n else:\n pass\n if max_temp > t_temp_max:\n t_temp_max = max_temp\n t_temp_maxdate = date\n else:\n pass\n \n daily_dict[day] = [date, min_temp, max_temp, day_desc, chance_rain_day, night_desc, chance_rain_night]\n # 0 1 2 3 4 5 6 \n \n # print(temp_dict)\n # print(daily_dict)\n\n # Calculate Minimum, Maximum and Mean temperatures\n\n mean_min = format_temperature(calculate_mean(total_sum_min, num_items))\n # print(mean_min)\n mean_max = format_temperature(calculate_mean(total_sum_max, num_items))\n # print(mean_max)\n\n # Format Minimum and Maximum 
temperatures\n min_temp_format = format_temperature(t_temp_min)\n max_temp_format = format_temperature(t_temp_max)\n\n ##############################################################################################\n\n # Combine string messages to return to user\n\n str_Output = \"\"\n Output_gen1 = (f\"{num_items} Day Overview\\n\")\n Output_gen2 = (f\" The lowest temperature will be {min_temp_format}, and will occur on {t_temp_mindate}.\\n\")\n Output_gen3 = (f\" The highest temperature will be {max_temp_format}, and will occur on {t_temp_maxdate}.\\n\")\n Output_gen4 = (f\" The average low this week is {mean_min}.\\n\")\n Output_gen5 = (f\" The average high this week is {mean_max}.\\n\")\n str_Output = Output_gen1 + Output_gen2 + Output_gen3 + Output_gen4 + Output_gen5\n for key, value in daily_dict.items():\n Output_daily0 = (\"\\n\")\n Output_daily1 = (f\"-------- {value[0]} --------\\n\")\n Output_daily2 = (f\"Minimum Temperature: {format_temperature(value[1])}\\n\")\n Output_daily3 = (f\"Maximum Temperature: {format_temperature(value[2])}\\n\")\n Output_daily4 = (f\"Daytime: {value[3]}\\n\")\n Output_daily5 = (f\" Chance of rain: {value[4]}%\\n\")\n Output_daily6 = (f\"Nighttime: {value[5]}\\n\")\n Output_daily7 = (f\" Chance of rain: {value[6]}%\\n\")\n str_Output = str_Output + Output_daily0 + Output_daily1 + Output_daily2 + Output_daily3 + Output_daily4 + Output_daily5 + Output_daily6 + Output_daily7\n str_Output = str_Output +\"\\n\"\n\n return str_Output", "def get_wind_data(zone):\n\n zone = zone[1:len(zone)-1]\n wind_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get wind data\n query = \"Select wind_date, wind_speed From wind_velocity Left join fire_danger_zone on wind_velocity.wind_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and wind_velocity.wind_date >= date('2010-01-01') Order by wind_velocity.wind_date;\"\n dataframe = pd.read_sql_query(query, conn) \n wind = dataframe['wind_speed'].values.tolist()\n\n # get dates\n dates = dataframe['wind_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'wind_'+zone\n wind_response[data_name] = wind\n wind_response['labels'] = dates\n \n # return data\n response = jsonify(wind_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response", "def _checkWeather(self, mjd, w, config):\n # Convert mjd to the relevant time units of the weather dates. \n time = (mjd - config['sim_start'] + config['%s_start' %(w)]) * _day2sec\n # And wrap the time, if we need to. \n time = time % self.maxtime[w]\n # Then use numpy interp to find the weather values for all of our times (works for single values or for arrays). \n # Requires that the 'weather' times are monotonically increasing, but this is true. 
(and checked when weather read in).\n # Find the *interpolated* weather values\n values = numpy.interp(time, self.dates[w], self.weather[w])\n return values", "def pull_forecast(city, api_key):\n base_url = \"http://api.openweathermap.org/data/2.5/forecast?\"\n url = base_url + \"appid=\" + api_key + \"&q=\" + city\n r = requests.get(url)\n r.raise_for_status()\n data = r.json()\n return data", "def get_zakopane_daily_weather():\n zakopane = FiveDaysWeatherForecast(location.get(\"zakopane\", \"\"))\n zakopane_weather_detail = zakopane.get_weather_details()\n zakopane_daily_weather_detail = []\n for data in zakopane_weather_detail:\n zakopane_daily_weather_detail.append(data)\n return zakopane_daily_weather_detail", "def get_forecast(location_list):\n #Might need to munge location to get a query out of it\n location, human_location = location_list\n date = datetime.datetime.today()\n query = location\n url = \"http://api.wunderground.com/auto/wui/geo/ForecastXML/index.xml?query=%s\" % query\n f = urllib2.urlopen(url)\n xml = f.read()\n root = ET.XML(xml)\n \n forecast = {'location': location, 'human_location': human_location}\n #Find forecast\n simple = root.find('simpleforecast')\n for day in simple.findall('forecastday'):\n forecast['forecast_date'] = parser.parse(day.find('date').find('pretty').text)\n forecast['high_temp'] = day.find('high').find('fahrenheit').text\n forecast['low_temp'] = day.find('low').find('fahrenheit').text\n forecast['conditions'] = day.find('conditions').text\n forecast['icon'] = day.find('icon').text\n forecast['skyicon'] = day.find('skyicon').text\n try:\n f, created = ForecastDay.objects.get_or_create(**forecast)\n if created:\n f.save()\n except:\n logging.info(\"Long Range Forecast Data missing or already created\")\n \n \n #Find Moon\n moon = root.find('moon_phase')\n illuminated = moon.find('percentIlluminated')\n age = moon.find('ageOfMoon')\n sun_rise = datetime.datetime(date.year, date.month, date.day, **_hour_minute(moon.find('sunrise')))\n sun_set = datetime.datetime(date.year, date.month, date.day, **_hour_minute(moon.find('sunset'))) \n #It doesn't error, so it appears to be doing what it should.\n f = ForecastDay.objects.get(forecast_date=date)\n f.sun_rise = sun_rise\n f.sun_set = sun_set\n f.moon_illuminated = illuminated.text\n f.moon_age = age.text\n try:\n f.save()\n except:\n logging.info(\"Moon Data missing or no new data available\")", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", 
len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def get_weather_forecast_comparison(user_city=\"lagos\", explored_city=\"london\", days=7):\n user_city_forecast = weather_client.getForecastWeather(q=user_city, days=days)['forecast']['forecastday']\n explored_city_forecast = weather_client.getForecastWeather(q=explored_city, days=days)['forecast']['forecastday']\n weather_forecast_comparison = zip(map(get_weather_info, user_city_forecast), map(get_weather_info, explored_city_forecast))\n return weather_forecast_comparison", "def get_weather(self):\n return self.__weather", "def parse_weather(data: DataFrame) -> List[WeatherData]:\n parsed_results = []\n\n for index, row in data.iterrows():\n date = sqlite3.Date(index.year, index.month, index.day)\n item = WeatherData(\n date=date,\n average_temp=celsius_to_fahr(row.get('tavg', 0)),\n precipitation=row.get('prcp', 0),\n )\n parsed_results.append(item)\n return parsed_results", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def fetch_weather():\n\n # Fetch the current weather.\n response = requests.get(f'https://api.openweathermap.org/data/2.5/weather?q=Manchester,UK&units=metric&APPID={WEATHER_API_KEY}')\n\n # Return the data.\n return response.json()", "def details(weather):\n\treturn \"\"\"<table class=\"forecast bg-success\"><tr><th colspan=\"2\" class=\"text-center lead\">Weather for {location} at {time}<th></tr>\n\t<tr><td>Temp: {temperature}<i class=\"wi wi-celsius\"></i> Feels Like: {feelsLike}<i class=\"wi wi-celsius\"></i></td><td rowspan=\"9\"><img src=\"map.gif?{latitude},{longitude}\" width=\"600\" height=\"371\"/><td></tr>\n\t<tr><td>Low: {low}<i class=\"wi wi-celsius\"></i> High: {high}<i class=\"wi wi-celsius\"></i></td></tr>\n\t<tr><td>Sunrise <i class=\"wi wi-sunrise\"></i>: {sunrise} Sunset <i 
class=\"wi wi-sunset\"></i>: {sunset}</td></tr>\n\t<tr><td>Wind: {windSpeed} kph from {windBearing} <i class=\"wi wi-wind.towards-{windDirection}-deg\"></i></td></tr>\n\t<tr><td>Summary <i class=\"wi wi-{icon}\"></i>: {summary}</td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td></td></tr>\n\t<tr><td>&nbsp;</td><td>&nbsp;</td></tr>\n\t</table>\"\"\".format(**weather)", "def tafs(station, hours_before_now=24, most_recent=True):\n return aviation_weather('tafs', station, hours_before_now, most_recent)", "def get_outdoor_data(temp_dir,site):\n if site == 'berk':\n files_od = glob(join(temp_dir,'outdoor','20*.xlsx'))\n elif site == 'bus':\n files_od = glob(join(temp_dir,'outdoor','Busara*.csv'))\n else:\n raise NameError(site)\n\n dfs = []\n for f in files_od:\n if site == 'berk':\n this_df = pd.read_excel(f,sheet_name=0,usecols='B:D',index_col=0,parse_dates=True, header=1)\n elif site == 'bus':\n this_df = pd.read_csv(f,usecols=[0,1,2],index_col=0,parse_dates=True,header=2)\n \n # drop missing values that prevented conversion to float type\n if this_df.iloc[:,0].dtype != np.float64:\n this_df = this_df[this_df.iloc[:,0] != ' ']\n this_df = this_df.astype(np.float64)\n\n # correct for weird timezones in berkeley datalogger\n this_df = correct_tz(this_df,site)\n \n this_df.columns = ['T','RH']\n this_df.index.name = 'time'\n\n # convert to celsius\n this_df['T'] = (this_df['T'] - 32) * 5/9\n dfs.append(this_df)\n \n df_od = pd.concat(dfs)\n\n # drop duplicated measurements\n df_od = df_od[~df_od.index.duplicated(keep='last')].sort_index()\n \n # separate out into daily min,mean,max\n groups = df_od.groupby(df_od.index.date)\n dfs_od = {'all':df_od,\n 'min': groups.min(),\n 'mean': groups.mean(),\n 'max': groups.max()}\n \n for i in ['min','mean','max']:\n # remove first and last day to ignore days where we did not get full recording\n dfs_od[i] = dfs_od[i].iloc[1:-1,:]\n \n # name index so that we can merge onto multiIndex'd dataframe\n dfs_od[i].index.name = 'date'\n \n return dfs_od", "def get_weather(start: str, end: str) -> Generator[Dict[str, str], None, None]:\n speeds = get_speeds(start, end)\n for data in get_temperatures(start, end):\n data.update(next(speeds))\n yield data", "def getHourlyTemp(self, keyword, scale):\n\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, scale, \"temperature\")\n\t\ttemp_values = [] # Array that will contain all the temperature data\n\t\ttemp_data = {} # Dictionary of temperature data\n\n\t\t# Getting temperature data\n\t\tfor data in weather_data:\n\t\t\ttemp_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\ttemp_data[\"y\"] = float(data[\"temperature\"].split(\"°\")[0].split(\" \")[0])\n\t\t\ttemp_values.append(temp_data)\n\t\t\ttemp_data = {}\n\n\t\treturn temp_values", "def get_weather_with_time(time):\n global DARK\n\n if TIME in range(6, 9):\n DARK = False\n return 1\n elif TIME in range(9, 13):\n return 2\n elif TIME in range(13, 16):\n return 3\n elif TIME in range(16, 19):\n if HAS_RAINCOAT:\n return 4\n else:\n if not NICE_WEATHER:\n add_strength(False, 10)\n return 5\n\n elif TIME in range(19, 22):\n if HAS_RAINCOAT:\n return 7\n else:\n if not NICE_WEATHER:\n add_strength(False, 10)\n return 6\n\n else: # 9 - 6am\n DARK = True\n if HAS_FLASHLIGHT:\n return 9\n else:\n return 8", "def weather_script(\n init_data_path: str,\n output_path: str,\n workers: int,\n weatherAPI_rpm: int,\n geoAPI_rpm: int,\n) -> None:\n unzip(init_data_path, output_path)\n top_hotels_dataframe_without_addresses 
= primary_data_proc(output_path)\n geocoder = geocoder_setup(geoAPI_rpm)\n top_hotels_df_with_addresses = define_address(\n top_hotels_dataframe_without_addresses,\n workers,\n geocoder,\n )\n cities, countries, coordinates = city_center_coord(top_hotels_df_with_addresses)\n weather_df = pd.concat(\n [\n prev_weather(cities, countries, coordinates, workers, weatherAPI_rpm),\n forecast_weather(cities, countries, coordinates, workers, weatherAPI_rpm),\n ]\n )\n\n logger.info(\"Start to save results\")\n save_main_info(output_path, weather_df, top_hotels_df_with_addresses)\n logger.info(\"Finish\")", "def weather_data(cities, openweathermap_api_key=openweathermap_api_key):\n L = []\n for c in cities:\n res = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q={c}&appid={openweathermap_api_key}&units=imperial')\n L.append(res.json())\n\n df = pd.DataFrame(L)\n df['lon'] = df['coord'].map(op.itemgetter('lon'))\n df['lat'] = df['coord'].map(op.itemgetter('lat'))\n df['Temprature'] = df['main'].map(op.itemgetter('temp'))\n df['Humidity'] = df['main'].map(op.itemgetter('humidity'))\n df['Wind Speed'] = df['wind'].map(op.itemgetter('speed'))\n return df[['name','lon', 'lat','Temprature','Humidity','Wind Speed']]", "def get_zakopane_hourly_weather():\n zakopane = TwelveHoursWeatherForecast(location.get(\"zakopane\", \"\"))\n zakopane_weather_detail = zakopane.get_hourly_weather_details()\n zakopane_hourly_weather_detail = []\n for data in zakopane_weather_detail:\n zakopane_hourly_weather_detail.append(data)\n return zakopane_hourly_weather_detail", "def __weather_api_call(\n self, time: datetime, location: tuple, index: int,\n ) -> Weather:\n URL = (\n 'https://weather.visualcrossing.com/VisualCrossingWebServices'\n + '/rest/services/weatherdata/history?'\n )\n time_start = time.strftime('%Y-%m-%dT%H:%M:%S')\n # time_end = (time + timedelta(hours=1, seconds=0)\n # ).strftime('%Y-%m-%dT%H:%M:%S')\n location0_str = f'{location[0]:.5f}'\n location1_str = f'{location[1]:.5f}'\n\n PARAMS = {\n 'aggregateHours': 1,\n 'combinationMethod': 'aggregate',\n 'startDateTime': time_start,\n 'endDateTime': time_start,\n 'maxStations': -1,\n 'maxDistance': -1,\n 'contentType': 'json',\n 'unitGroup': self.unit_group,\n 'locationMode': 'single',\n 'key': self.vc_api_key,\n 'dataElements': 'all',\n 'locations': f'{location0_str}, {location1_str}',\n }\n # sending get request and saving the response as response object\n r = requests.get(url=URL, params=PARAMS)\n # extracting data in json format\n response_data = r.json()\n data_values = response_data['location']['values'][0]\n return Weather(\n temperature=data_values['temp'],\n maximum_temperature=data_values['maxt'],\n minimum_temperature=data_values['mint'],\n wind_chill=data_values['windchill'],\n heat_index=data_values['heatindex'],\n precipitation=data_values['precip'],\n snow_depth=data_values['snowdepth'],\n wind_speed=data_values['wspd'],\n wind_direction=data_values['wdir'],\n sea_level_pressure=data_values['sealevelpressure'],\n visibility=data_values['visibility'],\n cloud_cover=data_values['cloudcover'],\n dew_point=data_values['dew'],\n solar_radiation=data_values['solarradiation'],\n relative_humidity=data_values['humidity'],\n weather_type=data_values['weathertype'],\n conditions=data_values['conditions'],\n date=time,\n location=location,\n index=index,\n )", "def get_weather(address, update = ''):\n \n def proceed_with_method():\n if update == 'forecast':\n precip_hist_dict = historic_weather_data(address, \"P\")\n temp_hist_dict = 
historic_weather_data(address, \"T\") \n else:\n precip_hist_dict = historic_weather_data(address, \"P\", update)\n temp_hist_dict = historic_weather_data(address, \"T\", update) \n \n if update == 'history':\n forecast_dict = get_weather_forecast(address)\n else:\n forecast_dict = get_weather_forecast(address, update)\n\n \n precip_forecast_dict = {}\n temp_forecast_dict = {}\n for key, item in forecast_dict.items():\n precip_forecast_dict[key] = item[1]\n temp_forecast_dict[key] = item[0]\n \n precip_dict = join_historic_forecast(precip_hist_dict, precip_forecast_dict)\n #use adj join for temp, forecast is not accurate, this at least gives a shape\n temp_dict = adj_join_historic_forecast(temp_hist_dict, temp_forecast_dict)\n return [precip_dict, temp_dict]\n \n title = \"weather_dict_temp\"\n if weather_profile(address, title) == None or update != '':\n print(\"/t-Adding/refreshing data...\")\n data = proceed_with_method()\n weather_profile(address, title, data[1], update)\n else:\n print(\"There is existing data for: \" + str(title))\n \n title = \"weather_dict_precip\"\n if weather_profile(address, title) == None or update != '':\n print(\"/t-Adding/refreshing data...\")\n data = proceed_with_method()\n weather_profile(address, title, data[0], update)\n return [weather_profile(address, title),weather_profile(address, \"weather_dict_temp\")]\n else:\n return [weather_profile(address, title),weather_profile(address, \"weather_dict_temp\")]\n print(\"There is existing data for: \" + str(title))", "def fetch_weather(y):\r\n # request parameter(s): Start with '?'\r\n # separate name and value with '='\r\n # multiple parameter name value pairs are separate with '&'\r\n query_string = \"?id={}&units=imperial&APIKEY={}\".format(y, API_KEY)\r\n request_url = WS_URL + query_string\r\n print(\"Request URL: \", request_url)\r\n response = requests.get(request_url)\r\n if response.status_code == 200:\r\n city_name = response.json()[\"city\"][\"name\"]\r\n lst = response.json()[\"list\"]\r\n tmp_list = []\r\n for i in range(len(lst) // 8):\r\n li = [x for x in range(len(lst)) if x // 8 == i]\r\n tmp_list.append(max([lst[j][\"main\"][\"temp_max\"] for j in li]))\r\n return City(city_name, tmp_list)\r\n else:\r\n print(\"How should I know?\")\r\n return None", "def update_rain():\n \n if check_password(request.form):\n\n weather_data = read_from_file()\n \n try:\n data = json.loads(request.form['data'])\n now = datetime.now().isoformat()\n\n # update weather_data, if necessary\n # update contains rain\n if data.has_key('current_data'):\n if data['current_data'].has_key('intensity'):\n weather_data['last_rain_intensity'] = data['current_data']['intensity']\n\n if weather_data.has_key('last_update_rain'):\n if weather_data['last_update_rain'] == False or weather_data['rain_since'] == None:\n weather_data['rain_since'] = now\n else:\n weather_data['rain_since'] = now\n\n\n weather_data['last_rain'] = now\n weather_data['last_update_rain'] = True \n\n # update contains no rain\n else:\n\n if weather_data.has_key('last_update_rain'):\n if weather_data['last_update_rain'] == True or weather_data['dry_since'] == None:\n weather_data['dry_since'] = now \n\n else:\n weather_data['dry_since'] = now\n\n weather_data['last_dry'] = now\n weather_data['last_update_rain'] = False\n\n if \"prediction\" in data:\n weather_data['prediction'] = data['prediction']\n\n if data.has_key('temperature') and data['temperature'].has_key('status') and data['temperature']['status'] == 200:\n if 
data['temperature'].has_key('temperature'):\n weather_data['temperature'] = data['temperature']['temperature']\n\n if \"snow\" in data:\n weather_data['snow'] = data['snow']\n\n if data.has_key('current_weather') and data['current_weather'].has_key('weather_symbol_id'):\n weather_data['weather_symbol_id'] = data['current_weather']['weather_symbol_id']\n\n with open(app.config['DATA_FILE'], 'w') as outfile:\n json.dump(weather_data, outfile)\n\n except Exception, e:\n return \"fail: %s\"%e\n\n return \"merci\"\n \n else:\n abort(401)", "def test():\n temp_data = fetch_temp_data(\n (\"https://opendata-download-metobs.smhi.se/api/version/\" +\n \"latest/parameter/1/station/52350/period/latest-day/data.json\"))\n data = temp_series(temp_data)\n print(data)", "async def get_temps(self):\n return await self.get_states_by_tag_prefix(\"temp\", True)" ]
[ "0.6910243", "0.66520226", "0.6608227", "0.6573686", "0.63866216", "0.6343781", "0.631178", "0.6305189", "0.6281451", "0.62793523", "0.62376016", "0.6201287", "0.6198531", "0.6157297", "0.61510736", "0.6086177", "0.6076465", "0.6055122", "0.60240924", "0.5990383", "0.598555", "0.59790176", "0.59455276", "0.59411716", "0.5938862", "0.59372866", "0.5936434", "0.59353644", "0.59311247", "0.59307945", "0.592189", "0.5916002", "0.59141886", "0.5906629", "0.58926183", "0.5887271", "0.58706784", "0.5853595", "0.584836", "0.58331555", "0.5829586", "0.5818033", "0.5803123", "0.5802678", "0.57856244", "0.57811177", "0.5761662", "0.5750513", "0.5733111", "0.57293105", "0.5722789", "0.5718302", "0.57147753", "0.5695876", "0.5684673", "0.5678746", "0.5672794", "0.566251", "0.56408346", "0.5614617", "0.56031686", "0.56021696", "0.5598758", "0.55978173", "0.5595854", "0.5588175", "0.5587867", "0.55870295", "0.55770326", "0.5573633", "0.5568145", "0.5551748", "0.5550188", "0.5548608", "0.5548383", "0.5541416", "0.553443", "0.5517188", "0.5515195", "0.55125445", "0.55100244", "0.5498955", "0.54915494", "0.5479163", "0.54770935", "0.54760695", "0.5475436", "0.5474821", "0.54666865", "0.5466546", "0.54662603", "0.5463153", "0.5461766", "0.5458755", "0.54585093", "0.5453782", "0.545168", "0.5448433", "0.5446926", "0.543677" ]
0.71451485
0
Return all zip streams and their positions in the file.
def zipstreams(filename):
    with open(filename, 'rb') as fh:
        data = fh.read()
    i = 0
    while i < len(data):
        try:
            zo = zlib.decompressobj()
            yield i, zo.decompress(data[i:])
            i += len(data[i:]) - len(zo.unused_data)
        except zlib.error:
            i += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_stream_readers_for_zip(fh, tmp_dir):\n fasta_zip = zipfile.ZipFile(fh, 'r')\n rval = []\n for member in fasta_zip.namelist():\n fasta_zip.extract(member, tmp_dir)\n rval.append(open(os.path.join(tmp_dir, member), 'rb'))\n return rval", "def _GetStreamNames(self):\n if self._zipfile:\n for stream_name in self._zipfile.namelist():\n yield stream_name", "def ls(self):\n return self._zip_file.infolist()", "def _extract_zip(self, zipfile):\n zf = ZipFile(zipfile)\n d = {}\n for n in zf.namelist():\n d[n] = zf.read(n)\n return d", "def get_list_of_files_in_zip(zip_files):\n files = {k: [] for k in zip_files}\n for zip_file in zip_files:\n print('[ INFO ] Loading: %s' % zip_file)\n try:\n with ZipFile(join(zip_file), 'r') as z:\n files[zip_file] = files[zip_file] + z.namelist()\n except Exception as e:\n print(e)\n return files", "def get_zip(self):\n self.zip.rewind()\n return self.zip.in_memory_zip", "def zip(self):\n global pointer\n global error_flag\n global totalFiles\n while pointer < len(self.files) and ((self.t and not error_flag) or not self.t):\n # Se o modo e' t e a error_flag nao for false entao pode avancar\n # Se o modo nao for t pode avancar sem restricoes\n self.sem.acquire()\n iterator = pointer\n pointer += 1\n self.sem.release()\n if iterator < len(self.files): # Iterator e' o ficheiro que deve ser utilizado pela thread\n File = self.files[iterator]\n if os.path.isfile(File): # Ver se o ficheiro existe\n with ZipFile(File + '.zip', 'w') as zipfile:\n zipfile.write(File) # Zip\n self.totalFilesSem.acquire()\n totalFiles += 1\n self.totalFilesSem.release()\n else:\n print \"O ficheiro\", File, \"não existe.\" # Se nao existir, avisa o utilizador\n error_flag = True # Atualiza a sua propria flag", "def extract(zip_file_path: str) -> List[bytes]:\n\n with ZipFile(zip_file_path, 'r') as zip_file:\n files_extracted = zip_file.namelist()\n try:\n assert len(files_extracted) == 1, f\"{zip_file_path} contains more than one file: \"\n except:\n logging.info(f\"{zip_file_path} contains more than one file: {files_extracted}\")\n with zip_file.open(files_extracted[0], 'r') as f:\n return f.readlines()", "def zip(self):\n return self.__zip", "def test_open_each(self):\n zip_paths = zip_scanner(os.getcwd())\n for zip_path in zip_paths:\n with ZipEditor(zip_path) as zed:\n self.assertEqual(zip_path, zed.file)\n zed.open()\n self.assertIsNotNone(zed.tmpdir.name)\n self.assertEqual(zed.tmpdir.name, zed.getdir())\n self.assertIsNone(zed.getdir())", "def load_zip_files(zip_path):\n zd = zipfile.ZipFile(zip_path)\n paths = sorted(zd.filelist, key=lambda x: x.date_time)\n for path in paths:\n with zd.open(path, 'r') as fd:\n try:\n data = json.load(fd)\n yield File.Ok(fd.name, data)\n except (json.JSONDecodeError, UnicodeDecodeError) as exc:\n fd.seek(0)\n yield File.Err(fd.name, fd.read(), str(exc))", "def list_zip_files(zip_path):\n with zipfile.ZipFile(zip_path) as zf:\n data = ComixData(Directories=[], Files=[])\n app.logger.info(\"Loaded the zip file: %s\", zip_path)\n dirs = [name for name in zf.namelist() if name.endswith('/')]\n subdirs = set([name.split('/')[0] for name in dirs])\n if subdirs:\n for dirname in subdirs:\n dirname = dirname.decode('euc-kr').encode('utf-8')\n app.logger.debug('list_zip_files: %s, %s', dirname, [to_hex(c) for c in dirname])\n data.Directories.append(dirname)\n data = json.dumps(data._asdict(), ensure_ascii=False)\n response = flask.Response(data, headers=None)\n return reseponse\n ## No folder in zip file\n return get_files_in_zip_path(zip_path, 
'')", "def quickScanZip(args, fh):\n # 100 bytes is the smallest .zip possible\n\n fh.seek(0, 2)\n fsize = fh.tell()\n if fsize==0:\n print(\"Empty file\")\n return\n if fsize<100:\n print(\"Zip too small: %d bytes, minimum zip is 100 bytes\" % fsize)\n return\n fh.seek(-100, 2)\n\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n # try with larger chunk\n ofs = max(fh.tell()-0x10100, 0)\n fh.seek(ofs, 0)\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n print(\"expected PK0506 - probably not a PKZIP file\")\n return\n else:\n ofs = fh.tell()-0x100\n eod = EndOfCentralDir(ofs, eoddata, iEND+4)\n yield eod\n\n dirofs = eod.dirOffset\n for _ in range(eod.thisEntries):\n fh.seek(dirofs)\n dirdata = fh.read(46)\n if dirdata[:4] != b'PK\\x01\\x02':\n print(\"expected PK0102\")\n return\n dirent = CentralDirEntry(dirofs, dirdata, 4)\n\n yield dirent\n dirofs = dirent.endOffset", "def file_package_iter(self):\n files = list()\n futures = list()\n\n amount = 0\n for file in self.file_iterator:\n if amount + self._estimate_file_size(file) > self.max_size:\n if len(files) == 0: # This file is too large for one archive, special handling\n self.pool.wait(futures)\n self._calculate_hash(file)\n yield self._finish_info_package([file])\n continue\n\n self.pool.wait(futures)\n yield self._finish_info_package(files)\n\n files = list()\n amount = 0\n\n amount += file.size\n files.append(file)\n futures.append(self.pool.add_task(self._calculate_hash, file)) # todo calc small files in-thread?\n\n if len(files) > 0:\n yield self._finish_info_package(files)", "def _zip_files(files: Iterable[str], root: str) -> Tuple[bytes, str]:\n zip_data = StringIO()\n files = list(files) # create copy of list also converts generator to list\n with ZipFile(zip_data, \"w\", ZIP_DEFLATED) as zip_file:\n for file_name in files:\n zip_file.write(os.path.join(root, file_name), file_name)\n\n # Fix file permissions to avoid any issues - only care whether a file\n # is executable or not, choosing between modes 755 and 644 accordingly.\n for zip_entry in zip_file.filelist:\n perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16\n new_perms = 0o755 if perms & stat.S_IXUSR != 0 else 0o644\n if new_perms != perms:\n LOGGER.debug(\n \"fixing perms: %s: %o => %o\", zip_entry.filename, perms, new_perms\n )\n new_attr = (zip_entry.external_attr & ~ZIP_PERMS_MASK) | (\n new_perms << 16\n )\n zip_entry.external_attr = new_attr\n\n contents = zip_data.getvalue()\n zip_data.close()\n content_hash = _calculate_hash(files, root)\n\n return contents, content_hash", "def _unzip_files(self) -> None:\n for file in self.input_path.iterdir():\n if is_zipfile(file):\n with ZipFile(file, mode=\"r\") as archive:\n archive.extractall(path=self.temp_path)", "def add_zip(manager, zipfile, incref=False):\n from .core.cache.buffer_cache import empty_dict_checksum, empty_list_checksum\n result = []\n for checksum in zipfile.namelist():\n if checksum in (empty_dict_checksum, empty_list_checksum):\n continue\n checksum2 = bytes.fromhex(checksum)\n buffer = zipfile.read(checksum)\n checksum3 = calculate_checksum(buffer)\n if checksum3 != checksum2:\n raise ValueError(\"Incorrect checksum for zipped file '{}'\".format(checksum))\n buffer_cache.cache_buffer(checksum2, buffer)\n if incref:\n buffer_cache.incref(checksum2, authoritative=False)\n result.append(checksum)\n return result", "def Read(self):\n try:\n file_object = self._zip_file.open(self._stream_name, mode='r')\n except KeyError as exception:\n 
raise IOError(\n 'Unable to open stream with error: {0!s}'.format(exception))\n\n try:\n entry_data = file_object.read(self._TABLE_ENTRY_SIZE)\n while entry_data:\n table_entry = self._TABLE_ENTRY.parse(entry_data)\n\n self._offsets.append(table_entry.offset)\n entry_data = file_object.read(self._TABLE_ENTRY_SIZE)\n\n except construct.FieldError as exception:\n raise IOError(\n 'Unable to read table entry with error: {0!s}'.format(exception))\n\n finally:\n file_object.close()", "def unzip(self):\n global pointer\n global error_flag\n global totalFiles\n while pointer < len(self.files) and ((self.t and not error_flag) or not self.t):\n # Se o modo nao for t pode avancar sem restricoes\n # Se o modo e' t e a error_flag nao for false entao pode avancar\n self.sem.acquire()\n iterator = pointer\n pointer += 1\n self.sem.release()\n if iterator < len(self.files): # Iterator e' o ficheiro que deve ser utilizado pela thread\n File = self.files[iterator]\n if os.path.isfile(File): # Ver se o ficheiro existe\n with ZipFile(File, 'r') as zipfile:\n zipfile.extractall('.') # Unzip\n self.totalFilesSem.acquire()\n totalFiles += 1\n self.totalFilesSem.release()\n else:\n print \"O ficheiro\", File, \"não existe.\" # Se nao exister, avisa o utilizador\n error_flag = True # Atualiza a sua propria flag", "def get_zipinfo(self):\n zipinfo = zipfile.ZipInfo()\n zipinfo.filename = self.translated_path()\n zipinfo.date_time = self.get_mod_time()\n zipinfo.file_size = self.get_size()\n return zipinfo", "def Zip(args):\n parser = argparse.ArgumentParser(description=Zip.__doc__)\n parser.add_argument(\n '-r', dest='recursive', action='store_true',\n default=False,\n help='recurse into directories')\n parser.add_argument(\n '-q', dest='quiet', action='store_true',\n default=False,\n help='quiet operation')\n parser.add_argument('zipfile')\n parser.add_argument('filenames', nargs='+')\n options = parser.parse_args(args)\n\n src_files = []\n for filename in options.filenames:\n globbed_src_args = glob.glob(filename)\n if not globbed_src_args:\n if not options.quiet:\n print('zip warning: name not matched: %s' % filename)\n\n for src_file in globbed_src_args:\n src_file = os.path.normpath(src_file)\n src_files.append(src_file)\n if options.recursive and os.path.isdir(src_file):\n for root, dirs, files in os.walk(src_file):\n for dirname in dirs:\n src_files.append(os.path.join(root, dirname))\n for filename in files:\n src_files.append(os.path.join(root, filename))\n\n # zip_data represents a list of the data to be written or appended to the\n # zip_stream. It is a list of tuples:\n # (OS file path, zip path/zip file info, and file data)\n # In all cases one of the |os path| or the |file data| will be None.\n # |os path| is None when there is no OS file to write to the archive (i.e.\n # the file data already existed in the archive). 
|file data| is None when the\n # file is new (never existed in the archive) or being updated.\n zip_data = []\n new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]\n zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])\n for i in range(len(src_files)))\n write_mode = 'a'\n if os.path.exists(options.zipfile):\n with zipfile.ZipFile(options.zipfile, 'r') as zip_stream:\n try:\n files_to_update = set(new_files_to_add).intersection(\n set(zip_stream.namelist()))\n if files_to_update:\n # As far as I can tell, there is no way to update a zip entry using\n # zipfile; the best you can do is rewrite the archive.\n # Iterate through the zipfile to maintain file order.\n write_mode = 'w'\n for zip_path in zip_stream.namelist():\n if zip_path in files_to_update:\n os_path = zip_path_to_os_path_dict[zip_path]\n zip_data.append((os_path, zip_path, None))\n new_files_to_add.remove(zip_path)\n else:\n file_bytes = zip_stream.read(zip_path)\n file_info = zip_stream.getinfo(zip_path)\n zip_data.append((None, file_info, file_bytes))\n except IOError:\n pass\n\n for zip_path in new_files_to_add:\n zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))\n\n if not zip_data:\n print('zip error: Nothing to do! (%s)' % options.zipfile)\n return 1\n\n with zipfile.ZipFile(options.zipfile, write_mode,\n zipfile.ZIP_DEFLATED) as zip_stream:\n for os_path, file_info_or_zip_path, file_bytes in zip_data:\n if isinstance(file_info_or_zip_path, zipfile.ZipInfo):\n zip_path = file_info_or_zip_path.filename\n else:\n zip_path = file_info_or_zip_path\n\n if os_path:\n st = os.stat(os_path)\n if stat.S_ISDIR(st.st_mode):\n # Python 2.6 on the buildbots doesn't support writing directories to\n # zip files. This was resolved in a later version of Python 2.6.\n # We'll work around it by writing an empty file with the correct\n # path. 
(This is basically what later versions do anyway.)\n zip_info = zipfile.ZipInfo()\n zip_info.filename = zip_path\n zip_info.date_time = time.localtime(st.st_mtime)[0:6]\n zip_info.compress_type = zip_stream.compression\n zip_info.flag_bits = 0x00\n zip_info.external_attr = (st[0] & 0xFFFF) << 16\n zip_info.CRC = 0\n zip_info.compress_size = 0\n zip_info.file_size = 0\n zip_stream.writestr(zip_info, '')\n else:\n zip_stream.write(os_path, zip_path)\n else:\n zip_stream.writestr(file_info_or_zip_path, file_bytes)\n\n if not options.quiet:\n if zip_path in new_files_to_add:\n operation = 'adding'\n else:\n operation = 'updating'\n zip_info = zip_stream.getinfo(zip_path)\n if (zip_info.compress_type == zipfile.ZIP_STORED or\n zip_info.file_size == 0):\n print(' %s: %s (stored 0%%)' % (operation, zip_path))\n elif zip_info.compress_type == zipfile.ZIP_DEFLATED:\n print(' %s: %s (deflated %d%%)' % (operation, zip_path,\n 100 - zip_info.compress_size * 100 / zip_info.file_size))\n\n return 0", "def iter_zip(\n path: Path,\n *,\n fp: bool = False,\n) -> Generator[ZipfileItem, None, None]:\n with zipfile.ZipFile(path, mode='r') as zip_file:\n for zip_info in zip_file.infolist():\n item = _get_zipfile_item(zip_info)\n if fp and item.type == FileSystemItemType.file:\n with zip_file.open(zip_info) as fp:\n item.fp = fp\n yield item\n else:\n yield item", "def unzip_oxygen_files(zip_file):\n name_main_content = None\n name_left_menu = None\n list_img_files_to_save = list()\n\n files_unzipped = ZipFile(zip_file)\n for file_unzipped_name in files_unzipped.namelist():\n if not file_unzipped_name.startswith('__MACOSX'):\n if file_unzipped_name.endswith(\".jpeg\"):\n list_img_files_to_save.append(file_unzipped_name)\n elif file_unzipped_name.endswith(\".indexList.html\"):\n name_left_menu = file_unzipped_name\n elif file_unzipped_name.endswith(\"_xsd.html\"):\n name_main_content = file_unzipped_name\n\n return files_unzipped, name_left_menu, name_main_content, list_img_files_to_save", "def get_lines_from_zipped_file(fname):\n content = []\n fd = gzip.open(fname, 'r')\n try:\n for line in fd:\n content.append(line.strip('\\n'))\n except Exception as err:\n raise Exception(\"Error reading from file %s: %s\" % (fname, err))\n finally:\n fd.close()\n return content", "def getZipCounts(fname):\n counts = {}\n with open(fname) as f:\n counts = json.load(f)\n return counts", "def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)", "def files_in_archive(fd: BinaryIO) -> Iterable[int]:\n\n _check_next_bytes(fd, ARCHIVE_MAGIC, 'archive magic number')\n\n while True:\n # In some archives, the first file ends with an additional \\n. 
If that\n # is present, skip it.\n if fd.read(1) != b'\\n':\n fd.seek(-1, 1)\n\n # Each file in an archive is prefixed with an ASCII header:\n #\n # 16 B - file identifier (text)\n # 12 B - file modification timestamp (decimal)\n # 6 B - owner ID (decimal)\n # 6 B - group ID (decimal)\n # 8 B - file mode (octal)\n # 10 B - file size in bytes (decimal)\n # 2 B - ending characters (`\\n)\n #\n # Skip the unused portions of the file header, then read the size.\n fd.seek(16 + 12 + 6 + 6 + 8, 1)\n size_str = fd.read(10)\n if not size_str:\n return\n\n try:\n size = int(size_str, 10)\n except ValueError as exc:\n raise FileDecodeError(\n 'Archive file sizes must be decimal integers') from exc\n\n _check_next_bytes(fd, b'`\\n', 'archive file header ending')\n offset = fd.tell() # Store offset in case the caller reads the file.\n\n yield size\n\n fd.seek(offset + size)", "def extractZipFiles(rootDir, zipDir):\n for root, dirs, files in os.walk(zipDir, topdown=False):\n for name in files:\n \n zipFiles = os.path.join(root, name)\n \n #Check file extension here\n if \".zip\" not in zipFiles:\n continue\n \n else:\n zipPath = zipfile.ZipFile(zipFiles, 'r')\n #print(zipPath) \n \n filesInZip = zipPath.namelist()\n i = 0 \n for i in range(len(filesInZip)):\n #print(filesInZip[i])\n #print(zipPath.getinfo(filesInZip[i]))\n \n if \".mp3\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".m4a\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".mp4\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".png\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".jpg\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n \n elif \".pdf\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n else:\n print(\"No media found in zip file {0}\".format(name))\n \n zipPath.close()", "def _index_files(path):\n with zipfile.ZipFile(path) as zf:\n names = sorted(zf.namelist())\n names = [nn for nn in names if nn.endswith(\".tif\")]\n names = [nn for nn in names if nn.startswith(\"SID PHA\")]\n phasefiles = []\n for name in names:\n with zf.open(name) as pt:\n fd = io.BytesIO(pt.read())\n if SingleTifPhasics.verify(fd):\n phasefiles.append(name)\n return phasefiles", "def get_filenames(zip_file: str) -> List:\n file_names = []\n with ZipFile(zip_file, 'r') as zipObj:\n listOfiles = zipObj.namelist()\n for elem in listOfiles:\n if \"wav\" in elem:\n file_names.append(elem)\n return file_names", "def _add_payload_files(zip_file, payload_info_list):\n payload_byte_count = 0\n payload_file_count = 0\n for payload_info_dict in payload_info_list:\n zip_file.write_iter(payload_info_dict[\"path\"], payload_info_dict[\"iter\"])\n payload_byte_count += payload_info_dict[\"iter\"].size\n payload_file_count += 1\n return payload_byte_count, payload_file_count", "def open_zip(path, *args, **kwargs):\r\n with contextlib.closing(zipfile.ZipFile(path, *args, **kwargs)) as zip:\r\n yield zip", "def zipfile_readlines(package, filename):\n\n import zipfile\n\n f = None\n try:\n f = zipfile.ZipFile(package)\n try:\n package_dir = 
os.path.splitext(os.path.basename(package))[0]\n return [line.decode('utf-8') if isinstance(line, bytes) else line for line in f.open(os.path.join(package_dir, filename), 'rU').readlines()]\n except:\n pass\n finally:\n if f is not None:\n f.close()\n\n return []", "def get_zip_names(zipfilename):\n\n # Get names from the zip file\n zipfiles = []\n with ZipFile(zipfilename) as archive:\n for file in archive.infolist():\n zipfiles.append(file.filename)\n\n return zipfiles", "def get_files(self):\n\n self.files = []\n retriever_methods = [\n m\n for m in rtorrent9.file.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # 2nd arg can be anything, but it'll return all files in torrent\n # regardless\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"f.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n offset_method_index = retriever_methods.index(\n rtorrent9.rpc.find_method(\"f.offset\")\n )\n\n # make a list of the offsets of all the files, sort appropriately\n offset_list = sorted([r[offset_method_index] for r in results])\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n # get proper index positions for each file (based on the file\n # offset)\n f_index = offset_list.index(results_dict[\"offset\"])\n\n self.files.append(\n File(self._rt_obj, self.info_hash, f_index, **results_dict)\n )\n\n return self.files", "def zip_names(zip):\n if hasattr(zip, 'names'):\n return zip.names()\n else:\n def zip_filter():\n # 'Fix' an issue where directories are also being listed...\n for name in zip.namelist():\n if name[-1] != '/':\n yield name\n return zip_filter()", "def chunk_content(self):\n entries = DataObject.objects.filter(uuid=self.uuid)\n for entry in entries:\n if entry.compressed:\n data = BytesIO(entry.blob)\n z = zipfile.ZipFile(data, \"r\")\n content = z.read(\"data\")\n data.close()\n z.close()\n yield content\n else:\n yield entry.blob", "def extract(self):\n if self.files:\n return dict((f, self.read_file(f)) for f in self.files)\n else:\n raise RuntimeError(\"Can't extract whole archive without listfile.\")", "def _return_dfs_from_zipfolder(zip_path: str) -> Dict[str, pd.DataFrame]:\n zipfolder = ZipFile(zip_path)\n df_dict = {}\n for csv_info in zipfolder.infolist():\n csv_name = csv_info.filename\n unzipped = zipfolder.open(csv_name)\n df = _load_csv_into_df(unzipped, csv_name)\n df_dict[csv_name] = df\n\n assert len(df_dict) == len(zipfolder.infolist()) # TODO: maybe check / log function\n\n return df_dict", "def zip_files(dict_files, compression=zipfile.ZIP_DEFLATED):\n in_memory = StringIO()\n\n with zipfile.ZipFile(in_memory, 'w', compression) as zf:\n for fname, fp in dict_files.iteritems():\n zf.writestr(fname, fp.read())\n\n zf.close()\n\n in_memory.seek(0)\n\n return in_memory", "def get_zip_hashes(zip_obj):\n hashes = []\n\n for info in zip_obj.infolist():\n content = zip_obj.read(info.filename)\n content_hash = hashlib.sha1(content).hexdigest()\n hashes.append('%s %s' % (info.filename, content_hash))\n\n return \"\\n\".join(hashes)", "def get_files_in_zip_path(zipname, path):\n data = ComixData(Directories=[], Files=[])\n with zipfile.ZipFile(zipname) as zf:\n for name in zf.namelist():\n name = name.decode('euc-kr').encode('utf-8')\n pardir, basename = os.path.split(name)\n if basename and path == 
pardir:\n app.logger.debug(\"get_files_in_zip_path: %s, %s\", pardir, basename)\n data.Files.append(basename)\n if len(data.Files):\n response = flask.Response(json.dumps(data._asdict(), ensure_ascii=False), headers=None)\n return response\n\n return ('', 204)", "def Read(self):\n try:\n file_object = self._zip_file.open(self._stream_name, mode='r')\n except KeyError as exception:\n raise IOError(\n 'Unable to open stream with error: {0!s}'.format(exception))\n\n try:\n entry_data = file_object.read(self._TABLE_ENTRY_SIZE)\n while entry_data:\n table_entry = self._TABLE_ENTRY.parse(entry_data)\n\n self._timestamps.append(table_entry.timestamp)\n entry_data = file_object.read(self._TABLE_ENTRY_SIZE)\n\n except construct.FieldError as exception:\n raise IOError(\n 'Unable to read table entry with error: {0!s}'.format(exception))\n\n finally:\n file_object.close()", "def _open_zip(self):\n self.buffer = io.BytesIO()\n self.zf = zipfile.ZipFile(self.buffer, \"w\", zipfile.ZIP_DEFLATED)", "def unzip_data():\n zip_ref = zipfile.ZipFile(data_zip, 'r')\n zip_ref.extractall('')\n zip_ref.close()", "def zipfile_containing(file_contents: Sequence[Tuple[str, str]]):\n with tempfile.NamedTemporaryFile(suffix='.zip') as temp_file:\n with zipfile.ZipFile(temp_file, 'w') as zip_file:\n for file_name, contents in file_contents:\n zip_file.writestr(file_name, contents)\n temp_file.flush()\n yield temp_file", "def fromZip(self, zip_location,extract_location):\n zip_file = zipfile.ZipFile(zip_location,'r')\n zip_file.extractall(extract_location)", "def _iter_dag_filelocs(fileloc: str) -> Iterator[str]:\n if fileloc.endswith(\".py\") or not zipfile.is_zipfile(fileloc):\n yield fileloc\n return\n try:\n with zipfile.ZipFile(fileloc) as z:\n for info in z.infolist():\n if might_contain_dag(info.filename, True, z):\n yield os.path.join(fileloc, info.filename)\n except zipfile.BadZipFile:\n self.log.exception(\"There was an error accessing ZIP file %s %s\", fileloc)", "def process_files(exp_folders):\n pool = mp.Pool()\n results = pool.imap_unordered(read_and_serialize, exp_folders)\n\n stat = []\n for res in results:\n print(res)\n stat.append(res)\n\n pool.close()\n pool.join()", "def _GetSerializedDataStreamNumbers(self, stream_name_prefix):\n stream_numbers = []\n for stream_name in self._zipfile.namelist():\n if not stream_name.startswith(stream_name_prefix):\n continue\n\n _, _, stream_number = stream_name.partition('.')\n try:\n stream_number = int(stream_number, 10)\n stream_numbers.append(stream_number)\n except ValueError:\n logging.error(\n 'Unable to determine stream number from stream: {0:s}'.format(\n stream_name))\n\n return sorted(stream_numbers)", "def _get_compressed_file(files, password=None):\n multiple_files = len(files) > 1\n # Replace the data and report type with just `.zip`.\n zipfile = re.sub(r'(_(\\w+))?\\.(\\w+)$', '.zip', files[0].name)\n compression = pyminizip.compress_multiple if multiple_files else pyminizip.compress\n compression([f.name for f in files] if multiple_files else files[0].name, zipfile, password, COMPRESSION_LEVEL)\n return zipfile", "def get_files(self):\r\n return self._filelist", "def extract_files(self) -> list:\n pass", "def _split_zip_file_into_sections(self, file_hex):\n split_end_central_directory_header = file_hex.split('504b0506')\n end_central_directory_header = EndCentralDirectoryHeader(\n f'504b0506{split_end_central_directory_header[-1]}')\n\n central_directory = get_header_field(\n file_hex, 
end_central_directory_header.get_offset_start_central_dir_from_start(),\n end_central_directory_header.get_size_central_dir_bytes())\n\n local_files = file_hex[:end_central_directory_header.get_offset_start_central_dir_from_start(\n )*2]\n\n return (local_files, central_directory, end_central_directory_header)", "def get_zipped_images(self, num_sequence=None):\n self._create_pdf(self.survey, self.response)\n self._build_image_names(num_sequence, self._page_count)\n self._create_index()\n self._build_zip()\n return self.zip", "def open_files(filenames):\n for filename in filenames:\n if filename.endswith('.gz') or filename.endswith('.zip'):\n yield gzip.open(filename, 'r')\n else:\n yield open(filename, 'rb')", "def unzip_subtitles(self, zip_filepath):\n with ZipFile(zip_filepath, 'r') as zip_obj:\n filenames = zip_obj.namelist()\n\n subtitle_filenames = [fn for fn in filenames if fn.endswith('.srt')]\n for fn in subtitle_filenames:\n zip_obj.extract(fn, self.temp_storage_dir)\n\n return subtitle_filenames", "def get_zip_file(self):\n io = StringIO()\n zf = zipfile.ZipFile(io, \"w\")\n try:\n for track in self.get_tracks():\n zf.write(track.file_name,\n track.safe_file_name,\n zipfile.ZIP_DEFLATED)\n finally:\n zf.close()\n\n io.reset()\n io.seek(0, 2)\n length = io.tell()\n io.reset()\n return io,\\\n cleanse_filename(\"%s - %s.zip\" % (self.name, self.year)),\\\n length", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def unzip_data(zip_f,data_folder_path): \n\n with zipfile.ZipFile(zip_f,\"r\") as zip_ref:\n zip_ref.extractall(data_folder_path)", "def test_find_many_files_zipped_allow(self):\n\n these_file_names = satellite_io.find_many_files(\n top_directory_name=TOP_DIRECTORY_NAME,\n first_date_string=FIRST_DATE_STRING,\n last_date_string=LAST_DATE_STRING,\n prefer_zipped=True, allow_other_format=True, test_mode=True\n )\n\n self.assertTrue(these_file_names == FILE_NAMES_UNZIPPED)", "def unzip_file(zip_path: str, output_dir: str) -> UnzipFiles:\n files = []\n with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n zip_ref.extractall(output_dir)\n for file_path in os.listdir(output_dir):\n files.append(f\"{output_dir}/{file_path}\")\n return files", "def read_data(self, filename):\n with zipfile.ZipFile(filename) as f:\n raw_data = f.read(f.namelist()[0]).split()\n return raw_data", "def files(self):\n files = []\n if self.package_type == 'package':\n file_data = dict([(k, self[k]) \\\n for k in ['size', 'sha1', 'sha256', 'md5sum']])\n file_data['name'] = self['filename'].split('/')[-1]\n files.append(file_data)\n else:\n for d in self['files']:\n file_data = d.copy()\n # Get checksum data as well...\n for key in ['sha1', 'sha256']:\n for data in self['checksums-' + key]:\n if file_data['name'] == data['name']:\n file_data[key] = data[key]\n files.append(file_data)\n return files", "def test_find_many_files_zipped_no_allow(self):\n\n these_file_names = satellite_io.find_many_files(\n 
top_directory_name=TOP_DIRECTORY_NAME,\n first_date_string=FIRST_DATE_STRING,\n last_date_string=LAST_DATE_STRING,\n prefer_zipped=True, allow_other_format=False, test_mode=True\n )\n\n self.assertTrue(these_file_names == FILE_NAMES_ZIPPED)", "def getAllEntries(self):\n \n log_entries_dict = collections.defaultdict(list)\n for logfile in os.listdir(self.log_folder):\n log = os.path.join(self.log_folder, logfile)\n with open(log, 'rb') as l:\n logCSVreader = csv.reader(l, delimiter=\"|\")\n logCSVreader.next() # skip header\n try:\n for row in logCSVreader:\n zip_file = row[0]\n log_entries_dict[zip_file].append(row)\n except:\n pass\n return log_entries_dict", "def filecoords(self):\n coords = sorted(self.map.keys())\n for coord in coords:\n yield coord, self.map[coord]", "def _add_tag_files(\n zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count\n):\n tag_info_list = []\n _add_tag_file(zip_file, dir_name, tag_info_list, _gen_bagit_text_file_tup())\n _add_tag_file(\n zip_file,\n dir_name,\n tag_info_list,\n _gen_bag_info_file_tup(payload_byte_count, payload_file_count),\n )\n _add_tag_file(\n zip_file, dir_name, tag_info_list, _gen_pid_mapping_file_tup(payload_info_list)\n )\n return tag_info_list", "def files(self):\r\n return self._files", "def peek(self, target: str):\n\t\ttry:\n\t\t\ttarget = os.path.abspath(target)\n\t\t\tif output_target:\n\t\t\t\toutdir = output_target\n\t\t\telse:\n\t\t\t\toutdir = os.path.dirname(target)\n\n\t\t\twith open(target, \"rb+\") as archive:\n\t\t\t\theader = archive.read(HEADER_LENGTH)\n\t\t\t\tzip_contents = BytesIO(archive.read())\n\n\t\t\tzip = zipfile.ZipFile(zip_contents, \"r\", zipfile.ZIP_DEFLATED)\n\t\t\tstatus = zip.namelist()\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(f\"Error peeking at archive {target} - {e}\")\n\t\t\tstatus = []\n\t\t\theader = None\n\n\t\treturn (status, header)", "def test_close_stream_first(self):\n z = ZipFile(self.f, 'r')\n stream = z.readstream(FILENAMES[0])\n z.close()\n try:\n stream.read()\n except:\n self.fail(\"Reading stream from closed archive failed!\")\n stream.close()\n # Now the archive should close.\n self.assertIsNone(z._a)\n self.assertTrue(stream.closed)\n self.assertIsNone(z._stream)", "def zip_open_bin(zip, filename):\n if isinstance(zip, FakeZip):\n return zip.open(filename, 'rb')\n else:\n return zip.open(filename, 'r')", "def _open_zipped(infile, mode='r', encoding='utf-8'):\n mode = mode[0] + 't'\n p2mode = mode\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return _zopen(infile, mode)\n if infile.endswith('.bz2'):\n return _bopen(infile, mode)\n return open(infile, p2mode, encoding=encoding)", "def list_of(self):\r\n self.files = os.listdir(self.p)\r\n self.size = [0] * len(self.files)\r\n self.created = [0] * len(self.files)\r\n self.modified = [0] * len(self.files)\r\n total_size = 0\r\n iteration = 0\r\n for file in self.files:\r\n self.fol = os.path.join(self.p, file)\r\n self.modified[iteration] = time.ctime(os.path.getmtime(f\"{self.fol}\"))\r\n self.created[iteration] = time.ctime(os.path.getctime(f\"{self.fol}\"))\r\n for path, dirs, files in os.walk(self.fol):\r\n for fol in files:\r\n fpath = os.path.join(path, fol)\r\n total_size += os.path.getsize(fpath)\r\n self.size[iteration] = total_size\r\n iteration += 1\r\n return self.files, self.size, self.created, self.modified", "def create_zip_from_files(files: List[Path]) -> Any:\n temp = tempfile.NamedTemporaryFile()\n with zipfile.ZipFile(temp, 
'w') as handle:\n for f in files:\n filename = f.name\n handle.write(f, arcname=filename)\n temp.flush()\n return temp", "def extract_zip_contents(zip_file, destination):\n logging.info(\"Extracting ZIP File\")\n if os.path.isfile(zip_file):\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(destination)\n else:\n logging.error(\"%s not found.\", zip_file)\n sys.exit(\"ZIP is not the filesystem.\")", "def MapFileToArchive(self, file_path):\n num_archives = len(self.zipfilenames)\n while num_archives > 0:\n target = self.zipfilenames[num_archives - 1]\n if len(target) > 1:\n if self.CompareFilenames(target[1], file_path) >= 0:\n return target[0]\n num_archives -= 1\n\n return None", "def MapFileToArchive(self, file_path):\n num_archives = len(self.zipfilenames)\n while num_archives > 0:\n target = self.zipfilenames[num_archives - 1]\n if len(target) > 1:\n if self.CompareFilenames(target[1], file_path) >= 0:\n return target[0]\n num_archives -= 1\n\n return None", "def getFilesAtStamp(self, timestamp):\n\t\tout = []\n\t\tfor stream_name in self.stamps_by_stream.keys():\n\t\t\tts_index = bisect.bisect_right(self.stamps_by_stream[stream_name], timestamp)-1\n\t\t\tif ts_index < 0:\n\t\t\t\tcontinue\n\t\t\ttuple_ts = self.streams[stream_name].keys()\n\t\t\ttuple_ts.sort()\n\t\t\tout.append(self.streams[stream_name][tuple_ts[ts_index]])\n\t\treturn out", "def load_zipped_image(zipfilename):\n\n # Read each image and append in a list\n img = []\n filenames = []\n with ZipFile(zipfilename) as archive:\n for entry in archive.infolist():\n with archive.open(entry) as file:\n tmp = Image.open(file)\n img.append(np.array(tmp))\n filenames.append(file.name)\n\n # Return the read images\n return img, filenames", "def test_deferred_close_by_stream(self):\n z = ZipFile(self.f, 'r')\n stream = z.readstream(FILENAMES[0])\n stream.close()\n # Make sure archive stays open after stream is closed.\n self.assertIsNotNone(z._a)\n self.assertIsNone(z._stream)\n z.close()\n self.assertIsNone(z._a)\n self.assertTrue(stream.closed)", "def extract_zip(file, extract_location):\n\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(extract_location)\n\n print(f\"Extracted file to {extract_location}\")", "def read_directory_stream(self, offset):\n stream = []\n\n self.infile.seek(offset)\n LOG.debug(\"Reading dirent stream at: %s\", POSITION)\n for _ in xrange(256):\n LOG.debug(\" Reading dirent at: %s\", POSITION)\n dirent = FatXDirent.from_file(self)\n\n # TODO: Perhaps I should also do this before creating the object.\n # check for end of dirent stream\n if (dirent.file_name_length == DIRENT_NEVER_USED or\n dirent.file_name_length == DIRENT_NEVER_USED2):\n LOG.debug(\" End of dirent stream\")\n break\n\n LOG.debug(\" Read dirent: %s\", dirent.file_name)\n\n stream.append(dirent)\n\n return stream", "def files(self):\n return self._files", "def zip_data(self) -> None:\n zipf = zipfile.ZipFile('output.zip', 'w', zipfile.ZIP_DEFLATED)\n self._zipdir(self.path, zipf)\n zipf.close()", "def test_open_by_unnamed_fobj(self):\n with open(ZIPPATH, 'rb') as zf:\n with io.FileIO(zf.fileno(), mode='r', closefd=False) as f:\n self._test_listing_content(f)", "def open_zip_file(self, filepath: str, mode=\"r\"):\n with ZipFile(self.zipped_code_path, \"r\") as z:\n return z.open(filepath, mode=mode)", "def files_content(tmpdir):\n return _full_content()[0 : len(_simple_files(tmpdir))]", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n 
)\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def load_zip_codes():\n logger.info('Loading zip code data')\n read_zips()", "def readArchiveFiles(self) -> 'ChartVersionInfo':\n if self._archiveFiles is not None:\n return self\n archiveFiles = {}\n try:\n with self.fileOpen() as tar_file:\n for tarinfo in tar_file.getmembers():\n if tarinfo.isreg():\n parsename = pathlib.PurePosixPath(tarinfo.name)\n if parsename.is_absolute():\n raise InputOutputError('All files in chart archive must be relative')\n basename = str(pathlib.PurePosixPath(*parsename.parts[1:]))\n if basename in ['Chart.yaml', 'Chart.lock', 'values.yaml', 'values.schema.json', 'requirements.yaml']:\n file = tar_file.extractfile(tarinfo.name)\n if file is None:\n raise InputOutputError('Could not read file \"{}\" from archive'.format(tarinfo.name))\n archiveFiles[basename] = file.read().decode('utf-8')\n except tarfile.TarError as e:\n raise InputOutputError(str(e)) from e\n self._archiveFiles = archiveFiles\n return self", "def test_zip_files(self):\n base_zip_files = ['whypython.txt', 'states.dbf', 'cities.kmz']\n\n text_file = os.path.join(os.getcwd(), 'test-data', 'whypython.txt')\n dbf_file = os.path.join(os.getcwd(), 'test-data', 'states.dbf')\n kml_file = os.path.join(os.getcwd(), 'test-data', 'cities.kmz')\n #non_file = os.path.join(os.getcwd(), 'test-data', 'emptyfolder')\n self.request['params'][0]['response']['docs'][0]['path'] = text_file\n self.request['params'][0]['response']['docs'][1]['path'] = dbf_file\n self.request['params'][0]['response']['docs'][2]['path'] = kml_file\n #self.request['params'][0]['response']['docs'][3]['path'] = non_file\n __import__(self.request['task'])\n getattr(sys.modules[self.request['task']], \"execute\")(self.request)\n zip_files = zipfile.ZipFile(os.path.join(self.temp_folder, 'output.zip')).namelist()\n self.assertEqual(sorted(zip_files), sorted(base_zip_files))", "def getzip_requests(url, zipfile, unzipdir):\n with closing(requests.get(url, stream=True)) as r:\n if r.headers.get('content-type') == None or r.headers.get('content-type') != 'application/zip':\n warning = \"{} doesn't seem to be a zip file. 
Unzipping may fail.\".format(url)\n warn(warning)\n with open(zipfile, 'wb') as fd:\n for chunk in r.iter_content():\n fd.write(chunk)\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)", "def unzip(zipped_file):\n with gzip.open(zipped_file, 'rt', encoding='ISO-8859-1') as file:\n file = file.read()\n return file", "def test_open_by_name(self):\n self._test_listing_content(ZIPPATH)", "def open_zipped(infile, mode='r'):\n mode = mode[0] + 't'\n p2mode = mode\n if hasattr(infile, 'write'):\n return infile\n if isinstance(infile, str):\n if infile.endswith('.gz'):\n return gzip.open(infile, mode)\n if infile.endswith('.bz2'):\n if hasattr(bz2, 'open'):\n return bz2.open(infile, mode)\n else:\n return bz2.BZ2File(infile, p2mode)\n return open(infile, p2mode)", "def test_zip_file_streamer(mock_gen):\n urls = [\n 'http://www.example.com/coda123/manifest-md5.txt',\n 'http://www.example.com/coda123/bagit.txt',\n 'http://www.example.com/coda123/bag-info.txt'\n ]\n meta_id = 'coda123'\n mock_data_1 = [b'Test1', b'manifest', b'data1']\n mock_data_2 = [b'Test2', b'bagit', b'data2']\n mock_data_3 = [b'Test3', b'baginfo', b'data3']\n mock_gen.side_effect = [iter(mock_data_1), iter(mock_data_2), iter(mock_data_3)]\n chunk = list(presentation.zip_file_streamer(urls, meta_id))\n for data in mock_data_1, mock_data_2, mock_data_3:\n for val in data:\n assert val in chunk\n assert mock_gen.call_count == 3", "def read_zip_file():\n with open(os.path.join(DIST_DIR, \"build.zip\"), \"rb\") as zip_file:\n return zip_file.read()", "def _GetDataStreams(self):\n if self._data_streams is None:\n self._data_streams = []\n if self._fsntfs_file_entry.has_default_data_stream():\n data_stream = ntfs_data_stream.NTFSDataStream(self, None)\n self._data_streams.append(data_stream)\n\n for fsntfs_data_stream in self._fsntfs_file_entry.alternate_data_streams:\n data_stream = ntfs_data_stream.NTFSDataStream(self, fsntfs_data_stream)\n self._data_streams.append(data_stream)\n\n return self._data_streams", "def __init__(self, zip_file, stream_name):\n super(_SerializedDataOffsetTable, self).__init__()\n self._offsets = []\n self._stream_name = stream_name\n self._zip_file = zip_file" ]
[ "0.7123306", "0.654805", "0.64884543", "0.6211099", "0.6143137", "0.60887825", "0.60620993", "0.6023242", "0.591442", "0.58101535", "0.57589924", "0.5750186", "0.57359564", "0.5727873", "0.57115054", "0.57092226", "0.56497854", "0.5616687", "0.561402", "0.5552648", "0.55434126", "0.55362296", "0.551657", "0.5498416", "0.5497909", "0.5496984", "0.5484534", "0.54766744", "0.5465474", "0.546309", "0.54438967", "0.5442699", "0.53819376", "0.53715724", "0.5367524", "0.53531927", "0.5347765", "0.53450274", "0.5316173", "0.53145015", "0.53142595", "0.531348", "0.5300152", "0.52896065", "0.52814746", "0.5272585", "0.52724266", "0.5263814", "0.525952", "0.525794", "0.52491945", "0.5243569", "0.52319235", "0.5230872", "0.5224073", "0.52063805", "0.52060133", "0.52021295", "0.51894635", "0.51772213", "0.5168451", "0.51568377", "0.5155704", "0.5151376", "0.5150221", "0.5133513", "0.51313835", "0.5118482", "0.51171803", "0.51163137", "0.51126313", "0.5111218", "0.5111118", "0.51021105", "0.51014334", "0.51001877", "0.50968766", "0.50968766", "0.508263", "0.5077064", "0.5077026", "0.50737804", "0.50632995", "0.5063276", "0.5050275", "0.5047331", "0.50437933", "0.50373256", "0.5033605", "0.5024562", "0.5017873", "0.50066715", "0.50036424", "0.49997112", "0.4976496", "0.49727973", "0.49700442", "0.49685338", "0.4955411", "0.49518412" ]
0.6771445
1
erase files that have been renamed but not normalized
def __clean(path, pattern = '.tiff'):
    for f in os.listdir(path):
        if re.search(pattern, f):
            os.remove(os.path.join(path, f))
    print("directory cleaned")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_files(self):\n self.filenames.clear()", "def space_cleaning():\n for file in os.listdir(\".\"):\n if file.endswith(\".png\"):\n os.remove(file)", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))", "def unnormalize_files():\n mdir = mw.col.media.dir()\n try:\n # A quirk of certain Pythons is that some os commands give\n # different results when you put in a unicode object rather\n # than a str.\n mdir = unicode(mdir, sys.getfilesystemencoding())\n except TypeError:\n # Already unicode.\n pass\n media_in_col = mw.col.media.allMedia()\n # Filter the files on disk. Drop all files that do not contain\n # combining characters. Those should be no problem. (The Unicode\n # web page describes a \"quick test\", we do an even quicker test.)\n problem_files = []\n try:\n for f in progress(os.listdir(mdir), _(u\"Checking files on disk.\"),\n _(u\"Stop that!\")):\n for c in f:\n if unicodedata.combining(c):\n # We just assume that f is NFD-normalized. If not\n # we will just waste time later.\n problem_files.append(f)\n break\n except StopIteration:\n return\n try:\n for m in progress(media_in_col, _(u\"Unicode unnormalizing files.\"),\n _(u\"Stop that!\")):\n m_n = unicodedata.normalize('NFD', m)\n if m == m_n:\n continue\n if m_n in problem_files:\n shutil.move(os.path.join(mdir, m_n), os.path.join(mdir, m))\n except StopIteration:\n return", "def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)", "def clean(self):\n clean_list = [\n position\n for position in os.listdir()\n if os.path.isfile(position) and not position.startswith(\".\")\n ]\n self.move_files(clean_list)", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def clean(vendor):\n remove_all(\n path\n for path in vendor.glob('*')\n if path.basename() != 'vendored.txt'\n )", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'fitting')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'fitting', existing_file))", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def tidyFileNames(folderToCheck):\n\n filters = list(map(lambda x: \"*.\" + x, expectedExts))\n\n for filter in filters:\n\n for f in getFiles(folderToCheck,filter):\n\n clean = f\n for search in searches:\n clean = replace(clean,search)\n\n if renameFile(f,clean):\n results = list(map(os.path.basename,[f,clean]))\n if results[0] != results[1]:\n print(f\"Renamed: {results[0]} -> {results[1]}\")", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n 
full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def delete_leftovers(self):\n for each_file, artist in self.past_songs_db_data:\n if os.path.isfile(each_file): \n os.remove(each_file)\n print \"Deleted \" + each_file\n\n for each_file in os.listdir(\".\"):\n if each_file.endswith(\".jpg\"):\n os.remove(each_file)", "def deleteIntermediateFiles(self):\n uniq_files = set(self.files_to_delete)\n print (\"Deleting %d intermediate files\" % len(uniq_files))\n for fn in uniq_files:\n # don't delete log files\n if not fn.endswith(\".log\"):\n os.remove(fn)", "def clean_folder(self):\n # Remove the 1st output\n # Remove the 2nd output\n # Remove the calibrated output\n try:\n os.remove(\"output1.csv\")\n except:\n pass\n try: \n os.remove(\"output2.csv\")\n except:\n pass\n try:\n os.remove(self.__add_output_file_location(self._output_filename))\n except:\n pass\n \n list = os.listdir(\"edited\")\n for file in list:\n file = os.path.join(\"edited\", file)\n try:\n os.remove(file)\n except:\n pass\n \n list = os.listdir(\"extracted\")\n for file in list:\n file = os.path.join(\"extracted\", file)\n try:\n os.remove(file)\n except:\n pass", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def tearDown(self):\n\n for fname in self.fnames:\n os.remove(fname)", "def clean_retrosheet_files(self):\n # Get zipped and unzipped folder names\n zippedFileFolder = Filepath.get_retrosheet_folder(folder='zipped')\n unzippedFileFolder = Filepath.get_retrosheet_folder(folder='unzipped')\n\n # Clean out all files in both folders\n for folder in (zippedFileFolder, unzippedFileFolder):\n os.chdir(folder)\n for file in os.listdir(os.getcwd()): \n if os.path.isdir(file): \n shutil.rmtree(file)\n else: \n os.remove(file)", "def remove_files(files):\n for file_name in files:\n os.remove(file_name)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def strip_filenames(descriptor):\r\n print(\"strip filename from 
{desc}\".format(desc=descriptor.location.to_deprecated_string()))\r\n if descriptor._field_data.has(descriptor, 'filename'):\r\n descriptor._field_data.delete(descriptor, 'filename')\r\n\r\n if hasattr(descriptor, 'xml_attributes'):\r\n if 'filename' in descriptor.xml_attributes:\r\n del descriptor.xml_attributes['filename']\r\n\r\n for child in descriptor.get_children():\r\n strip_filenames(child)\r\n\r\n descriptor.save()", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def remove_old_files(filelist):\n\n for filename in filelist:\n if path.exists(filename):\n try:\n remove(filename)\n print \"%s deleted\" % filename \n except Exception: #TODO Exception spesifik.\n stderr.write(\"%s cannot remove. 
Please check your priviledge\\n\"\n % filename)\n exit(1)", "def clean():\n clean_files()", "def remove_files(files):\n for file in files:\n if os.path.exists(file):\n if file.startswith(\"./\") or file.startswith(\".\\\\\"):\n file = file[2:]\n if os.path.isdir(file):\n rmtree(file)\n else:\n os.unlink(file)", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def delete_ansys_leftovers(ansys_folder: str):\n\n files = os.listdir(ansys_folder)\n keep_endings = ('.txt', '.png', '.vtk', '.pdf', '.eps', '.svg')\n for ansys_file in files:\n if not ansys_file.endswith(keep_endings):\n os.remove(os.path.join(ansys_folder, ansys_file))", "def remove_extra_files(self):\n\n for f in self._extra_files:\n if os.path.isfile(f):\n os.remove(f)", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def tearDown(self):\n\n for fname in self.fnames:\n FileSystem.unlink(fname)", "def clean_filename(self, filename):\n return remove(filename,self.unwanted_chars_in_filenames)", "def fixupFileNames(process):\n if not hasattr(process.source, \"fileNames\"):\n process.source.fileNames = cms.untracked.vstring()\n return", "def clean_file_tree(new_path):\n for folder in os.listdir(new_path): # get rid Merge in folder names\n fp = new_path + '/' + folder + '/'\n if folder.startswith('Merge_'):\n folder = folder[6:]\n os.rename(fp, new_path + '/' + folder)\n fp = new_path + '/' + folder \n if '__' in folder: # un-flatten directories\n os.renames(fp, new_path + '/' + '/'.join(folder.split('__')))\n \n for ll in os.walk(new_path): # get rid of directory names in filenames\n for f in ll[2]:\n if ('__' in f) and (f.startswith('gdac') is False):\n os.rename(ll[0] + '/' + f,\n ll[0] + '/' + '.'.join(f.split('.')[-2:]))", "def _clean_path(self, pathToRemove, files):\n result = []\n for filePath in files:\n filePath = string.split(filePath, pathToRemove)\n filePath = filePath[1]\n filePath = string.split(filePath, os.sep)\n if filePath[0] == '':\n filePath.remove('')\n fileName = string.join(filePath, os.sep)\n result.append(fileName)\n return result", "def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)", "def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"", "def clean_documents():\n start = datetime.now()\n for i, raw_filename in enumerate(os.listdir(RAW_DIR)):\n fullpath = os.path.join(RAW_DIR, raw_filename)\n if os.path.isfile(fullpath):\n print(\"Cleaning {0} {1}\".format(i, fullpath), file=stderr)\n try:\n with open(fullpath, \"r\") as f:\n text = f.read()\n text = clean(text)\n soup = BeautifulSoup(text, \"html.parser\")\n cleaned = visible_text(soup)\n score = germanwings_score(cleaned)\n if not score:\n print(\"not germanwings: {0}\".format(raw_filename))\n else:\n clean_filename = os.path.join(CLEAN_DIR, raw_filename)\n with open(clean_filename, \"w\") as f:\n f.write(cleaned.encode(\"ascii\", \"ignore\"))\n except Exception as exc:\n print(\"{0}: {1}\".format(fullpath, exc), file=stderr)\n end = datetime.now()\n print(\"Elapsed time to clean: 
{0}\".format(end - start), file=stderr)", "def space_cleaning(file=\"\"):\n intermediate = str(file) + str(\"_intermediate\")\n output_file = str(file)\n\n os.rename(intermediate, output_file)", "def remove_matching_files(\n removal_ids: Set[str],\n directory: str,\n ):\n for file_name in os.listdir(directory):\n file_id, _ = os.path.splitext(file_name)\n if file_id in removal_ids:\n os.remove(os.path.join(directory, file_name))", "def clean_frames(self):\n for fn in os.listdir(self.frame_directory):\n if fn.endswith(\".png\") and fn in self.frame_fns:\n os.remove(fn)", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def purge_non_article_filenames(filenames):\n\n result = []\n\n for filename in filenames:\n if search(r'^\\d+-\\S+\\.md$', filename):\n result.append(filename)\n\n return result", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def clean_up_dir(dirname):\n\n for filename in os.listdir(dirname):\n if(filename.endswith(\".fq\")\n or filename.endswith(\".paired.bam\")\n or filename.endswith(\".sam\")):\n filepath = os.path.join(dirname, filename)\n os.remove(filepath)\n assert(not(os.path.exists(filepath)))", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def deleteconvert(self):\n filename = os.path.join(self.docx_path, self.name.docx)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.html_path, self.name.html)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.docbook_path, self.name.xml)\n if os.path.isfile(filename):\n os.remove(filename)\n filename = os.path.join(self.markdown_path, self.name.md)\n if os.path.isfile(filename):\n os.remove(filename)", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def clean_files(ftype, remove=False):\n import os\n files = os.listdir()\n found_files = [f for f in files if ftype in f]\n if remove:\n for ff in found_files:\n os.remove(ff)\n print(\"Removed {}\".format(ff))\n else:\n return found_files", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def delete_b_files(intermediate_files: List[File]) -> None:\n for f in intermediate_files:\n f.remove()", "def clean_chunk_files(dirpath):\n workdir = 
os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)", "def transform_suffix(filenames, suffix_old, suffix_new):\n\n new_filenames = set([])\n len_suffix_old = len(suffix_old) + 1 # add one for the \".\"\n # loop over the list of files and remove the suffix\n for name in filenames:\n name = name[:-len_suffix_old]\n new_filenames.add(name + \".\" + suffix_new)\n \n return new_filenames", "def clear_outdated_files():\n for f in os.listdir(MEDIA_ROOT):\n file_path = os.path.join(MEDIA_ROOT, f)\n if os.path.isfile(file_path) and os.stat(file_path).st_mtime < time.time() - STORE_PDF_DAYS * 86400:\n os.remove(file_path)", "def removeMeanFile(indexName,DATA_path):\n print 'Finish \\n Remove useless files'\n tileList = glob.glob(DATA_path +'/' + indexName+'/*')\n for tile in tileList:\n meanList = glob.glob(tile + '/*_MEAN.tif')\n for file in meanList:\n os.remove(file)\n return 0", "def sanitize_paths(self):\n\n for wadfile in self.files:\n if wadfile.path:\n path, filename = os.path.split(wadfile.path)\n if len(filename) < 255:\n continue\n\n basename, ext = os.path.splitext(filename)\n wadfile.path = os.path.join(path, f\"{basename[:255-17-len(ext)]}.{wadfile.path_hash:016x}{ext}\")", "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def cleanFiles(a_file_list):\n for entry in a_file_list:\n cmd = 'sudo rm ' + entry\n os.system(cmd)", "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "def cleanup_precluster_intermediate_files(batch_index):\n files = [\"seed{0}.S.fasta\".format(batch_index),\n \"seed{0}.orphans.fasta\".format(batch_index),\n \"batch{0}.fasta\".format(batch_index),\n \"batch{0}.remains.fasta\".format(batch_index),\n \"batch{0}.remains2.fasta\".format(batch_index)]\n\n files += glob.glob(\"batch{0}*.minimap\".format(batch_index))\n for file in files:\n try:\n os.remove(file)\n except:\n print >> sys.stderr, \"Failure to remove {0}. 
Ignore.\".format(file)", "def remove_unused_files(self):\n\n response_list = self.client.api_call(\n f'files.list?'\n f'count=1000&'\n )\n assert response_list['ok']\n\n for file in [\n f for f in response_list['files']\n if not f['channels'] and not f['groups'] and not f['ims']\n ]:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']", "def remove_cruft_files(cls, files):\n valid_files = []\n for changes_file in files:\n if cls.is_changes(changes_file):\n LOG.debug(\"Checking: {c}\".format(c=changes_file))\n try:\n with mini_buildd.misc.open_utf8(changes_file) as cf:\n for fd in debian.deb822.Changes(cf).get(\"Files\", []):\n valid_files.append(fd[\"name\"])\n LOG.debug(\"Valid: {c}\".format(c=fd[\"name\"]))\n\n valid_files.append(os.path.basename(changes_file))\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file: {f}\".format(f=changes_file), e, logging.WARNING)\n\n for f in files:\n if os.path.basename(f) not in valid_files:\n # Be sure to never ever fail, just because cruft removal fails (instead log accordingly)\n try:\n if os.path.isdir(f):\n shutil.rmtree(f)\n else:\n os.remove(f)\n LOG.warning(\"Cruft file (not in any changes file) removed: {f}\".format(f=f))\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Can't remove cruft from incoming: {f}\".format(f=f), e, logging.CRITICAL)", "def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()", "def removeIntermediateStereoFiles(stereoPrefix):\n\n # List of all non-final-output files\n fileList = ['-align-L.exr', \\\n '-align-R.exr', \\\n '-DEM.tif.aux.xml', \\\n '-D_sub.tif', \\\n '-D.tif', \\\n '-F.tif', \\\n '-GoodPixelMap.tif', \\\n '-lMask_sub.tif', \\\n '-lMask.tif', \\\n '-L_sub.tif', \\\n '-L.tif', \\\n '-RD.tif' ,\\\n '-rMask_sub.tif', \\\n '-rMask.tif', \\\n '-R_sub.tif' ,\\\n '-R.tif']\n # Remove each of those files\n for f in fileList:\n path = stereoPrefix + f\n removeIfExists(path)", "def destroyer(): # ;-)\n\n def find_files_to_remove(pyfile):\n for filename in (\"%sc\" % pyfile, \"%so\" % pyfile):\n if exists(filename):\n yield filename\n\n counter = 0\n try:\n while True:\n pyfile = (yield)\n for filename in find_files_to_remove(pyfile):\n try:\n log.debug('removing %s', filename)\n remove(filename)\n counter += 1\n except (IOError, OSError), e:\n log.error('cannot remove %s', filename)\n log.debug(e)\n except GeneratorExit:\n log.info(\"removed files: %s\", counter)", "def cleaning_this_directory():\n import os, shutil\n files = os.listdir(\".\")\n for f in files:\n if os.path.isfile(f):\n extension = f.split(\".\")[-1]\n if extension == 'jpg':\n #move the file\n os.rename(f, \"images/\"+f)\n elif extension == 'JPG':\n #move to xml file\n os.rename(f, 'xml/'+f)\n else:\n pass", "def _clean_filename(name):\n return re.sub(\"[^\\\\w .]\", \"\", name)", "def clean():\n for root, dirs, files in os.walk('.'):\n for item in dirs:\n if (item[0]!='.'):\n try:\n os.remove(os.path.join(item,'.DS_Store'))\n except:\n pass", 
"def removeFilenameValidate(call, args=(), kwargs={}, nodeClass='Write'):", "def remove_intermediate_files(dir_):\n file_list = glob.glob(f'{dir_}/*temp*')\n [os.remove(f) for f in file_list]", "def test_upload_area_cleanup(self):\n vis2_uvid='urn:mrn:stm:service:instance:furuno:vis2'\n p = Path('import')\n files = list(p.glob('**/urn:mrn:s124:*'))\n for item in files:\n print(item)\n os.remove(str(item))\n pass", "def removeOldFiles(directory, time):\n modTime = 60 * 60 * 24 * 4 # max number of seconds to keep aux files\n for file in listdir(directory):\n if time - path.getmtime(path.join(directory, file)) > modTime and\\\n file != 'reveal.js':\n try:\n remove(path.join(directory, file))\n except OSError:\n pass", "def remove_hidden_files(files):\n hidden = []\n for f in files:\n if f.startswith(\".\"):\n hidden.append(f)\n\n for h in hidden:\n files.remove(h)", "def fix_filename(self):\n if not self.remove_path:\n return\n self.filename = re.sub(\".+\\/\", \".../\", self.filename)", "def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)", "def remove_files(file_list):\n###############################################################################\n for fpath in file_list:\n if os.path.exists(fpath):\n os.remove(fpath)\n # End if\n # End for", "def clean_filelist(fnlist):\n cntClean = 0\n for fn in fnlist:\n try:\n with h5py.File(fn,\n 'r+') as handle: # ref: https://docs.h5py.org/en/stable/high/file.html?highlight=h5py.File#h5py.File\n if args.groupName in list(handle.keys()): # clean if found any group named 'Analyses'\n del handle[args.groupName]\n cntClean += 1\n except: ## avoid corrupted fast5 files\n pass\n return cntClean", "def clean_dir_filtered(dr, filters):\n # type: (path, List[str]) -> None\n for f in os.listdir(dr):\n for fltr in filters:\n if fltr in f:\n os.remove(f)\n continue", "def __rm_general(file_contents: str) -> str:\n\n new_file_contents = file_contents\n\n for regex in COBOL_FORMAT_RM_REGEXES:\n for match in re.finditer(regex, file_contents):\n match_str = match_to_str(match)\n new_file_contents = new_file_contents.replace(match_str, '')\n\n return new_file_contents", "def cleanStamps(self, criteria):\n dirEmpty = True\n for s in self.iterStamps():\n if criteria(s):\n os.unlink(self.getFile(s))\n else:\n dirEmpty = False\n try:\n os.rmdir(self.path)\n except OSError:\n pass", "def remove_old_files(cls, document):\n tempdir = tempfile.mkdtemp()\n try:\n tempname = os.path.join(tempdir, 'new.zip')\n with zipfile.ZipFile(document, 'r') as zipread:\n with zipfile.ZipFile(tempname, 'w') as zipwrite:\n for item in zipread.infolist():\n # If pattern does not match then add file to the new zip file.\n if not OLD_FILE_PATTERN.match(item.filename):\n data = zipread.read(item.filename)\n zipwrite.writestr(item, data)\n shutil.move(tempname, document)\n finally:\n shutil.rmtree(tempdir)", "def clearString(name):\n if name.find(\".tar\") != 0:\n name = name.replace(\".tar\", \"\")\n\n if name.find(\".gz\") != 0:\n name = name.replace(\".gz\", \"\")\n return name", "def clean_files(folder=\".\", posreg='.*[.]((py)|(rst))$',\n negreg=\".*[.]git/.*\", op=\"CR\", fLOG=print):\n def clean_file_cr(name):\n with open(name, \"rb\") as f:\n content = f.read()\n new_content = content.replace(b\"\\r\\n\", b\"\\n\")\n if new_content != content:\n with open(name, \"wb\") as f:\n f.write(new_content)\n return True\n return False\n\n def clean_file_cr_back(name):\n with open(name, \"rb\") as f:\n lines = 
f.read().split(b'\\n')\n new_lines = []\n changes = False\n for li in lines:\n if not li.endswith(b'\\r'):\n new_lines.append(li + b'\\r')\n changes = True\n else:\n new_lines.append(li)\n if changes:\n with open(name, \"wb\") as f:\n f.write(b'\\n'.join(new_lines))\n return changes\n\n if op == 'CR':\n clean_file = clean_file_cr\n elif op == 'CRB':\n clean_file = clean_file_cr_back\n elif op == 'pep8':\n from .code_helper import remove_extra_spaces_and_pep8\n clean_file = remove_extra_spaces_and_pep8\n else:\n raise ValueError(f\"Unknown cleaning '{op}'.\")\n\n if posreg and isinstance(posreg, str):\n posreg = re.compile(posreg)\n if negreg and isinstance(negreg, str):\n negreg = re.compile(negreg)\n\n res = []\n for root, _, files in os.walk(folder):\n for f in files:\n full = os.path.join(root, f)\n rel = os.path.relpath(full, folder)\n fn = rel.replace(\"\\\\\", \"/\")\n if posreg is None or posreg.search(fn):\n if negreg is None or not negreg.search(fn):\n r = clean_file(full)\n if r and fLOG:\n fLOG(f\"[clean_files] processed '{fn}'\")\n res.append(rel)\n return res", "def _remove_all_manifest_files(self):\n manifest_files = Path(self.manifest_dir).glob(\"**/elyra-component-manifest-*.json\")\n for file in manifest_files:\n os.remove(str(file))", "def clean(self):\n original_dir = os.getcwd()\n os.chdir(self.output)\n\n # Clear out directory\n file_list = os.listdir(self.output)\n\n for afile in file_list:\n if not afile.endswith('.gitignore'):\n path = os.path.join(self.output, afile)\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n os.chdir(original_dir)", "def perform_cleanup(scratch_image_name, scratch_text_name_root):\n\tfor name in (scratch_image_name, scratch_text_name_root + '.txt', \"tesseract.log\"):\n\t\ttry:\n\t\t\tos.remove(name)\n\t\texcept OSError:\n\t\t\tpass" ]
[ "0.72445166", "0.70031506", "0.6998387", "0.69157773", "0.69150037", "0.68766177", "0.68629783", "0.68068534", "0.67834073", "0.6767647", "0.6741178", "0.6736381", "0.67008567", "0.662713", "0.66061443", "0.65724576", "0.6555076", "0.65424263", "0.6495746", "0.64890414", "0.64828247", "0.6469914", "0.6463196", "0.64572746", "0.6445273", "0.6442266", "0.64275086", "0.6427492", "0.64256257", "0.64117014", "0.639436", "0.6345176", "0.6323234", "0.6316361", "0.6293904", "0.62729824", "0.6266405", "0.6264475", "0.6251614", "0.6250235", "0.6248341", "0.62395096", "0.6222515", "0.6222222", "0.62108594", "0.6198515", "0.6197343", "0.6192593", "0.6187996", "0.61758155", "0.6165655", "0.6153017", "0.61485445", "0.6115562", "0.61106825", "0.6107912", "0.61065185", "0.60982984", "0.6070241", "0.6043619", "0.6028167", "0.60202765", "0.6016989", "0.6010982", "0.60078347", "0.5981264", "0.59757847", "0.5945591", "0.59454614", "0.5945186", "0.5944186", "0.5940303", "0.5930731", "0.5930368", "0.5922348", "0.59194213", "0.59150535", "0.59132886", "0.590666", "0.5901996", "0.59006584", "0.5898685", "0.58960694", "0.5894467", "0.5892612", "0.5892196", "0.5881797", "0.5879252", "0.5872699", "0.5872057", "0.58658785", "0.58624417", "0.5860938", "0.5860086", "0.5859417", "0.5853785", "0.5851222", "0.58450294", "0.5825975", "0.5819559" ]
0.5960827
67
Returns an enumeration member with a value matching `value`.
def get_member(
    cls,
    value: str,
):
    if not value:
        return None

    members = [
        (member, member.value)
        for member in cls.__members__.values()
    ]

    for member, member_value in members:
        if member_value == value:
            return member

    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Enum(enum, value, default=None):\n if value is None:\n return default\n\n for pair in enum:\n if pair.value == value:\n return pair\n\n raise KeyError(\"Value '{}' not contained in enum type\".format(value))", "def from_value(cls, value: str):\n return cls._value2member_map_[value]", "def test_get_enum_by_value():\n assert BusinessType.get_enum_by_value('CP') == BusinessType.COOPERATIVE\n assert BusinessType.get_enum_by_value('FM') == BusinessType.PARTNERSHIP_AND_SOLE_PROP\n assert BusinessType.get_enum_by_value('NOT_FOUND') is None", "def parse(\n cls,\n value: str\n ):\n\n if value is None or len(value) == 0:\n raise ValueError(\"provided value may not be None or empty\")\n\n for item in cls:\n if value == item.value:\n # found a matching value\n return item\n\n # Fallback value in case the API adds an enum that is not supported\n # by an older version of the SDK\n return cls.Unknown", "def __getitem__(self, key):\n try:\n if utils.is_str(key):\n key = utils.force_name_case(key)\n return next(enum for enum in self if enum.name == key)\n else:\n return self._enums[key]\n except (StopIteration, TypeError, KeyError, IndexError):\n raise KeyError(\"There is no enum with the name/index '%s' in the '%s' bitfield!\" % (key, self.name))", "def from_value(cls, value):\n value = value if value else 0\n try:\n flags = [flag.name for flag in cls.enum_class if flag.value & value]\n except TypeError:\n flags = [flag.name for flag in cls.enum_class if flag.name == value]\n\n return cls(*flags)", "def getName(cls, value):\n for v, n in cls.iterate():\n if v == value:\n return n\n\n raise ValueError('Value {0} not found in {1}'.format(value, cls.__name__))", "def member_status(value):\n for status in models.MEMBER_STATUS:\n if status[0]==value:\n return status[1]\n\n return \"MEMBER STATUS NOT FOUND\"", "def get_enum_value_row(enum_field, enum_value):\n # Translate plural, if given\n enum_field = ENUM_PLURALS_TRANSLATE[enum_field] if enum_field in ENUM_PLURALS_TRANSLATE else enum_field\n return apps.get_model('ahj_app', enum_field).objects.get(Value=enum_value)", "def cast_value_to_enum(attribute: Any, widget_value: str):\n enum_class: MyEnum = attribute.__class__\n return (t for i, t in enumerate(enum_class)\n if t.value == widget_value).__next__()", "def get_by(cls, name, value):\n return cls.query(getattr(cls, name) == value).get()", "def from_value(value):\r\n result = TokenKind._value_map.get(value, None)\r\n\r\n if result is None:\r\n raise ValueError('Unknown TokenKind: %d' % value)\r\n\r\n return result", "def check_enum(enumerator, value):\n is_valid = False\n for data in enumerator:\n if data == value:\n is_valid = True\n break\n\n if is_valid:\n return value\n else:\n my_banner(\"Value must be from enum \" + enumerator +\" Value has been set to N/A\")\n return \"na\"", "def enum_value(cls: Any, e: Any) -> Any:\n if is_enum(e):\n v = e.value\n # Recursively get value of Nested enum.\n if is_enum(v):\n return enum_value(v.__class__, v)\n else:\n return v\n else:\n return cls(e).value", "def _get_data_type_name_by_value(data_type, value, field_name='data_type'):\n return data_type.DESCRIPTOR.fields_by_name[field_name].enum_type.values_by_number[value].name", "def from_int(value):\n for item in Sfc5xxxUnit:\n if item.value == value:\n return item\n raise ValueError(\"Invalid unit value: {}!\".format(value))", "def __getitem__(self, value):\n\n # Select the correct index\n if isinstance(value, six.integer_types):\n idx = self.by_value\n elif isinstance(value, six.string_types):\n idx = 
self.by_name\n else:\n raise KeyError(value)\n\n # Look up the value in that index\n return idx[value]", "def FromString(cls, value: str):\n for _, member in cls.__members__.items():\n if member.value == value:\n return member\n raise LookupError('Invalid component: ' + value)", "def get(cls, value):\n for k in cls:\n if k.value == value:\n return k\n\n raise KeyError(f'Cannot get key by value \"{value}\" of {cls}')", "def getItem(enum, index):\r\n return enum[list(enum.__members__)[index]]", "def with_value(self, value):\n return type(self)(self.name, self.type, value, self.metadata or None)", "def by_label(self, value: str) -> RegistryType:\n return {k: v for k, v in self.items() if k == value}", "def EnumValueName(self, enum_value, enum_type):\n return '%s_%s' % (self.ToPpapiType(enum_type).upper(),\n enum_value.name.upper())", "def get_by_field(self, field, value):\n for item in self.items:\n if item.__dict__[field] == value:\n return item\n return None", "def _enum_getter(enum):\n def getter(name):\n try:\n return enum[name]\n except KeyError:\n return name\n getter.__name__ = enum.__name__\n return getter", "def from_int(value):\n for item in Sfc5xxxUnitPrefix:\n if item.value == value:\n return item\n raise ValueError(\"Invalid unit prefix value: {}!\".format(value))", "def get_by(self, field, value):\n return self._client.get_by(field, value)", "def to_python(self, value):\n if isinstance(value, self.enum_class):\n return value\n value = super(self.__class__, self).to_python(value)\n if isinstance(value, int):\n return self.enum_class(value)\n assert value is None\n return None", "def enum_lookup(enumtype, name):\n # type: (typing.Type[T], str) -> Optional[T]\n try:\n return enumtype[name]\n except LookupError:\n return None", "def get_attr(self, value):\n return self.index[value]", "def is_enum_value(enumeration, potential_value):\n try:\n enumeration(potential_value)\n except ValueError:\n return False\n\n return True", "def contains(self, value):\n n = self.search(value)\n return (n.value==value, n)", "def from_int(cls, value: int) -> 'Enum':\n if value not in cls.tags():\n raise ValueError(f'the following integer value is not defined within the enumeration: {str(value)}')\n\n return value", "def search(self, value):\r\n node = self.head\r\n while node:\r\n if node.value == value:\r\n return node\r\n node = node.next\r\n raise ValueError('Value not found')", "def getNameByValue(self, value):\n pass", "def get_child(self, value):\n for node in self.children:\n if node.value == value:\n return node\n\n return None", "def from_int(value):\n for item in Sfc5xxxUnitTimeBase:\n if item.value == value:\n return item\n raise ValueError(\"Invalid unit time base: {}!\".format(value))", "def handle_enum_input(_, param, value):\n try:\n tup = _ENUM_MAPPING.get(param.name)\n assert tup is not None, \"Must add {} to _ENUM_MAPPING\".format(param.name)\n # Force the correct type onto the value.\n return tup[0](tup[1](value))\n except ValueError as err:\n raise click.BadParameter(str(err))", "def db_value(self, val):\n\n # If we're using a native Enum field, just let the DBMS decide what to do\n # Otherwise, allow values in the enum_list and the null value (which may\n # be rejected by the database, but that's not our problem.)\n if self.native or val in self.enum_list or val is None:\n return val\n else:\n raise ValueError(f'invalid value \"{val}\" for EnumField')", "def get_value(o, object_name, value_name, value_type):\n try:\n return o[value_name]\n except KeyError:\n l.warning(\"The field 
{} doesn't exists in {}: {}\".format(value_name, object_name, ujson.dumps(o)))\n return value_type()", "def repr_enum(value, enum=None):\n assert enum is not None, \"`enum` required\"\n\n for key, value in enum._asdict().iteritems():\n if value == value:\n return repr(key)\n\n raise KeyError(\n \"%s does not map to a key in %s\" % (repr(value), enum.__class__))", "def _fromflagname(cls, name:str, default=...) -> enum.Enum:\n if default is not Ellipsis:\n return cls._LOOKUP.get(name, default)\n return cls._LOOKUP[name]", "def might_contain(self, value):\n h = self.hash_value(value)\n return self.values[h]", "def search(self, value):\n return self._search(self.head, value)", "def _parse_enum(type, item):\n try:\n return type[item]\n except:\n return type(item)", "def the_option_with_value(value: str) -> \"SelectByValue\":\n return SelectByValue(value)", "def test_enum(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.enum_type.EnumFoo.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data['items']:\n if item['uid'] == 'example.enum_type.EnumFoo':\n self.assertEqual(\n item['children'],\n ['example.enum_type.EnumFoo.VALUE0', 'example.enum_type.EnumFoo.VALUE1']\n )\n if item['uid'] == 'example.enum_type.EnumFoo.VALUE0':\n self.assertEqual(\n item['syntax'],\n {'content': 'VALUE0 = 0', 'return': {'type': ['example.enum_type.EnumFoo']}}\n )\n self.assertEqual(\n item['type'],\n 'attribute'\n )\n if item['uid'] == 'example.enum_type.EnumFoo.VALUE1':\n self.assertEqual(\n item['syntax'],\n {'content': 'VALUE1 = 1', 'return': {'type': ['example.enum_type.EnumFoo']}}\n )\n self.assertEqual(\n item['type'],\n 'attribute'\n )", "def get_by(cls, name, value, keys_only=None):\n return cls.query(getattr(cls, name) == value).get(keys_only=keys_only)", "def get(key, default=-1):\n if isinstance(key, int):\n return TransType(key)\n if key not in TransType._member_map_: # pylint: disable=no-member\n extend_enum(TransType, key, default)\n return TransType[key]", "def by_label_contains(self, value: str) -> RegistryType:\n return {k: v for k, v in self.items() if value in k}", "def from_db_value(self, value, expression, connection, context):\n # can't call super. 
See\n # https://docs.djangoproject.com/en/1.9/ref/models/fields/#django.db.models.Field.from_db_value\n if isinstance(value, int):\n try:\n return self.enum_class(value)\n except ValueError:\n raise ValidationError(\n 'Invalid enum integer value {} for {}'.format(value, self.enum_class))\n\n assert value is None\n return None", "def getName(cls, itemValue):\n for name, value in cls.iterate():\n if itemValue == value:\n return name\n\n raise ValueError('Value {0} not found in {1}'.format(itemValue, cls.__name__))", "def getItem(self, value):\r\n # If the tree contains no items, return false\r\n if self.empty():\r\n return False\r\n\r\n # If the returned node is False, it wasn't found and an error should be given and False returned\r\n node = self.descend_to_node(value)\r\n if node:\r\n return node.object\r\n else:\r\n print(\"Value\", value, \"not found.\")\r\n return False", "def get_value(value: str, registers: dict):\n\n if value in registers:\n return registers[value]\n\n return int(value)", "def get_prep_value(self, value):\n if value is None:\n return value\n if isinstance(value, self.enum_class):\n return super(self.__class__, self).get_prep_value(value.value)\n if isinstance(value, Enum):\n raise ValueError('{} is of the wrong Enum type.'.format(value))\n return super(self.__class__, self).get_prep_value(value)", "def __getitem__(self, value) -> Node:\n self.value = value\n self.next_value = None\n if value in map(lambda x: x.value, self.nodes):\n return value\n\n else:\n return False", "def post_process(self, value: 'int | bytes', packet: 'dict[str, Any]') -> 'Enum_AppType':\n value = super(EnumField, self).post_process(value, packet)\n return self._namespace.get(value, proto=Enum_TransportProtocol.tcp)", "def parse(value: str):\n return [member for member in FilterMode if member.name == value][0]", "def cast(self, value: Any) -> Any:\n for val in self.values:\n if val['value'] == value:\n return value\n raise err.InvalidArgumentError(\"unknown value '{}'\".format(value))", "def _get_value(self, value):\r\n try:\r\n return int(value)\r\n except ValueError:\r\n return self.registers[value]", "def find_value(code, value):\n value_pattern = re.compile(rf\"{re.escape(value)} ?= ?([^=][a-zA-Z0-9\\.'/_)(]*)\")\n\n target = None\n for line in code:\n if value_pattern.search(line):\n target = re.findall(value_pattern, line)\n break\n\n return target[0] if target is not None else value", "def _parse(self, val: str):\n if val is None:\n return val\n\n if self._enum_class and isinstance(val, self._enum_class):\n return val # Directly return the enum value if it is the enum.\n\n if val not in self._str2enum:\n msg = \"Not a valid enum value: '{}', valid values: {}\"\n raise ValidationException(\n message=msg.format(val, \", \".join(self.enum)),\n no_personal_data_message=msg.format(\"[val]\", \"[enum]\"),\n error_category=ErrorCategory.USER_ERROR,\n target=ErrorTarget.PIPELINE,\n error_type=ValidationErrorType.INVALID_VALUE,\n )\n return self._str2enum[val]", "def by_label(self, value):\n return {k: v for k, v in self.items() if k == value}", "def has(self, value):\n return Filter(self, value, 'has')", "def __new__(cls, index):\n # If is enum type of this class, return it.\n if isinstance(index, cls):\n return index\n\n # If number, look up by number.\n if isinstance(index, six.integer_types):\n try:\n return cls.lookup_by_number(index)\n except KeyError:\n pass\n\n # If name, look up by name.\n if isinstance(index, six.string_types):\n try:\n return cls.lookup_by_name(index)\n except 
KeyError:\n pass\n\n raise TypeError('No such value for %s in Enum %s' %\n (index, cls.__name__))", "def translate_from_rpc(rpc_enum_value):\n return {\n 0: StatusText.StatusType.INFO,\n 1: StatusText.StatusType.WARNING,\n 2: StatusText.StatusType.CRITICAL,\n }.get(rpc_enum_value, None)", "def lookup(self, val):\n vid = id(val)\n return self.values.get(vid, None)", "def enum_value(self):\r\n if not hasattr(self, '_enum_value'):\r\n assert self.kind == CursorKind.ENUM_CONSTANT_DECL\r\n # Figure out the underlying type of the enum to know if it\r\n # is a signed or unsigned quantity.\r\n underlying_type = self.type\r\n if underlying_type.kind == TypeKind.ENUM:\r\n underlying_type = underlying_type.get_declaration().enum_type\r\n if underlying_type.kind in (TypeKind.CHAR_U,\r\n TypeKind.UCHAR,\r\n TypeKind.CHAR16,\r\n TypeKind.CHAR32,\r\n TypeKind.USHORT,\r\n TypeKind.UINT,\r\n TypeKind.ULONG,\r\n TypeKind.ULONGLONG,\r\n TypeKind.UINT128):\r\n self._enum_value = \\\r\n conf.lib.clang_getEnumConstantDeclUnsignedValue(self)\r\n else:\r\n self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)\r\n return self._enum_value", "def find(self, value: str, is_sorted=False) -> CompletionElement:\n if is_sorted:\n raise NotImplementedError( # pragma: no cover\n \"No optimisation for the sorted case.\"\n )\n for e in self:\n if e.value == value:\n return e\n return None", "def get_value(value, unit=None):\n if isinstance(value, u.Quantity):\n if unit is not None:\n return value.to(unit).value\n else:\n return value.value\n return value", "def dereference_value(self, value: int) -> int:\n if self.is_register(value):\n return self[value]\n\n return value", "def get_node(self, value):\n\t\treturn self.adjacency_list[value]", "def getValue(self, name: unicode) -> object:\n ...", "def to_representation(self, value):\n try:\n return self._choices[value]\n except KeyError:\n raise Exception('Value: {0} not valid!'.format(value))", "def to_internal_value(self, value):\n for key, key_value in self._choices.items():\n if key_value.strip().lower() == value.strip().lower():\n return key\n\n raise Exception('Value: {0} not supported!'.format(value))", "def getEnum( self, par, path ):\n\n return self.db.getEnumPar( par, path )", "def _to_google_protobuf_value(value):\n lv = struct_pb2.ListValue()\n lv.append(value)\n return lv.values[0]", "def __getitem__(self, name):\n assert self._unparsed is not None, \\\n ('Flags have not been parsed yet: cannot access flag %r' % name)\n try:\n return self._defs[name].value\n except KeyError as err:\n if self._parent is not None:\n return self._parent[name]\n raise err", "def get_from_list(list_search, key, value):\n # TODO: Handle iteration error better\n return next(element for element in list_search if getattr(element, key) == value)", "def get_key(self, value):\n return [item[0] for item in self.items() if item[1] == value]", "def __getitem__(self, value):\n return self.d.get(value, 0)", "def parse_value(named_reg_value):\n name, value, value_type = named_reg_value\n value_class = REG_VALUE_TYPE_MAP[value_type]\n return name, value_class(value)", "def get(self, field: str, value: str):\n data = {\n 'readByQuery': {\n 'object': 'EEACCOUNTLABEL',\n 'fields': '*',\n 'query': \"{0} = '{1}'\".format(field, value),\n 'pagesize': '1000'\n }\n }\n\n return self.format_and_send_request(data)['data']", "def get_element(self, key, value):\n if isinstance(key, int):\n lst = [s for s in self if s.values[key] == value]\n elif isinstance(key, str):\n lst = [s for s in self if key in 
s and s[key][0] == name]\n else:\n raise ValueError(\"Key argument must be int or str\")\n if len(lst) == 1:\n return lst[0]\n elif len(lst) == 0:\n raise ValueError(\"contents with '%s=%s' not found\" % (key, value))\n else:\n raise ValueError(\"Multiple contents with '%s=%s' not found\" % (key, value))", "def get_value_of(self, attr):\n return getattr(self, attr.upper(), 0)", "def getValue(self,value):\n if value in self.header.keys():\n return self.header[value]\n if value in self.subintinfo.keys():\n return self.subintinfo[value][-1]\n if self.params is None:\n return None\n return self.params.get(value) #will return None if non-existent", "def _missing_(cls, value):\n enum_values = set(e.value for e in cls)\n\n value = cls.clean(value)\n\n if value not in enum_values:\n raise ValueError(\"%r is not a valid %s\" % (value, cls.__name__))\n\n return cls(value)", "def get_child(self, val):\n if val in self._children:\n return self._children[val]", "def matches(self, value):\n return value == self.attributes[AT.VALUE]", "def get_value_label(self, value):\n return self.label_config.get_index_label(value)", "def find(self, value):\n bucketNum = self.__hash(value)\n result = self.__buckets[bucketNum].find(value)\n return result", "def has_value(cls, value):\n return value in [item.value for item in cls]", "def extractWrappedForaConstant(self, value):\n if isinstance(value, ForaValue.FORAValue):\n value = value.implVal_\n if not isinstance(value, ForaNative.ImplValContainer):\n return value\n\n return value.getObjectLexicalMember(\"@m\")[0].pyval", "def get_enum_value_row_else_null(enum_field, enum_value):\n try:\n if enum_value is None:\n return None\n elif isinstance(enum_value, list):\n return [get_enum_value_row_else_null(enum_field, v) for v in enum_value]\n return get_enum_value_row(enum_field, enum_value)\n except ObjectDoesNotExist:\n return None", "def alfred_items_for_value(value):\n index = 0\n results = []\n\n config_list = [\n ('t2s.json', u'繁體到簡體', 'SimplifiedChinese.png'),\n ('s2t.json', u'簡體到繁體', 'TraditionalChinese.png'),\n ('s2tw.json', u'簡體到臺灣正體', 'TW_taiwan.png'),\n ('tw2s.json', u'臺灣正體到簡體', 'CN_china.png'),\n ('s2hk.json', u'簡體到香港繁體', 'HK_hongKong.png'),\n ('hk2s.json', u'香港繁體(香港小學學習字詞表標準)到簡體', 'CN_china.png'),\n ('tw2sp.json', u'繁體(臺灣正體標準)到簡體並轉換爲中國大陸常用詞彙', 'CN_china.png'),\n ('s2twp.json', u'簡體到繁體(臺灣正體標準)並轉換爲臺灣常用詞彙', 'TW_taiwan.png'),\n ]\n for config_file, description, icon in config_list:\n converter = opencc.OpenCC(\n config=config_file, opencc_path='/usr/local/bin/opencc')\n item_value = converter.convert(value)\n results.append(alfred.Item(\n title=item_value,\n subtitle=description,\n attributes={\n 'uid': alfred.uid(index),\n 'arg': item_value,\n },\n icon=icon,\n ))\n index += 1\n\n return results", "def testFindEnum(self):\n class Color(messages.Enum):\n pass\n A = self.DefineMessage('a', 'A', {'Color': Color})\n\n self.assertEquals(\n Color,\n messages.find_definition('Color', A, importer=self.Importer))", "def get(key, default=-1):\n if isinstance(key, int):\n return Packet(key)\n if key not in Packet._member_map_: # pylint: disable=no-member\n extend_enum(Packet, key, default)\n return Packet[key]", "def searchActiveLabel(self, value):\n\n\t\traise foundations.exceptions.ProgrammingError(\n\t\t\"{0} | '{1}' attribute is read only!\".format(self.__class__.__name__, \"searchActiveLabel\"))", "def getValue(self, state):\n return self.values[state]", "def task_3_find_item_via_value(data: DT, value) -> DT:\n find = [find for find in data for n in find.values() if n 
== value]\n return find" ]
[ "0.67772806", "0.66527843", "0.64541763", "0.6362393", "0.611267", "0.6082106", "0.5995382", "0.59891886", "0.5980627", "0.5902926", "0.5802926", "0.57012653", "0.560989", "0.5605811", "0.5560168", "0.55101895", "0.55054975", "0.5468104", "0.5463636", "0.5452052", "0.5404608", "0.53592795", "0.5358487", "0.53295296", "0.5295095", "0.5256075", "0.5239886", "0.51944584", "0.5184017", "0.515656", "0.5119584", "0.50912255", "0.50695", "0.50507957", "0.5046887", "0.5037102", "0.5022552", "0.50038964", "0.50000805", "0.49967062", "0.49762788", "0.497262", "0.49724904", "0.49639535", "0.4946661", "0.49317473", "0.4924342", "0.48957452", "0.4890929", "0.48899204", "0.48823267", "0.48654467", "0.48527262", "0.48329678", "0.48322377", "0.48267248", "0.48237768", "0.48186573", "0.48121464", "0.4810947", "0.47971824", "0.47937664", "0.47851154", "0.47824526", "0.47765666", "0.47753495", "0.47751525", "0.4755198", "0.47535583", "0.4742778", "0.47263277", "0.47249088", "0.4679805", "0.46562767", "0.46493793", "0.46404508", "0.46276057", "0.46220878", "0.46160176", "0.46093526", "0.46079323", "0.46064723", "0.4600217", "0.4597727", "0.45964032", "0.45838898", "0.45835438", "0.45807686", "0.45752913", "0.45751008", "0.4568233", "0.4564133", "0.45563003", "0.45449862", "0.45333868", "0.45266083", "0.45210516", "0.45154136", "0.45067397", "0.45062253" ]
0.7054582
0
Decorator that can be used to return the first item of a callable's `list` return.
def return_first_item(func):
    # Define the wrapper function.
    def wrapper(self, *args, **kwargs):
        # Execute the decorated method with the provided arguments.
        result = func(self, *args, **kwargs)

        # If the function returned a result and that result is a list then
        # return the first item on that list.
        if result and isinstance(result, list):
            result = result[0]

        return result

    return wrapper
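A minimal usage sketch of the decorator above, for illustration only; the `Records` class and its `get_all` method are hypothetical stand-ins for any method that returns a list:

class Records:
    def __init__(self, items):
        # Store the underlying list of items.
        self._items = items

    @return_first_item
    def get_all(self):
        # Returns the full list; the decorator reduces it to its first item.
        return self._items


records = Records(["alpha", "beta", "gamma"])
print(records.get_all())  # -> "alpha"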
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(items):\n return next(iter(items or []), None)", "def first(items):\r\n return items[0]", "def first(l):\n return next(iter(l), None)", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def first(sequence, default=Ellipsis):\n if default is Ellipsis:\n return next(iter(sequence))\n else:\n return next(iter(sequence), default)", "def return_first(fn):\n def wrapped(*args, **kwargs):\n res = fn(*args, **kwargs)\n return res if _HVD.rank() == 0 else None\n return wrapped", "def first(iterable: t.Iterable[T]) -> T:\n return next(iter(iterable))", "def first(self, func: Callable[[T], bool], default=None, raise_exception: bool=True) -> Optional[T]:\n if raise_exception:\n return next(iter(filter(func, self.array)))\n return next(iter(filter(func, self.array)), default)", "def first(xs):\n if not xs:\n return None\n return xs[0]", "def first(xs):\n if not xs:\n return None\n return xs[0]", "def decorator(arg):\n return lambda: list(arg)", "def _sfn(x):\n if len(x) == 1:\n return x[0]\n return fn(*x)", "def first(collection):\n return next(iter(collection))", "def first(collection):\n return next(iter(collection))", "def first(iterable: Iterable[T1], predicate: Callable[[T1], bool]) -> Union[T1, None]:\n for x in iterable:\n if predicate(x):\n return x\n return None", "def _resolver_first(self, item: Any, *_: Any) -> Any:\n try:\n return next(iter(item))\n except StopIteration:\n assert False # not supposed to happen in current tests", "def first(seq):\n return next(iter(seq))", "def first(l: iter, predicate):\n for ele in l:\n if predicate(ele):\n return ele\n raise RuntimeError(\"Found nothing to match predicate\")", "def _first(self, \n iterable, \n condition=lambda x: True):\n try:\n return next(x for x in iterable if condition(x))\n except:\n return None", "def first_last_item(input_list: list) -> list:\n\n if len(input_list) > 1:\n return [input_list[0], input_list[-1]]\n else:\n return []", "def memoize(func):\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func", "def getfirst(s):\n return s[0] if isinstance(s, list) else s", "def hd(lst):\n return lst[0] if lst else None", "def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def return_first(x):\r\n if x == []:\r\n return ''\r\n else:\r\n return x[0]", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._element # front aligned with head of list", "def first(x):\n try:\n x = x.to_series()\n except AttributeError:\n pass\n return list(x)[0]", "def first_true(iterable, default=False, pred=None):\n return next(filter(pred, iterable), default)", "def get_first_item(videos):\n\n return next(iter(videos or []), None)", "def first(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).first()\n\n return self[0]", "def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._next._element # front aligned with head of list", "def one():\n return lambda f: lambda x: f(x)", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return 
inside", "def get_first_item(checklist):\r\n return checklist['items'][0]", "def get_function(self, index = 0):\n return self._get_one_param('list_func')[index]", "def _call_or_ret(self, item, *args):\n if callable(item):\n return item(*args)\n return item", "def peek_first(self):\n if self.is_empty(): raise RuntimeError(\"Empty list\")\n return self.head.data", "def __call__(self, func, *args, **kwds):\r\n results = self.map(func, *args, **kwds)\r\n if results:\r\n return results[0]", "def always(value: A) -> Callable[..., A]:\n return Always(value)", "def x(a):\n return a[0]", "def tl(lst):\n return lst[1:] if len(lst) > 1 else None", "def First():\n return CheckForError(lib.Generators_Get_First())", "def first(s):\n assert is_link(s), 'fist only applies to a linked list.'\n assert s != empty, 'empty linked list has no first element.'\n return s[0]", "def get_only_element_from_collection(one_element_collection):\n if len(one_element_collection) != 1:\n raise AssertionError(u'Expected a collection with exactly one element, but got: {}'\n .format(one_element_collection))\n return funcy.first(one_element_collection)", "def get_only(seq: Iterable[T]) -> T:\n it = iter(seq)\n try:\n first_element = it.__next__()\n # we use the sentinel approach rather than the usual (evil) Python \"attempt can catch the\n # exception\" approach to avoid raising zillions of spurious exceptions on the expected\n # code path, which makes debugging a pain\n sentinel = object()\n second_element = next(it, sentinel)\n if second_element is sentinel:\n return first_element\n else:\n got_msg: str\n if isinstance(seq, Sized):\n got_msg = str_list_limited(seq, limit=10)\n else:\n got_msg = f\"{first_element!r}, {second_element!r}, and possibly more.\"\n raise ValueError(f\"Expected one item in sequence but got {got_msg}\")\n except StopIteration:\n raise ValueError(\"Expected one item in sequence but got none\")", "def getfirst(self, key, default=None):\n \n values = self.getlist(key)\n return values[0] if values else default", "def finditem(func, seq):\n return next((item for item in seq if func(item)))", "def any_item(seq, default=None, sort=True):\n if seq is None:\n return default\n if isinstance(seq, (list, tuple)):\n return seq[0] if seq else default\n if isinstance(seq, (str, unicode)):\n return seq\n if hasattr(seq, '__iter__'):\n if sort:\n items = sorted(seq)\n return items[0] if items else default\n else:\n return next(iter(seq), default)\n return seq", "def first(self):\n if self.is_empty():\n raise Empty(\"List is empty!\")\n return self._header._next._element", "def find(func, list_seq):\n for list_item in list_seq:\n if func(list_item):\n return list_item", "def flatten(x): # przerobić na lambda?\n if x==[]:\n return None\n else:\n return x[0]", "def take_first(count):\n def _take_first(iterable):\n return islice(iterable, count)\n return pipe | set_name('take_first(%s)' % count, _take_first)", "def sequence_side_effect(*args):\n seq = list(args)\n\n def rv_fun(*args, **kw):\n return seq.pop(0)\n return rv_fun", "def first(self):\n return self._reduce_for_stat_function(F.first, only_numeric=False)", "def first_true(cls, iterable, default=None, pred=None):\n # first_true([a,b,c], x) --> a or b or c or x\n # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x\n return next(filter(pred, iterable), default)", "def runner(func, iterable, arguments, local=False):\n if local:\n return [func(i, *arguments) for i in iterable]\n else:\n if iterable:\n return group(func.s(i, *arguments) for i in 
iterable)().get()\n else:\n # group()() returns None if group is called with no arguments,\n # leading to an AttributeError with get().\n return []", "def head(array) -> T:\n return array[0]", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def _flatten_one(x):\n return x[0] if is_iterable(x) else x", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def first(s):\n assert is_link(s), 'first only applies to linked lists.'\n assert s != empty, 'empty linked list has no first element.'\n return s[0]", "def getone(func):\n\n def structure(self, *args, **kwargs):\n for obj in func(self, *args, **kwargs): return obj\n return structure", "def call_firstresult(self, methname, *args, **kwargs): \n return MultiCall(self.listattr(methname), *args, **kwargs).execute(firstresult=True)", "def zzX_value(l, f):\n if type(f) is not list:\n return zzX_const(l, f)\n else:\n if not l:\n return f\n else:\n return [zzX_value(l-1, f)]", "def first(s):\n assert is_link(s), \"first only applies to linked lists.\"\n assert s != empty, \"empty linked list has no first element.\"\n return s[0]", "def simple_function(arg1, arg2=1):\n return [arg1] * arg2", "def first(self):\n try:\n return self.next()\n except StopIteration:\n return None", "def first(self):", "def remove_first(lst, elem):\n \"*** YOUR CODE HERE ***\"\n if len(lst) <= 0:\n return []\n if lst[0] == elem:\n return lst[1:]\n return lst[:1] + remove_first(lst[1:], elem)", "def after_first(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[matches[0]+1:])", "def find_item(self, func):\n return next((_(x) for x in self._ if func(x)), None)", "def FirstTrue(values, default=None):\n for value in values:\n if value:\n return value\n return default", "def last(iterable, *default):\n\tassert len(default) <= 1\n\titerable = iter(iterable)\n\n\ttry:\n\t\tx = next(iterable)\n\texcept StopIteration:\n\t\tif default:\n\t\t\treturn default[0]\n\t\traise\n\n\tfor x in iterable:\n\t\tpass\n\treturn x", "def before_first(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[:matches[0]])", "def _advance(ls: List[any]) -> Optional[any]:\r\n try:\r\n return ls.pop(0)\r\n except IndexError:\r\n return None", "def first(s):\n assert is_link(s),\"first only applies ti linked lists.\"\n assert s != empty, \"empty linked list has no first element.\"\n return s[0]", "def wrapper(*args, **kwargs):\n go_back = last(numbers)\n if to_tuple(numbers[-1]) != go_back:\n return goto(go_back if go_back != ('index',) else numbers[0])\n return func(*args, **kwargs)", "def handle_empty_list(item):\n\n return None if len(item) == 0 else item", "def _advance_peek(ls: List[any]) -> Optional[any]:\r\n try:\r\n return ls[0]\r\n except IndexError:\r\n return None", "def wrap_call(*args, return_idx=0):\n return args[return_idx]", "def second(xs):\n if not xs:\n return None\n return xs[1]", "def second(xs):\n if not xs:\n return None\n return xs[1]", "def first(self) -> Element:\n return typing.cast(Element, self[0])", "def get(s: Iterable[T]) -> T:\n return next(iter(s))", "def 
get_first_element(dataset):\n return dataset.first()", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def scalarDecorator(func):\n @wraps(func)\n def scalar_wrapper(*args,**kwargs):\n if numpy.array(args[0]).shape == ():\n scalarOut= True\n newargs= ()\n for ii in range(len(args)):\n if ii == 0:\n newargs= newargs+(numpy.array([args[ii]]),)\n else:\n newargs= newargs+(args[ii],)\n args= newargs\n else:\n scalarOut= False\n result= func(*args,**kwargs)\n if scalarOut:\n return result[0]\n else:\n return result\n return scalar_wrapper", "def extract(l):\n if l is None: return None\n if len(l) > 1:\n raise ValueError('More than 1 Value')\n try:\n return l[0]\n except IndexError:\n return None", "def first(self, rows: List[Row]) -> List[Row]:\n if not rows:\n logger.warning(\"Trying to get first row from an empty list\")\n return []\n return [rows[0]]", "def extract_self_if_method_call(args: List[Any], func: Callable) -> Optional[object]:\n if len(args) > 0:\n method = getattr(args[0], func.__name__, False)\n if method:\n wrapped = getattr(method, \"__wrapped__\", False)\n if wrapped and wrapped == func:\n return args[0]\n\n return None", "def find_first(item, vec):\n @jit # Numba jit uses C-compiled version of the code in this function\n def find_first_iter(item,vec):\n for v in range(len(vec)):\n for i in item:\n if i == vec[v]:\n return v\n\n @jit\n def find_first_sing(item,vec):\n for v in range(len(vec)):\n if item == vec[v]:\n return v\n\n\n if isinstance(item,(tuple,list)):\n return find_first_iter(item,vec)\n else:\n return find_first_sing(item,vec)", "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def pick_one(_lst):\n if len(_lst) == 2:\n return _lst[0] if int(random(2)) else _lst[1]\n elif len(_lst) == 3:\n return _lst[int(random(3))]", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def first(pair):\n\treturn pair[0]", "def first(self): #TODO\r\n result = []\r\n for x in self.first_lookup(self.initialsymbol):\r\n result += x.first()\r\n if len(result) == 1:\r\n return result[0]\r\n return Choice(result)", "def exactly_one(iterable):\n i = iter(iterable)\n try:\n item = next(i)\n except StopIteration:\n raise ValueError(\"Too few items. Expected exactly one.\")\n try:\n next(i)\n except StopIteration:\n return item\n raise ValueError(\"Too many items. Expected exactly one.\")", "def __call__(self, items: List[Item]) -> List[Item]:", "def until_first(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[:matches[0]+1])" ]
[ "0.65695965", "0.65369004", "0.6376428", "0.6358152", "0.6354889", "0.6343154", "0.6285985", "0.62713856", "0.6234703", "0.6234703", "0.61748505", "0.61029243", "0.6100002", "0.6100002", "0.60022485", "0.5952739", "0.5899202", "0.5892642", "0.58904845", "0.58807445", "0.5874004", "0.5869914", "0.58322227", "0.5802878", "0.5801705", "0.5779935", "0.5777", "0.57767045", "0.5774048", "0.5767386", "0.5754596", "0.57437193", "0.57411057", "0.5724504", "0.56613725", "0.5635161", "0.5618201", "0.56169295", "0.56064796", "0.5586049", "0.55834687", "0.55832064", "0.55771023", "0.55642617", "0.55348724", "0.5524935", "0.55233073", "0.5494126", "0.5491906", "0.5468918", "0.5466352", "0.54571885", "0.5443147", "0.54095155", "0.540041", "0.53985745", "0.5364405", "0.5358196", "0.53538126", "0.53452414", "0.5344438", "0.5342326", "0.5331112", "0.53250617", "0.5321184", "0.5318086", "0.53152305", "0.5313696", "0.5308507", "0.53022647", "0.5301819", "0.5297066", "0.52956367", "0.5295437", "0.5292219", "0.5284644", "0.5282892", "0.5274045", "0.52721894", "0.52540344", "0.5253135", "0.52394474", "0.52394474", "0.52288485", "0.52228445", "0.5220863", "0.52180815", "0.5193054", "0.5189593", "0.51894623", "0.51801616", "0.5170695", "0.516426", "0.5162713", "0.51495737", "0.51435816", "0.5138848", "0.51384616", "0.51310843", "0.5126987" ]
0.8338826
0
Decorator that ensures all ``list`` objects in a method's arguments have the same length
def lists_equal_length(func):
    # Define the wrapper function.
    def wrapper(self, *args, **kwargs):
        # Collect all `list` objects from `args`.
        lists_args = [arg for arg in args if isinstance(arg, list)]
        # Collect all `list` objects from `kwargs`.
        lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]
        # Concatenate the lists of `list` objects.
        lists = lists_args + lists_kwargs

        # Check whether all the `list` objects have the same length.
        do_have_same_length = len(set(map(len, lists))) == 1

        # Raise an `InvalidArgumentsError` exception if there's a length
        # mismatch.
        if not do_have_same_length:
            msg_fmt = "The argument lists must have the same length."
            raise InvalidArgumentsError(msg_fmt)

        # Simply execute the decorated method with the provided arguments
        # and return the result.
        return func(self, *args, **kwargs)

    return wrapper
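For illustration only, a hedged usage sketch of the decorator above; `InvalidArgumentsError` is not defined in this record, so a stand-in definition is assumed, and the `Plotter` class is hypothetical:

class InvalidArgumentsError(Exception):
    # Assumed stand-in for the exception type the decorator raises.
    pass


class Plotter:
    @lists_equal_length
    def plot(self, xs, ys):
        # Both argument lists are guaranteed to have the same length here.
        return list(zip(xs, ys))


plotter = Plotter()
print(plotter.plot([1, 2, 3], [4, 5, 6]))  # -> [(1, 4), (2, 5), (3, 6)]
# plotter.plot([1, 2], [4, 5, 6]) would raise InvalidArgumentsError.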
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])", "def __size_restriction_correct_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a tuple\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def __size_restriction_incorrect_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 14\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def decorator(arg):\n return lambda: list(arg)", "def __size_restriction_correct_list_list(self):\n\n strTestName = 'List size higher than the size of other list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __size_restriction_incorrect_list_list(self):\n\n strTestName = 'List size higher or equal to the size of other list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List 1D parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def __len__(self, *args, **kwargs):\n return len(self._list(*args, **kwargs))", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def needs_arglist(self):\n True", "def __DimSiz_restriction_correct_list_parameter_pedantic(self):\n\n strTestName = 'The size of a list dimension lower than a parameter [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimL('parameter1', 'iParameter1', 0, mul=2, pedantic=1) # Size of dimension 0 must be lower than 2 * 'iParameter1'\n\n RxCSObject.iParameter1 = 2\n RxCSObject.parameter1 = [0, 1, 4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_list_parameter(self):\n\n strTestName = 'The size of a list dimension lower than a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimL('parameter1', 'iParameter1', 0) # Size of dimension 0 must be lower than 'iParameter1'\n\n RxCSObject.iParameter1 = 5\n RxCSObject.parameter1 = [0, 1, 2, 4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_arguments(self):\n calls = []\n decorator = self.decorator()\n\n @decorator\n def func(a, b, c):\n calls.append((a, b, c))\n\n func(1, 2, c=3)\n self.assertEqual(calls, [(1, 2, 3)])", "def doing_nothing(A: list):\n pass", "def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )", "def __NDim_restriction_correct_list_parameter(self):\n\n strTestName = 'The number of dimensions in a list lower than a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list parameter\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramNDimH('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 0\n RxCSObject.parameter1 = [4, 2, 11, -1, -4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def getListSize(*args):", "def 
getListSize(*args):", "def __len__(self):\n return len(self.lst)", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def __size_restriction_correct_list_number(self):\n\n strTestName = 'List size higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizH('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def len_list(self) -> int:\n return 1", "def test_sizesetterwithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = [1, 2]\n self.assertEqual(str(e.exception), \"width must be an integer\")", "def size(*args):", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def test_args_count_equal(args: list, target: int) -> bool:\n\n\treturn (args_count(args) == target)", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def _list4_validator(_: object, attrib: 'attrs.Attribute[List[Vec]]', value: object) -> None:\n if not isinstance(value, list):\n raise TypeError(attrib.name + ' should be a list!')\n if len(value) != 4:\n raise ValueError(attrib.name + ' must have 4 values!')", "def validate_too_many_args(args_list):\n if len(args_list) > 1:\n raise TooManyArgsError()", "def length(memoryManager, paramsList):\n handleEmpty(paramsList, \"cannot get length of\")\n head = paramsList[0]\n\n if not validateList(head):\n raise Exception('Tried to get length of non-list')\n # if type(head) == float:\n # return [1.0]\n\n return [float(len(head))]", "def __size_restriction_inccorrect_string_list(self):\n\n strTestName = 'String size equal to the size of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'List ref. parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizEq('parameter1', 'lRefParameter1')\n\n RxCSObject.lRefParameter1 = [13, -4, 6, 5, -8, 9]\n RxCSObject.parameter1 = 'abcde'\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def check_arguments_count(self, node: AnyFunctionDefAndLambda) -> None:\n self.arguments[node] = len(functions.get_all_arguments(node))", "def create_argument_list(self):\n raise NotImplementedError", "def _all_equal(arg):\n return arg.count(arg[0]) == len(arg)", "def __len__(self):\n raise NotImplementedError", "def __size_restriction_correct_string_list(self):\n\n strTestName = 'String size higher or equal to the size of a list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'List ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1')\n\n RxCSObject.lRefParameter1 = [4, 5, 8, 9]\n RxCSObject.parameter1 = 'abbce'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __len__(self, ) -> Any:\n ...", "def accept_arguments(method, number_of_arguments=1):\n if 'method' in method.__class__.__name__:\n number_of_arguments += 1\n func = getattr(method, 'im_func', getattr(method, '__func__'))\n func_defaults = getattr(func, 'func_defaults', getattr(func, '__defaults__'))\n number_of_defaults = func_defaults and len(func_defaults) or 0\n elif method.__class__.__name__ == 'function':\n func_defaults = getattr(method, 'func_defaults', getattr(method, '__defaults__'))\n number_of_defaults = func_defaults and len(func_defaults) or 0\n\n coArgCount = getattr(method, 'func_code', getattr(method, '__code__')).co_argcount\n if(coArgCount >= number_of_arguments and coArgCount - number_of_defaults <= number_of_arguments):\n return True\n\n return False", "def __DimSiz_restriction_correct_list_number_pedantic(self):\n\n strTestName = 'The size of a list dimension equal to a number [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimEq('parameter1', 4, 0, pedantic=1) # Size of dimension 0 must be 4\n\n RxCSObject.parameter1 = [0, 1, 2, 4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def check_args(*args: Tuple[Any, ...], **kwargs: Any) -> None:\n\n # We begin by initializing the maximum number of args we will allow at 0. We will iterate\n # this if by chance we see an argument whose name is \"self\".\n max_arg_len = 0\n\n # iterate through every parameter passed in\n for idx, param_name in enumerate(literal_signature.parameters):\n\n if idx == 0 and (param_name == \"self\" or param_name == \"cls\"):\n max_arg_len += 1\n continue\n\n # if this parameter isn't in kwargs, then it's probably in args. However, we can't check\n # directly because we don't have arg names, only the list of args which were passed in.\n # Thus, the way this check works is to return an error if we find an argument which\n # isn't in kwargs and isn't \"self\".\n if param_name not in kwargs and len(args) > max_arg_len:\n traceback_and_raise(\n AttributeError(\n f\"'{param_name}' was passed into a function as an arg instead of a kwarg. 
\"\n f\"Please pass in all arguments as kwargs when coding/using PySyft.\"\n )\n )", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def empty_list(*args):\n return []", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __sizeof__(*args):", "def __DimSiz_restriction_correct_list_number(self):\n\n strTestName = 'The size of a list dimension equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimEq('parameter1', 1, 1) # Size of dimension 1 must be 1\n\n RxCSObject.parameter1 = [0, 1, 2]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def takes_multiple_arguments(func):\n if func in ONE_ARITY_BUILTINS:\n return False\n elif func in MULTI_ARITY_BUILTINS:\n return True\n\n try:\n spec = getargspec(func)\n except:\n return False\n\n try:\n is_constructor = spec.args[0] == 'self' and isinstance(func, type)\n except:\n is_constructor = False\n\n if spec.varargs:\n return True\n\n if spec.defaults is None:\n return len(spec.args) - is_constructor != 1\n return len(spec.args) - len(spec.defaults) - is_constructor > 1", "def indexists(list, *args): # Technically doesn't have to do with the screen, but it is very useful. 
\n return all([int(arg) < len(list) for arg in args])", "def atomp(lst):\n return not isinstance(lst, list)", "def atomp(lst):\n return not isinstance(lst, list)", "def __len__(self):\n return len(self.list)", "def isList(memoryManager, paramsList):\n if isEmptyList(paramsList):\n return [1.0]\n A = paramsList[0]\n if validateList(A):\n return [0.0] if len(A) <= 1 else [1.0]\n return [0.0]", "def list_wrap(spec):\n if not isinstance(spec, list):\n spec = [spec]\n return spec", "def arguments(*args):\n def decorate(func):\n func.arguments = args\n return func\n return decorate", "def assert_len_eq(lists):\n # Sanity check\n max_len = max(len(p) for p in lists)\n for i, p in enumerate(lists):\n assert len(\n p\n ) == max_len, \"Length check failed!\\nl[{}] has {} elements != {} ({!r})\\n{!r}\".format(\n i, len(p), max_len, p, lists\n )", "def test_empty(self):\n argument = []\n expected = []\n double_preceding(argument)\n self.assertEqual(expected, argument, \"The list is empty.\")", "def __size_restriction_incorrect_list_number(self):\n\n strTestName = 'List size lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizLE('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)", "def __DimSiz_restriction_incorrect_list_parameter(self):\n\n strTestName = 'The size of a list dimension higher than a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimH('parameter1', 'iParameter1', 0, mul=2) # Size of dimension 0 must be higher than 2 * 'iParameter1'\n\n RxCSObject.iParameter1 = 2\n RxCSObject.parameter1 = [0, 1, 2, 4]\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def checkLists(self):\n self.x = self.checkList(self.x)\n self.y = self.checkList(self.y)\n return", "def __DimSiz_restriction_incorrect_list_parameter_pedantic(self):\n\n strTestName = 'The size of a list dimension higher than a parameter [pedantic] (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimH('parameter1', 'iParameter1', 0, mul=2, pedantic=1) # Size of dimension 0 must be higher than 2 * 'iParameter1'\n\n RxCSObject.iParameter1 = 2\n RxCSObject.parameter1 = [0, 1, 4]\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def clearList(*args):", "def clearList(*args):", "def __len__(self):\n return _libsbml.ListWrapperSBase___len__(self)", "def params(cls):\n def method_decorator(method):\n @wraps(method)\n def wrapper(self, *args):\n return method(self, *map(cls, args))\n return wrapper\n return method_decorator", "def check_args(f, got_len, exp_len):\n if not got_len == exp_len:\n msg = \"{0} expects {1} argument; got {2}\".format(f, exp_len, got_len)\n raise error.LispException(msg)", "def check_for_list(check):", "def __init__(self, arg_list: List[_Argument]):\n self.arg_list: 
List[_Argument] = arg_list", "def _validate_args(obj, fn, trait_types, args):\n\n argnames, varargs, varkw, defaults = inspect.getargspec(fn)\n \n actual = []\n for name, value in zip(argnames[1:], args):\n trait_type = trait_types.get(name)\n if trait_type is not None:\n value = trait_type.validate_method_argument(obj, fn, name, value)\n\n actual.append(value)\n\n return actual", "def _CheckLengthOrExpand(param_per_dataset, expected_len, param_name):\n if param_per_dataset is None:\n return None\n if isinstance(param_per_dataset, list):\n if len(param_per_dataset) != expected_len:\n raise ValueError(f'{param_name} doesn\\'t match the size of '\n f'eval_dataset_names: {len(param_per_dataset)} vs '\n f'{expected_len}.')\n else:\n param_per_dataset = [param_per_dataset] * expected_len\n return param_per_dataset", "def arg_rules(max_length: int, type_: str, contains: list):\n \n def arg_rules_decorator(func):\n def function_wrapper(arg): \n if not isinstance(arg, str):\n print('Is not a string')\n return False\n\n if len(arg) > max_length:\n print('Exceeded max_length constraint')\n return False\n\n for el in contains:\n if el not in arg:\n print('Does not contain required elements')\n return False\n \n return func(arg)\n \n return function_wrapper\n \n return arg_rules_decorator", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def __noop_list(self, *args, **kwargs):\n return []", "def size(self, *args):\n pass", "def nested_list_size(inputs: Sequence[Any]) -> List[int]:\n if hasattr(inputs, \"tensors\"):\n return nested_list_size(inputs.tensors) # type: ignore\n if isinstance(inputs[0], dict):\n return nested_list_size(list(inputs[0].items()))\n if hasattr(inputs[0], \"size\") and callable(inputs[0].size):\n return list(inputs[0].size())\n if isinstance(inputs, (list, tuple)):\n return nested_list_size(inputs[0])\n return []", "def __len__(self):\n return _libsbml.ListOf___len__(self)", "def test_list_size_one_even(self):\n argument = [2]\n expect = 2\n actual = find_an_even(argument)\n self.assertEqual(expect, actual)", "def check_arguments_number(args_list,excpected_num):\n if not len(args_list) == excpected_num:\n raise ValueError(\n \"Expected {0} arguements, but found {1}.\".format(\n excpected_num, len(args_list)\n )\n )", "def test_method_list_all(self):\n\n locations_list = Location.list()\n\n # returned object should be a list\n self.assertIsInstance(locations_list, list)\n\n # make sure items returned are not duplicated. 
\n location_set = set(locations_list)\n self.assertEqual(len(locations_list), len(location_set))\n \n # ensure the types of the returned items are all 'Location'\n types = [type(location) for location in locations_list]\n self.assertEqual(len(set(types)), 1)\n self.assertEqual(types[0], Location)", "def test_neg_list_size_with_extra_parameter(self):\n key = ('test', 'demo', 1)\n policy = {'timeout': 1000}\n with pytest.raises(TypeError) as typeError:\n self.as_connection.list_size(key, \"contact_no\", {}, policy, \"\")\n\n assert \"list_size() takes at most 4 arguments (5 given)\" in str(\n typeError.value)", "def can_take_n_args(func, n=2):\n (pos, args, kwargs, defaults) = inspect.getargspec(func)\n if args is not None or len(pos) >= n:\n return True\n return False", "def _is_list(arg):\n if isinstance(arg, dict):\n return False\n if isinstance(arg, str): # Python 3-only, as str has __iter__\n return False\n return (\n not _has_method(arg, \"strip\")\n and _has_method(arg, \"__getitem__\")\n or _has_method(arg, \"__iter__\")\n )" ]
[ "0.6833578", "0.6415181", "0.63303596", "0.61641717", "0.6103861", "0.6006148", "0.59215814", "0.59204984", "0.5868942", "0.57566774", "0.57490146", "0.5735287", "0.572292", "0.5700607", "0.56883466", "0.56657", "0.5599322", "0.5599322", "0.55968153", "0.5568369", "0.5561684", "0.5558008", "0.5543931", "0.553128", "0.553022", "0.55268925", "0.55224335", "0.55217195", "0.5516648", "0.55093", "0.5497656", "0.54940784", "0.54835606", "0.54792756", "0.54635197", "0.54585075", "0.54566956", "0.5456561", "0.5455911", "0.54477674", "0.54421586", "0.5437172", "0.5435759", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.54276055", "0.5422979", "0.54223305", "0.5422328", "0.54082006", "0.53622276", "0.53622276", "0.5358796", "0.53507835", "0.5345619", "0.5340588", "0.5329513", "0.53224367", "0.53211105", "0.53176856", "0.5315366", "0.5314281", "0.53091854", "0.53091854", "0.5309097", "0.5296074", "0.52907526", "0.5290488", "0.52893746", "0.52838796", "0.52834916", "0.5276586", "0.5275476", "0.52604353", "0.5259896", "0.52471304", "0.5244134", "0.52375686", "0.52352434", "0.52336967", "0.5233323", "0.5232987", "0.5226309", "0.52211463" ]
0.8060742
0
Clear the screen and draw the alien.
def draw():
    screen.fill((0, 0, 0))
    alien.draw()
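A context sketch for the snippet above, assuming Pygame Zero: `screen` and `alien` are not defined in the snippet itself; in a Pygame Zero script they would come from the framework and from an Actor created at module level, roughly:

# alien.py -- run with: pgzrun alien.py
alien = Actor("alien")   # loads images/alien.png by Pygame Zero convention
alien.pos = (100, 56)
# Pygame Zero calls the draw() function above each frame; it clears the
# window to black and blits the alien at its current position.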
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n pygame.draw.rect(self.screen,BLACK,(0,0,WINDOWWIDTH,\n WINDOWHEIGHT))\n pygame.display.update()", "def clear(self) -> None:\n\n self.screen.fill(self.bg)", "def draw(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.cannon.draw(self.screen)\n self.objects.draw(self.screen)", "def clearScreen(self):\n background = pygame.Surface(self.getSize())\n background = background.convert()\n background.fill((0, 0, 0))\n self.screen.blit(background, (0, 0))", "def clearScreen():\n dislin.erase()", "def draw(self):\r\n self.__screen.draw_asteroid(self, self.__x, self.__y)", "def draw(self):\r\n\r\n self.screen.fill((0,0,0))\r\n self.sprite_group.draw(self.screen)\r\n pygame.display.flip()", "def draw(self):\r\n self.scr.fill(SCREEN_COLOR)\r\n self.label.draw()\r\n pygame.display.flip()", "def draw(self):\n\n State.screen.draw()", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x,\n self.center.y,\n self.width,\n self.height,\n arcade.color.WHITE)", "def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def on_draw(self):\n self.clear()\n arcade.draw_text(\n \"Game Over - Click to restart\",\n SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 2,\n arcade.color.WHITE,\n 30,\n anchor_x=\"center\",\n )", "def display_pygame():\n sprite_group.clear(screen, eraser_image)\n sprite_group.draw(screen)\n pygame.display.update()", "def draw_screen(self):\n\t\tself.current_screen.draw_screen(self.master_screen)", "def _blank_screen(self):\n self._screen.fill(self._bgcolor)\n pygame.display.update()", "def clear_board(self):\n pygame.draw.rect(self.display, self.white, pygame.Rect(0, 0, self.window_x, self.window_y))\n self.draw_grid()", "def clear(self):\n self.animation.stop()\n self.draw(0, 0, 0, 0, 0)", "def draw(self):\n self.bufferX = (self.appWidth/2) - self.viewX\n self.bufferY = (self.appHeight/2) - self.viewY\n anwp.sl.engine.clear()\n anwp.sl.engine.drawImage(0, 0, self.appWidth, self.appHeight, self.backgroundImage)\n self.drawWarpLines()\n \n # render engine\n anwp.sl.engine.render()\n self.drawSystemInfo()\n self.drawWarpGateInfo()\n self.drawWarpTradeInfo()", "def draw(self, screen):", "def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n self.display_heading()\n self.display_empty_lines()", "def clearScreen():\n pass", "def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()", "def do_paint(self):\r\n curses.curs_set(0)\r\n if self.win:\r\n self.paint()\r\n self.done_paint()", "def clear(self):\n black = neo.Color(0,0,0)\n self.set_all(black)\n self.draw()", "def drawScreen(screen):\n screen.fill(BLACK) # Fill the screen with black.\n \n\n # Flip the display so that the things we drew actually show up.\n pygame.display.flip()", "def draw(self, screen):\n self.draw_left_zone(screen)\n self.draw_middle_zone(screen)\n self.draw_right_zone(screen)", "def clear_screen():\n print('\\n' * TERMINAL_HEIGHT)", "def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()", "def draw_menu(self, screen: curses.window) -> None:\n 
screen.clear()\n sh, sw = screen.getmaxyx()\n self.draw_title_window(screen, 3, sw, 0, 0)\n\n bottom_win_height = sh - 2\n output_win_width = sw // 2 + 25\n input_win_width = sw - output_win_width + 1\n\n self.draw_output_window(screen, bottom_win_height - 1, output_win_width, 2, 0)\n self.draw_input_window(screen, bottom_win_height - 1, input_win_width, 2, output_win_width - 1)\n\n self.draw_status_bar(screen)", "def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)", "def ClearDisplay():\n display.fill(0)", "def renderall(self):\n\n if not self.isinitialized:\n return\n # clear display\n self.screen.fill(BGCOLOR)\n # draw the board\n self.drawBoard()\n # flip the display to show whatever we drew\n pygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.rain_settings.bg_color)\n self.rain.draw(self.screen)\n\n pygame.display.flip()", "def on_draw(self):\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0,\n constants.SCREEN_WIDTH * 1, constants.SCREEN_HEIGHT * 1,\n self.background, alpha=50)", "def clear(self) -> None:\n self.screen.clear()", "def draw_menu(self):\n self.__screen.fill(pygame.Color(\"black\"))\n self.__screen.blit(Constants.Assets.MENU_BACKGROUND_IMG, (0, 0))\n self.__start_button.draw(self.__screen, Constants.WHITE)\n self.__end_button.draw(self.__screen, Constants.WHITE)\n self.__about_button.draw(self.__screen, Constants.WHITE)", "def draw_app(self):\n \n # Start iterations\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n self.display.fill([255, 255, 255])\n self.grid.draw(self.display)\n pygame.display.update()", "def draw(self):\n self.menu_pointer.draw()", "def _draw(self):\n display.draw_maze(self._screen, self._maze, self._settings)\n pygame.display.flip()", "def on_draw():\n window.clear()\n world.draw()", "def clear(self):\n self.display(Image.new(self.mode, self.size))", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def reset_screen() -> None:\n os.system(\"clear\") if os.name == \"posix\" else os.system(\"cls\")\n print(logo)\n print(\"=\" * 80)", "def draw(self):\n\n surf = self.get_oxygen_surface()\n surf.set_alpha(255)\n self.screen.blit(surf, self.pos)", "def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def render(self, screen) -> None:\n screen.fill(self.background_color)\n self.draw_center_circle(screen)\n self.draw_rectangle_field(screen)\n self.draw_defense_areas(screen)\n self.draw_field_divider(screen)\n self.draw_goals(screen)", "def clear(self):\n self.img = PIL.Image.new(self.img.mode, self.img.size, self.background)\n self.drawer = aggdraw.Draw(self.img)", "def draw(self):\n self.screen.fill(App.current_scene.fill_color)\n\n # To turn on debug mode on pure simulation underneath\n # uncomment line below and comment one after\n # if isinstance(App.current_scene, GameScene):\n # App.current_scene.object_mgr.space.debug_draw(self.draw_options)\n\n App.current_scene.draw(self.screen)\n\n pygame.display.flip()", "def 
clear_main(self):\n\n if self.terminate:\n return\n\n self.windows['MAIN'].erase()\n self.windows['MAIN'].border(' ', ' ',\n curses.ACS_HLINE, curses.ACS_HLINE,\n curses.ACS_HLINE, curses.ACS_HLINE,\n curses.ACS_HLINE, curses.ACS_HLINE)", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x, self.center.y, PADDLE_WIDTH, PADDLE_HEIGHT, PADDLE_COLOR)\n pass", "def draw(self, *args, **kwargs):\n self.window.clear()\n self.batch.draw()", "def clear_screen(self):\n if self.x:\n self.move_cur_up((self.prev_x+1)/self.get_col_width())\n self.clear_line(self.get_num_lines(self.prev_lines) +\n self.get_num_lines(['>' + self.prev_str + ' ']))\n #time.sleep(2)", "def clear_screen(self) -> None:\n assert self.screen is not None\n self.screen.clear()\n self.refresh_screen()", "def display_screen(self):\n self.screen.blit(self.bg, (0, 0))\n pygame.display.update()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.sideways_ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n pygame.display.flip()", "def draw(self) -> None:\n assert self.screen is not None\n self.screen.border()\n self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)\n self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)\n\n for index, item in enumerate(self.all_items):\n self.draw_item(index, item)\n\n self.refresh_screen()\n if self._debug_screens: # pragma: no cover all\n with _SCREENDUMP_DIR.joinpath(f\"{self.title}-{time.time()}\").open(\n \"wb\",\n ) as f:\n self.screen.putwin(f)\n with _SCREENDUMP_DIR.joinpath(\n f\"stdscr-{self.title}-{time.time()}\",\n ).open(\"wb\") as f:\n self.screen.putwin(f)", "def draw(self):\r\n arcade.draw_rectangle_filled(self.center.x, self.center.y, self.radius, self.radius, TARGET_SAFE_COLOR)", "def redraw(self):\n # enough to go to front, don't need to clear the line\n sys.stderr.write(self._FRONT)\n self.draw()", "def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)", "def clear_screen():\n os.system('cls')", "def __draw(self, screen):\n\n pygame.draw.rect(screen, (200, 255, 200), (self.x, self.y, self.width, self.height))", "def draw(self, screen):\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n\n # Draw score information\n self.scoreboard.show_score()\n\n if not self.stats.game_active:\n self.play_button.draw_button()", "def draw(self):\n if not self.pressed:\n #draw info prompt in room\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ANTIQUE_BRASS)\n arcade.draw_text(\"?\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw info to top of screen when clicked\n arcade.draw_text(self.text, 10, settings.HEIGHT - 10, arcade.color.BLACK, anchor_x=\"left\", anchor_y=\"top\")", "def draw_bg(self):\n self.screen.fill(self.bg)", "def draw_screen(self, master_screen):\n master_screen.blit(self.screen_image, (0, 0))", "def clear_top(self):\n background = pygame.Surface((720, 77))\n background.fill((255, 255, 255))\n self.screen.blit(background, (0, 0))\n pygame.display.update((0, 0, 720, 77))", "def redraw(dis):\n dis.blit(Bg,(0,0))", "def clear_screen(self, w_height):\r\n\r\n print(\"\\n\"*(w_height))", "def draw(self):\n\n 
self.state_stack.peek().draw(self.screen)", "def screen_reset(self, width, height):\n pygame.display.set_mode((width, height))\n self.s_width = width\n self.s_height = height\n self.main_menu.reset()\n self._option_menu.reset()\n self._instruction_menu.reset()\n self._title.reset_pos(self.s_width/2, self.s_height*0.25)\n self._info.screen_init()\n self._bg.init_bg()", "def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()", "def draw(self):\n self.scene.draw(self.screen)", "def draw():", "def draw(self, screen):\n screen.blit(self.surface, self.rect)", "def draw(self):\n if not self.pressed:\n #draw dialogue prompt\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ALABAMA_CRIMSON)\n arcade.draw_text(\"!\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw dialogue box\n arcade.draw_rectangle_filled(self.center_x, self.center_y, self.width, self.height, self.color)\n arcade.draw_text(self.text, self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")", "def on_draw(self):\n self.clear()\n self.manager.draw()", "def drawUniverse(self, universe):\n self.screenSize = self.screen.get_size()\n\n # Background\n self.screen.fill((0, 0, 0))\n\n self._drawUniverse(universe)\n\n pygame.display.update()", "def clear(self):\n self.command(_LCD_CLEARDISPLAY)\n self._cursor_pos = (0, 0)\n self._content = [[0x20] * self.cols for _ in range(self.rows)]\n time.sleep(2*MILLISECOND)", "def draw(self, screen):\n \n # Draw the background\n screen.fill(CAVE)\n screen.blit(self.background,(self.world_shift // 3,0))\n \n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n #self.enemy_list.draw(screen)\n self.enemy_list.draw(screen)", "def draw(self):\n self.window.clear()\n # Draw heads up display\n views.hud(self)\n # Refresh the messages on screen\n queue.draw(self.window)\n # Draw the map\n self.camera.draw(self.window, self.world, point=(self.player.x, self.player.y))", "def draw(self, screen):\n if self.state == self.S_ACTIVE:\n screen.blit(self.image, self.rect)", "def clear(screen):\n screen.clear()\n screen.refresh()", "def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)", "def clear_screen():\n\tprint(\"\\033[H\\033[J\")", "def draw(self):\n self.screen_surf.fill(BKGD_COLOUR)\n self.all_tiles.draw(self.screen_surf) # Tiles before other sprites.\n self.nests.draw(self.screen_surf) # Nests before chipmunks.\n self.chipmunks.draw(self.screen_surf)\n self.acorns.draw(self.screen_surf)\n self.screen_surf.blit(self.acorn_surf, self.acorn_surf.get_rect())\n self.screen_surf.blit(self.timer_surf, self.timer_rect)", "def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.subplot2.clear()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n # ignore unresolved reference below, we're using the method from Bullets, not Sprite. 
Pycharm...sigh.\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n\n # drawing the aliens\n self.aliens.draw(self.screen)\n\n # drawing information about the score\n self.sb.show_score()\n\n # draws play-button on inactive game state\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n pygame.display.flip()", "def render(self, screen):\n # print(\"Drawing scene {}\".format(self.imgname))\n screen.fill(self.color)", "def draw_game(self) -> None:\n\n self.screen.fill(THECOLORS['royalblue4'])\n self.our_board.draw(self.screen)\n self.game_status.draw(self.screen)\n self.heading_bar.draw(self.screen)\n\n if self.our_game_state == STATE_PREPARING:\n self.start_game_button.draw(self.screen)\n elif not self.our_game_state == STATE_READY_TO_START:\n self.their_board.draw(self.screen)", "def _update_screen(self):\n\t\tself.screen.fill((255, 255, 255))\n\n\t\tself._check_collisions()\n\t\tself._update_objects()\n\t\tself._blit_objects()\n\n\t\tpygame.display.flip()", "def render(self):\n self.screen.fill(prepare.BACKGROUND_COLOR)\n self.health_bar()\n # self.enemy_health()\n self.energy_bar()\n self.level.draw(self.screen)\n pg.display.update()", "def draw_main_menu():\n draw_cover()\n draw_menu_buttons()\n draw_border()", "def draw_menu(self):\n self.screen.fill(self.menu_color, self.rect)\n pygame.draw.rect(self.screen, self.border_color, self.rect, 5)\n self.screen.blit(self.title_image, self.title_image_rect)\n\n self.play_button.draw_button()", "def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')", "def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')" ]
[ "0.7505039", "0.7242356", "0.7230753", "0.7131363", "0.7115127", "0.7101928", "0.70682305", "0.70466423", "0.7031619", "0.6995481", "0.6974922", "0.6921669", "0.69176", "0.69122386", "0.6896714", "0.6883864", "0.6875905", "0.6849436", "0.6818272", "0.6815119", "0.6790672", "0.6789373", "0.674679", "0.67335576", "0.6732626", "0.6732472", "0.672865", "0.6727181", "0.6722812", "0.67102516", "0.6709372", "0.67044216", "0.6698779", "0.6686245", "0.6675916", "0.66715837", "0.6666018", "0.66658306", "0.66446537", "0.664356", "0.6606143", "0.6597208", "0.65921414", "0.6578098", "0.6578098", "0.6577739", "0.65703815", "0.65578896", "0.6541289", "0.6537862", "0.65313673", "0.65311456", "0.65281683", "0.65202004", "0.6515969", "0.6509159", "0.64999944", "0.6499818", "0.64958763", "0.64949584", "0.64828515", "0.6474839", "0.6465484", "0.6454103", "0.6434019", "0.6427524", "0.6418793", "0.6410641", "0.64065313", "0.63921654", "0.63914746", "0.6391097", "0.6389319", "0.63737607", "0.63685215", "0.63570243", "0.6354327", "0.6349967", "0.63485295", "0.634026", "0.63379145", "0.632358", "0.6318205", "0.63156027", "0.63151485", "0.6311211", "0.63107353", "0.63102776", "0.6309995", "0.6300618", "0.6298044", "0.6296974", "0.62950355", "0.62870795", "0.62828404", "0.6281684", "0.6273415", "0.627326", "0.6271015", "0.6271015" ]
0.8509718
0
Move the alien around using the keyboard.
def update():
    if keyboard.left:
        alien.x -= 2
    elif keyboard.right:
        alien.x += 2
    if keyboard.space:
        alien.y = GROUND - 50
        animate(alien, y=GROUND, tween='bounce_end', duration=.5)

    # If the alien is off the screen,
    # move it back on screen
    if alien.right > WIDTH:
        alien.right = WIDTH
    elif alien.left < 0:
        alien.left = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(event):\r\n\t\tif event.char == \"a\":\r\n\t\t\tcanvas.move(z[a], -10, 0)\r\n\t\telif event.char == \"d\":\r\n\t\t\tcanvas.move(z[a], 10, 0)\r\n\t\telif event.char == \"w\":\r\n\t\t\tcanvas.move(z[a], 0, -10)\r\n\t\telif event.char == \"s\":\r\n\t\t\tcanvas.move(z[a], 0, 10)", "def joystick_move(self, emphasis=1):\n step = int(20*emphasis)\n self.display.ship.move_vertical(step=step)", "def move(self):\n \n self.position = self.wander()", "def move(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_w]:\n self.y -= self.vel\n if keys[pygame.K_a]:\n self.x -= self.vel\n if keys[pygame.K_s]:\n self.y += self.vel\n if keys[pygame.K_d]:\n self.x += self.vel", "def move(self, environment):\n ch2 = getch()\n if ch2 == b'H' or ch2 == \"A\":\n # the up arrow key was pressed\n print (\"up key pressed\")\n\n\n elif ch2 == b'P' or ch2 == \"B\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n\n elif ch2 == b'K' or ch2 == \"D\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == b'M' or ch2 == \"C\":\n # the right arrow key was pressed\n print(\"right key pressed\")", "def move_tower(self, x, y):\n self.x = x\n self.y = y\n self.menu.x = x\n self.menu.y = y\n self.menu.update()", "def joy_callback(self, msg):\n mappings = gamepad_mappings.set_gamepad_mappings(msg)\n self.move_vertical = mappings[\"button_vertical\"] # up: +1.0, down: -1.0\n self.move_horizontal = mappings[\"button_horizontal\"] # left: +1.0, right: -1.0", "def update(self):\n if games.keyboard.is_pressed(games.K_RIGHT):\n self.x += 1\n if games.keyboard.is_pressed(games.K_a):\n self.x -= 1", "def on_key_press(self, key):\n if key == LEFT:\n self.player.change_x = -5\n elif key == RIGHT:\n self.player.change_x = 5\n elif key == UP:\n self.player.change_y = -5 \n elif key == DOWN:\n self.player.change_y = 5", "def update(self):\n keys = pygame.key.get_pressed() # Checks for an input by the user\n if keys[pygame.K_RIGHT]:\n king.move_right() # Moves right if the user presses the right key\n\n if keys[pygame.K_LEFT]:\n king.move_left() # Moves left if the user presses the left key", "def update(self):\n pygame.event.pump()\n self.pos_x += 0\n if (pygame.key.get_pressed()[pygame.K_w]) and self.pos_y > 0:\n self.pos_y -= 1\n if (pygame.key.get_pressed()[pygame.K_a]) and self.pos_x > 0:\n self.pos_x -= 1\n if (pygame.key.get_pressed()[pygame.K_d]) and self.pos_x < 1080:\n self.pos_x += 1\n if (pygame.key.get_pressed()[pygame.K_s]) and self.pos_y < 360:\n self.pos_y += 1", "def moveBy(self, x, y):\n\t\tself.moveTo(self.x + x, self.y + y)", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.player.change_y += .2\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.player.change_x -= .2\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.player.change_x += .2\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.player.change_y -= .2", "def MoveCurrentSpace(self):\n if self.facing == 0:\n self.y -= 1\n elif self.facing == 1:\n self.x += 1\n elif self.facing == 2:\n self.y += 1\n elif self.facing == 3:\n self.x -= 1", "def move_to(self, x, y):\r\n self.__current_room = x, y", "def AeroMove(self, pos):\r\n\r\n pass", "def move(self, x, y):\n self.x+=x\n self.y+=y", "def move_east(self):\r\n self.move(dx=1, dy=0)", "def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left", "def on_key_press(self, key, modifiers):\n #if self.player_sprite.amphet_excited is False:\n \n\n if 
key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED\n\n\n elif key == arcade.key.ESCAPE:\n raise Exception(\"\\n\\n See You soon, fork it share it !\")", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED", "def move(self):\n \n self.position = self.explore()", "def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED", "def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED", "def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy", "def move(self, dx, dy):\n self.x += dx\n self.y += dy", "def keyboard(key, x, y):\r\n\tglobal ROTATE\r\n\r\n\tif key == chr(27): \r\n\t\tsys.exit(0)\r\n\telif key == 'r': \r\n\t\tROTATE = (ROTATE + 5) % 360\r\n\telif key == 'R': \r\n\t\tROTATE = (ROTATE - 5) % 360\r\n\r\n\tglutPostRedisplay()", "def move(self, key):\n \n global last_time\n if (key == K_RIGHT):\n self.xMove = self.x_dist\n self.x_pos=self.xMove\n elif (key == K_LEFT):\n self.xMove = -self.x_dist\n self.x_pos+=self.xMove\n elif (key == K_UP):\n self.yMove = -self.y_dist\n self.y_pos+=self.yMove\n elif (key == K_DOWN):\n self.yMove = self.y_dist\n self.y_pos+=self.yMove\n self.rect = self.rect.move(self.xMove,self.yMove)", "def move_north(self):\r\n self.move(dx=0, dy=-1)", "def control(self, keyCode):\n if (keyCode == DOWN and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_DOWN_BEGIN_ANGLE\n self.rot_end = self.MOUTH_DOWN_END_ANGLE\n self.x_add = 0\n self.y_add = self.velocity\n elif (keyCode == UP and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_UP_BEGIN_ANGLE\n self.rot_end = self.MOUTH_UP_END_ANGLE\n self.x_add = 0\n self.y_add = -(self.velocity)\n elif (keyCode == LEFT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_LEFT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_LEFT_END_ANGLE\n self.x_add = -(self.velocity)\n self.y_add = 0\n elif (keyCode == RIGHT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_RIGHT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_RIGHT_END_ANGLE\n self.x_add = self.velocity\n self.y_add = 0", "def 
on_key_press(self, key, modifiers):\n if key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n elif key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED", "def _movePaddle(self):\n self._click()\n self._game.updatePaddle(self._touch)\n self._last = self._touch", "def move_focus(self, pos_x, pos_y):\n factor = self.offset.x * -0.005 / self.scale\n pos_x *= factor\n pos_y *= factor\n self.focus += (pos_x, pos_y)", "def on_up_key(self, event) -> None:\r\n\r\n self.move_view(0, -1)", "def check_keys(self):\n if self.holding_left:\n self.paddle.move_down()\n\n if self.holding_right:\n self.paddle.move_up()", "def move_player(self, pressed_keys):\n # Arrow-key movement\n if pressed_keys[K_UP]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_DOWN]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_LEFT]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_RIGHT]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n # WASD movement\n if pressed_keys[K_w]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_s]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_a]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_d]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n #Boundary\n if self.player.rect.left < 0:\n self.player.rect.left = 0\n if self.player.rect.right > self.board.screen_width:\n self.player.rect.right = self.board.screen_width\n if self.player.rect.top <= 0:\n self.player.rect.top = 0\n if self.player.rect.bottom >= self.board.screen_height:\n self.player.rect.bottom = self.board.screen_height", "def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")", "def update(self):\n self.x += self.settings.alien_speed * self.settings.alien_direction\n self.rect.x = self.x", "def move():\n print(\" ------ Execution -----\\n\")\n pyautogui.moveRel(0, 10)\n pyautogui.moveRel(0, -10)\n pyautogui.click()", "def move(self):\n pass", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def updatePaddle(self, selfinput):\n assert isinstance(selfinput,GInput)\n position = 0\n \n if selfinput.is_key_down('right'):\n position = 5\n if selfinput.is_key_down('left'):\n position = -5\n \n self._paddle.move(position)", "def keyboard(key, x, 
y):\n\n # Handle ESC key.\n if key == b'\\033':\t\n\t# \"\\033\" is the Escape key\n sys.exit(1)\n \n if key == b',' and selected_face:\n move_face('LEFT')\n\n if key == b'.' and selected_face:\n move_face('RIGHT')", "def move(self):\n c = self.get_position()\n\n f = c['f']\n if f == 'NORTH':\n c['y'] += 1\n elif f == 'EAST':\n c['x'] += 1\n elif f == 'SOUTH':\n c['y'] -= 1\n elif f == 'WEST':\n c['x'] -= 1\n\n if self.valid_position(c):\n self.update_position(c)\n else:\n raise ValueError('InvalidPosition')", "def on_key_release(self, key):\n if key == LEFT:\n self.player.change_x = 0\n elif key == RIGHT:\n self.player.change_x = 0\n elif key == UP:\n self.player.change_y = 0\n elif key == DOWN:\n self.player.change_y = 0", "def move(self):\n self.tick()\n self.pressed = pygame.key.get_pressed()\n\n self.player.update(self)", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.player_sprite.change_y = MOVEMENT_SPEED\n\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.player_sprite.change_x = MOVEMENT_SPEED\n elif key == arcade.key.ESCAPE:\n self.inventory.set_return_view(self)\n self.window.show_view(self.inventory)", "def move_to_position2(self):", "def move(self):\n self.pos += self.vel\n self.rect.center = self.pos", "def on_key_release(self, key, modifiers):\n if key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n elif key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0", "def move(self, p):\r\n self.position.setvalue(p)", "def move(x,y):\r\n pass", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.direction = MoveEnum.UP\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.direction = MoveEnum.DOWN\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.direction = MoveEnum.LEFT\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.direction = MoveEnum.RIGHT", "def move(self, x, y, ev):\n angle = np.arctan2(y, x)\n phi = np.fabs(angle - self.theta)\n if phi > np.pi:\n phi = 2 * np.pi - phi\n self.phi = phi\n self.has_move = True\n self.base.update()\n self.base.draw()", "def on_key_release(self, key, modifiers):\r\n if key == arcade.key.UP or key == arcade.key.DOWN:\r\n self.player.change_y = 0\r\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\r\n self.player.change_x = 0", "def on_key_release(self, key, modifiers):\r\n if key == arcade.key.UP or key == arcade.key.DOWN:\r\n self.player.change_y = 0\r\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\r\n self.player.change_x = 0", "def movement(self):", "def on_key_release(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED", "def move_left(self):\n\t\tself.set_x_vector(-1 * constants.DONKEY_SPEED)", "def move(self, X, Y):\n self.menu.x, self.menu.y = X, Y\n self.x, self.y = X, Y\n self.menu.update_buttons()", "def 
on_key_press(self, key, key_modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n if self.physics_engine.can_jump():\n self.sprite1.change_y = JUMP_SPEED\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.sprite1.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.sprite1.change_x = MOVEMENT_SPEED", "def _animateShip(self,input,dt):\n move = 0\n if input.is_key_down('left'):\n move -= SHIP_MOVEMENT\n self._ship.x = max(self._ship.x+move,0)\n if input.is_key_down('right'):\n move += SHIP_MOVEMENT\n self._ship.x = min(self._ship.x+move, GAME_WIDTH)\n #extra feature\n if self._ship != None :\n self._ship.x += move\n self._ship.x = max(self._ship.x, 0+SHIP_WIDTH/2)\n self._ship.x = min(self._ship.x, GAME_WIDTH-SHIP_WIDTH/2)", "def update(self):\n self.x -= self.settings.alien_speed\n self.rect.x = self.x", "def jump(self):\n self.vy = -9", "def on_key_release(self, key, modifiers):\n if key == arcade.key.A or key == arcade.key.D:\n self.player.change_x = 0\n\n elif key == arcade.key.W or key == arcade.key.S:\n self.player.change_y = 0", "def update(self):\n\t\tself.y += (self.settings.alien_speed * self.settings.fleet_direction)\n\t\tself.rect.y = self.y", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... \",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def keyUp(self):\n if pyxel.btnp(pyxel.KEY_UP):\n self.rotater(-1)", "def move():\n Robot.move()", "def automove_to(self, x: int, y: int) -> None:\n self.cpu_controlled = True\n self.end_cinematic_x_pos = x\n self.end_cinematic_y_pos = y", "def move_right(self):\n\t\tself.set_x_vector(constants.DONKEY_SPEED)", "def move (self):\n\t\tself.x += self.direction[0]\n\t\tself.y += self.direction[1]\n\t\tself.row = (self.y - 15) / 30\n\t\tself.col = (self.x - 15) / 30\n\t\tself.rowcol = (self.row,self.col)\n\t\tself.draw()", "def move_to_position1(self):", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move(contr):\n # get the object this script is attached to\n camera = contr.owner\n\n # set the movement speed\n speed = camera['Speed']\n\n # Get sensor named Mouse\n keyboard = contr.sensors['All_Keys']\n\n # Default movement speed\n move_speed = [0.0, 0.0, 0.0]\n\n keylist = keyboard.events\n for key in keylist:\n # key[0] == GameKeys.keycode, key[1] = status\n if key[1] == GameLogic.KX_INPUT_ACTIVE:\n # Also add the key corresponding key for an AZERTY keyboard\n if key[0] == GameKeys.WKEY or key[0] == GameKeys.ZKEY:\n move_speed[2] = -speed\n elif key[0] == GameKeys.SKEY:\n move_speed[2] = speed\n # Also add the key corresponding key for an AZERTY keyboard\n elif key[0] == GameKeys.AKEY or key[0] == GameKeys.QKEY:\n move_speed[0] = -speed\n elif key[0] == GameKeys.DKEY:\n move_speed[0] = speed\n elif key[0] == GameKeys.RKEY:\n move_speed[1] = speed\n elif key[0] == GameKeys.FKEY:\n move_speed[1] = -speed\n\n # The second parameter of 'applyMovement' determines\n # a movement with respect to the object's local\n # coordinate system\n camera.applyMovement( move_speed, True )\n\n # Get sensor named Mouse\n #for sensor in contr.sensors:\n #if sensor.isA(GameTypes.SCA_KeyboardSensor):", "def on_down_key(self, event) -> None:\r\n\r\n 
self.move_view(0, 1)", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n d = self._turtle.isdown()\n if d:\n self._turtle.penup()\n self._turtle.setposition(x,y)\n if d:\n self._turtle.pendown()", "def move_character(self, old_y, old_x, y_pos, x_pos):\n self.map[old_y][old_x] = ' '\n self.map[y_pos][x_pos] = 'G'", "def move(self):\n self.position += self.speed", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.direction = MoveEnum.UP\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.direction = MoveEnum.DOWN\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.direction = MoveEnum.LEFT\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.direction = MoveEnum.RIGHT\n elif key == arcade.key.SPACE:\n self.shoot()", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def make_move(self, move, letter):\n self.positions[move] = letter", "def move_ship(self, event):\n directions = dict(Up=(0, -1), Down=(0, +1), Left=(-1, 0), Right=(1, 0))\n direction = event.keysym \n if direction in directions:\n x_fact, y_fact = directions[direction] \n cx = x_fact * self.spaceship_speed\n cy = y_fact * self.spaceship_speed\n self.game.move(self.ship_id, cx, cy)\n self.game.move(self.Main_Ship, cx, cy)", "def update(self):\n self.x += (self.settings.alien_speed * self.settings.fleet_direction)\n self.rect.x = self.x", "def keyCam(self, event):\n dct = {\n \"d\": 0,\n \"s\": 1,\n \"q\": 2,\n \"z\": 3\n }[event.char]\n self.moveAllSeg(dct)", "def pressed_key(self, key, dt):\n if key == LEFT:\n self.rotation -= spaceship_rotation(dt)\n if key == RIGHT:\n self.rotation += spaceship_rotation(dt)\n if key == DOWN:\n self.x_speed -= spaceship_acceleration_x(dt, self.rotation)\n self.y_speed -= spaceship_acceleration_y(dt, self.rotation)\n if key == UP:\n self.x_speed += spaceship_acceleration_x(dt, self.rotation)\n self.y_speed += spaceship_acceleration_y(dt, self.rotation)", "def update(self):\r\n self.x += (self.invasion_settings.alien_speed *\r\n self.invasion_settings.fleet_direction)\r\n self.rect.x = self.x", "def move_north(self):\n self.vertical = (self.vertical * 2)[1:5]\n self.horizontal[1] = self.vertical[0]\n self.horizontal[3] = self.vertical[2]", "def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction", "def move_down(self):\n self.y -= 1", "def move(self):\n self.center_x += self._vx\n self.center_y += self._vy", "def movePlayer(self,direction):\n if direction == Direction.UP:\n self.y -= 1\n elif direction == Direction.DOWN:\n self.y += 1\n elif direction == Direction.LEFT:\n self.x -= 1\n elif direction == Direction.RIGHT:\n self.x += 1", "def set_move(self, x, y):\n self.pieces[x + (y * self.width)].set_move()", "def move_debug(self, environment):\n\n ch2 = sys.stdin.read(1)\n\n if ch2 == \"w\":\n # the up 
arrow key was pressed\n print(\"up key pressed\")\n\n elif ch2 == \"s\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n elif ch2 == \"a\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == \"d\":\n # the right arrow key was pressed\n print(\"right key pressed\")", "def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)", "def Movement():\n keys = pygame.key.get_pressed()\n \n if keys[pygame.K_LEFT] and not snake.ang==90:\n snake.x_change = -snake.vel\n snake.y_change = 0\n snake.left = True\n snake.right = False\n snake.up = False\n snake.down = False\n snake.ang = -90\n elif keys[pygame.K_RIGHT] and not snake.ang==-90:\n snake.x_change = snake.vel\n snake.y_change = 0\n snake.left = False\n snake.right = True\n snake.up = False\n snake.down = False\n snake.ang = 90\n elif keys[pygame.K_UP] and not snake.ang==0:\n snake.x_change = 0\n snake.y_change = -snake.vel\n snake.left = False\n snake.right = False\n snake.up = True\n snake.down = False\n snake.ang = 180\n elif keys[pygame.K_DOWN] and not snake.ang==180:\n snake.x_change = 0\n snake.y_change = snake.vel\n snake.left = False\n snake.right = False\n snake.up = False\n snake.down = True\n snake.ang = 0" ]
[ "0.70149887", "0.699595", "0.681032", "0.6801341", "0.66253835", "0.65040016", "0.64911735", "0.6442173", "0.6367084", "0.632558", "0.62957025", "0.6282375", "0.62190115", "0.6184205", "0.61793596", "0.6166848", "0.61651015", "0.61622244", "0.6133459", "0.61307865", "0.61206126", "0.609572", "0.60945827", "0.60945827", "0.60821307", "0.60700375", "0.60412914", "0.6030451", "0.6030245", "0.60272413", "0.6024432", "0.6021221", "0.6016035", "0.60095775", "0.5985138", "0.5970594", "0.5968692", "0.59683406", "0.5957073", "0.5955218", "0.59537274", "0.5951825", "0.59442276", "0.59433967", "0.5935257", "0.5929682", "0.59245026", "0.5918629", "0.5917628", "0.5909464", "0.59016037", "0.59010756", "0.58989435", "0.5889487", "0.5885903", "0.5885903", "0.5879393", "0.5875066", "0.5872351", "0.58591276", "0.58580667", "0.58439815", "0.5837931", "0.58353174", "0.58346623", "0.5821084", "0.5820096", "0.58185583", "0.5817966", "0.5817267", "0.58141786", "0.5813796", "0.58044726", "0.5802055", "0.57975376", "0.57975376", "0.57920235", "0.57896954", "0.57850534", "0.5783361", "0.5782108", "0.5781724", "0.5770919", "0.5761789", "0.5761789", "0.57616776", "0.5759257", "0.5756654", "0.5756461", "0.57521653", "0.57472295", "0.57325923", "0.57321495", "0.5725914", "0.5722966", "0.57190025", "0.5712718", "0.5706211", "0.5702999", "0.56913126" ]
0.71201307
0
basic preprocessing on the raw variations returns a single histogram for each systematic with bin content representing the symmetrised uncertainty. Preprocessing: top mass variations are scaled by 1/3
def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):
    varHists = []
    linkStr = ""

    singlePointSystematics = ["ERDON", "ERDONRETUNE", "GLUONMOVETUNE", "BFRAG_PETERSON"]
    sPS = 0
    if any(singlePointSystematic in systematic for singlePointSystematic in singlePointSystematics):
        sPS = 1
        linkStr = "_"

    variations = [""]

    for variation in variations:
        path = directory_base + xsec_type + "_" + xsec_level + directory_tail + systematic + linkStr + variation + "/combinedUnfolded/Hyp" + observable + "Results.txt"
        inputfile = open(path, 'r').readlines()
        bins = []
        for line in inputfile:
            bins.append(float(line.split( )[3]))
            bins.append(float(line.split( )[5]))
        bins = sorted(bins)
        binsArray = array('f',bins)
        histNameUp = systematic + "_UP"
        histNameDown = systematic + "_DOWN"
        histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)
        histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)
        histUpFinal = TH1F("", "", len(bins)-1, binsArray)
        histDownFinal = TH1F("", "", len(bins)-1, binsArray)

        ibin = 0
        for line in inputfile:
            nomBin = histNominal.GetBinContent(ibin+1)
            nomBinCenter = histNominal.GetBinCenter(ibin+1)
            unc = float(line.split( )[7])
            if systematic == "DY":
                print "DY UP = " + str(1.0 + unc)
                print "DY DOWN = " + str(1.0 - unc)
            histUp.SetBinContent(ibin+1, 1.0 + unc)
            histDown.SetBinContent(ibin+1,1.0 - unc)
            ibin = ibin + 1

        histUpVis = histUp.Clone()
        histDownVis = histDown.Clone()
        histUpFinal = histUp.Clone()
        histDownFinal = histDown.Clone()

        if systematic == "PDF":
            histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)

    return (histUpFinal, histDownFinal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):\n varHists = []\n\n linkStr = \"_\"\n variations = [\"\"]\n\n for variation in variations:\n if xsecType == \"normalised\":\n xsecType = \"normalized\"\n if xsecLevel == \"particle\":\n xsecLevel = \"pseudo\"\n path = directory_base + xsecType + \"_\" + xsecLevel + directory_tail + systematic + linkStr + variation + \"/combinedUnfolded/Hyp\" + observable + \"Results.txt\"\n #print \"directory = \" + str(path)\n inputfile = open(path, 'r').readlines()\n bins = []\n for line in inputfile:\n bins.append(float(line.split( )[3]))\n bins.append(float(line.split( )[5]))\n bins = sorted(bins)\n binsArray = array('f',bins)\n histNameUp = systematic + \"_UP\" \n histNameDown = systematic + \"_DOWN\" \n histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)\n histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)\n histUpFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n histDownFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n \n ibin = 0\n\n for line in inputfile:\n nomBin = histNominal.GetBinContent(ibin+1)\n nomBinCenter = histNominal.GetBinCenter(ibin+1)\n unc = float(line.split( )[7])\n# if systematic == \"MASS\":\n# unc = unc/(3.0)\n# if systematic == \"PSFSRSCALE\":\n# unc = unc/(sqrt(2.0))\n\n histUp.SetBinContent(ibin+1, 1.0 + unc)\n histDown.SetBinContent(ibin+1,1.0 - unc)\n ibin = ibin + 1 \n\n histUpVis = histUp.Clone()\n histDownVis = histDown.Clone()\n histUpFinal = histUp.Clone()\n histDownFinal = histDown.Clone()\n\n if systematic == \"PDF\":\n histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)\n\n return (histUpFinal, histDownFinal)", "def _plot_psth_flat(self, sigma=5, figsize = (15, 8)):\n\t\n\t\tgaus_filt = sp.ndimage.gaussian_filter1d\n\t\tall_resp = gaus_filt(self.conditions_hist_mean.flatten(), sigma)\n\t\t\n\t\tfig = plt.figure(figsize=figsize)\n\t\tax = fig.add_subplot(1, 1, 1)\n\t\t\n\t\tax.plot(all_resp, linestyle='-', color='0.28')\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\tcon_mark = np.arange(0, (self.bins.size -1) * n_con, self.bins.size -1)\n\t\t\t\t\n\t\tax.xaxis.set_ticks(con_mark)\n\n\t\ttry:\n\t\t\tax.xaxis.set_ticklabels(self.cond_label)\n\t\texcept:\n\t\t\tax.xaxis.set_ticklabels(np.unique(self.marker_codes))\n\t\t\n\t\tfreq_label = np.round(ax.get_yticks() * (1/self.bin_width),\n\t\t\t\t\t\t\t decimals = 1)\n\t\tax.set_yticklabels(freq_label)\n\t\tax.set_ylabel('Frequency')\n\t\t\n\t\tfor label in ax.xaxis.get_majorticklabels():\n\t\t\tlabel.set_horizontalalignment('left')\n\t\t\t\n\t\tax.set_xlim(0, (self.bins.size -1) * n_con)\n\t\t\n\t\t# bug with macosx backend\n# plt.tight_layout()\n\t\tplt.subplots_adjust(hspace=0.45)", "def wScalogram_nsig(data, hypothesis=None,\n nsigma=None, nsigma_min=None, nsigma_percent=1,\n reconstruction_scaled=False,\n firsttrend=False, logscale=True,\n title=None, xlabel=None, outputfile=None):\n\n WaveDec_data = HaarTransform(data)\n Ccoeffs = WaveDec_data[:-1]\n FirstTrend = WaveDec_data[-1]\n Level = len(Ccoeffs)\n\n if logscale==True:\n scale='log'\n else:\n scale='linear'\n\n nlevels = Level if firsttrend==False else Level+1\n nrows = nlevels+1 # the first panel is the data histogram\n if nsigma is not None:\n nrows += 1 # add another panel for the generating function\n ratio = [1.5] + [1.5]\n ratio += [1]*(nrows-2)\n\n fig = plt.figure(figsize=(12,12))\n gs = gridspec.GridSpec(ncols=1, nrows=nrows,\n height_ratios=ratio,\n hspace=0)\n axs = [fig.add_subplot(gs[i,0]) for i in 
range(nrows)]\n cbar_axs = fig.add_axes([0.93, 0.15, 0.02, 0.7]) # colorbar axis\n\n # Fill out top panel\n data_hist, _, data_center, data_width = _BinData(data, bins=2**Level)\n axs[0].bar(data_center, data_hist, align='center', width=data_width, color=Data_color)\n axs[0].set_yscale(scale)\n axs[0].text(x=.93, y=.63, s='Data', fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[0].transAxes)\n\n # If nsigma function is provided\n if nsigma is not None:\n\n nsigCcoeffs = nsigma\n\n cut = '(No cut)'\n if nsigma_percent is not None:\n cut = str(nsigma_percent*100) + '%'\n if nsigma_min is not None:\n cut = r'$\\sigma_{min}$ = ' + str(nsigma_min)\n\n if hypothesis is not None:\n #TODO: error trap\n DeltaCoeff = _NSigmaFilter(data, hypothesis, nsigma, nsigma_min, nsigma_percent)\n ReconstructedData = InvHaarTransform(DeltaCoeff, normalize=False)\n if reconstruction_scaled is True:\n RecData = np.divide(ReconstructedData, np.sqrt(hypothesis))\n else:\n RecData = ReconstructedData\n rec_hist, _, rec_center, rec_width = _BinData(RecData, bins=2**Level)\n axs[1].plot(rec_center, rec_hist, 'o', markersize=3, color='#E67E22',\n label='Reconstruction ({})'.format(cut))\n axs[1].plot(range(len(data_center)), np.zeros_like(RecData), color='black', linewidth=0.5)\n axs[1].set_yscale('linear')\n axs[1].legend(edgecolor=\"black\", fancybox=False,\n handletextpad=0, handlelength=0, markerscale=0, fontsize=12)\n\n cmap = _NewColorMap()\n binintensity = np.absolute(nsigma)\n sig_min = _findmin(binintensity)\n sig_max = _findmax(binintensity)\n norm = Normalize(vmin=sig_min, vmax=sig_max)\n\n # If firsttrend, fill out the bottom panel with the first trend\n if firsttrend==True:\n bins=1\n norm_points = norm(binintensity[-1])\n color_points = [cmap(i) for i in norm_points]\n hist, _, center, width = _BinData(FirstTrend, bins=1)\n axs[-1].bar(center, hist, align='center', width=width, color=color_points)\n axs[-1].tick_params(axis='both', bottom=False, labelbottom=False)\n axs[-1].set_yscale(scale)\n axs[-1].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(0), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[-1].transAxes)\n\n # Now plot the negative coefficients. The bars are hashed to distinguish the\n # pos and neg coefficients.\n s = 2 if nsigma is not None else 1\n for l in range(Level):\n bins=2**(Level-l-1)\n coeffs = Ccoeffs[l]\n norm_points = norm(binintensity[l])\n color_points = [cmap(i) for i in norm_points]\n\n if logscale==True:\n # Plot the positive coefficients\n pos_ix = np.where(coeffs>0)\n pos_coeffs = np.zeros_like(coeffs)\n for i in pos_ix:\n pos_coeffs[i] = coeffs[i]\n pos_hist, _, pos_center, pos_width = _BinData(pos_coeffs, bins=bins)\n axs[l+s].bar(pos_center, pos_hist, align='center', width=pos_width, color=color_points)\n\n # Now plot the negative coefficients. 
The bars are hashed to distinguish the\n # pos and neg coefficients.\n neg_ix = np.where(Ccoeffs[l]<0)\n neg_coeffs = np.zeros_like(coeffs)\n for j in neg_ix:\n neg_coeffs[j] = np.absolute(coeffs[j])\n neg_hist, _, neg_center, neg_width = _BinData(neg_coeffs, bins=bins)\n axs[l+s].bar(neg_center, neg_hist, align='center', width=neg_width, color=color_points,\n hatch='///')\n\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev = Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(lev+1), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n else:\n hist, _, center, width = _BinData(coeffs, bins=bins)\n axs[l+s].bar(center, hist, align='center', width=width,\n color=color_points)\n axs[l+s].plot(range(bins), np.zeros(bins), color='black',\n linewidth=0.5)\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev=Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$C_{l=%.1i}$'%(lev), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n cbar = ColorbarBase(cbar_axs, cmap=cmap, norm=norm)\n #cbar_axs.text(.5, sig_max, r'$N\\sigma$', fontsize=12)\n fig.text(x=0.93, y=.86, s=r'$N\\sigma$', fontsize=12)\n\n if title is not None:\n fig.suptitle(title, fontsize=18, y=0.92)\n fig.text(x=0.5, y=0.1, s=xlabel, fontsize=14)\n if outputfile is not None:\n plt.savefig(outputfile, bbox_inches='tight')\n plt.show()", "def preprocess(N, sigma2, R, f_def, params):\n\n #\n if 'scale' in params:\n if params['scale']:\n\n #\n N0 = np.sum(R * f_def, axis=1)\n\n #\n f_def *= np.average(N / N0)\n\n return N, sigma2, R, f_def, params", "def plot_hist_snfit_meta(self): \n \n self.read_meta()\n self.read_snfit_results()\n\n \n self.diff_x0 = []\n self.diff_x0_err = []\n self.diff_x1 = []\n self.diff_x1_err = [] \n self.diff_c = []\n self.diff_c_err = [] \n self.diff_mb = []\n self.diff_mb_err = [] \n self.diff_cov_x0_x1 = []\n self.diff_cov_x0_c = []\n self.diff_cov_x1_c = []\n self.diff_cov_mb_x1 = []\n self.diff_cov_mb_c = []\n\n for i in range (len(self.sn_name)):\n for j in range (len(self.meta_sn_name_list)):\n if self.sn_name[i] == self.meta_sn_name_list[j]:\n if np.abs(self.mb[i] - self.meta_mb[j]) < 0.0001:\n self.diff_x0.append(self.x0[i] - self.meta_x0[j])\n self.diff_x0_err.append(self.x0_err[i] - self.meta_x0_err[j])\n self.diff_x1.append(self.x1[i] - self.meta_x1[j])\n self.diff_x1_err.append(self.x1_err[i] - self.meta_x1_err[j]) \n self.diff_c.append(self.c[i] - self.meta_c[j])\n self.diff_c_err.append(self.c_err[i] - self.meta_c_err[j]) \n self.diff_mb.append(self.mb[i] - self.meta_mb[j])\n self.diff_mb_err.append(self.mb_err[i] - self.meta_mb_err[j])\n# self.diff_cov_x0_x1.append()\n# self.diff_cov_x0_c.append()\n# self.diff_cov_x1_c.append()\n# self.diff_cov_mb_x1.append()\n# self.diff_cov_mb_c.append()\n else:\n print self.x1[i] - self.meta_x1[j], self.sn_name[i],self.meta_sn_name_list[j], self.x1[i], self.meta_x1[j]\n\n\n fig = plt.figure(figsize=(8.,8.)) \n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0,25,label='$\\Delta$ X0')\n ax0_2.hist(self.diff_x0_err,25,label='$\\Delta$ X0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = 
'../sugar_analysis_data/results/x0_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1,25,label='$\\Delta$ X1')\n ax0_2.hist(self.diff_x1_err,25,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x1_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c,25,label='$\\Delta$ Color')\n ax0_2.hist(self.diff_c_err,25,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/color_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb,50,label='$\\Delta$ mb')\n ax0_2.hist(self.diff_mb_err,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/mb_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()", "def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = 
np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]", "def create_fixed_hist(self):\n hist = cv2.calcHist([self.obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n self.hist = cv2.normalize(hist).flatten()\n print self.hist", "def defineProcessTemplates(histos):\n\n templates=[]\n\n #nominal\n templates.append( histos[0] )\n nomStats=templates[-1].Integral()\n\n #systematic variations\n #if Up/Down already in the name store directly updating the name\n #if not, mirror the variation given \n for i in xrange(1,len(histos)): \n templates.append( histos[i] )\n key=templates[-1].GetName()\n if not 'Up' in key and not 'Down' in key :\n templates[-1].SetName(key+'Up')\n templates.append( histos[i].Clone(key+'Down') )\n for xbin in range(templates[0].GetNbinsX()):\n templates[-1].SetBinContent(xbin+1,2*templates[0].GetBinContent(xbin+1)-templates[-2].GetBinContent(xbin+1))\n \n #don't leave bins with 0's\n for h in templates:\n h.SetDirectory(0)\n iStats=h.Integral()\n if iStats>0: h.Scale(nomStats/iStats)\n for xbin in range(h.GetNbinsX()):\n if h.GetBinContent(xbin+1)>0: continue\n h.SetBinContent(xbin+1,1e-6)\n \n return templates", "def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}", "def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def wScalogram(data, hypothesis=None,\n nsigma=None, nsigma_min=None, nsigma_percent=1,\n reconstruction_scaled=False,\n firsttrend=False, logscale=True,\n filled=False,\n title=None, xlabel=None,\n outputfile=None):\n\n WaveDec_data = HaarTransform(data)\n Ccoeffs = WaveDec_data[:-1]\n FirstTrend = WaveDec_data[-1]\n Level = len(Ccoeffs)\n\n nlevels = Level if firsttrend==False else Level+1\n nrows = nlevels+1 # the first panel is the data histogram\n if nsigma is not None:\n nrows += 1 # add another panel for the generating function\n ratio = [1.5]\n ratio += [1]*(nrows-1)\n\n if filled==True:\n histtype='bar'\n coeffs_color=Coeffs_color\n firsttrend_color=Firsttrend_color\n else:\n histtype='step'\n coeffs_color='black'\n firsttrend_color='black'\n\n if logscale==True:\n scale='log'\n else:\n scale='linear'\n\n fig = plt.figure(figsize=(12,12))\n gs = gridspec.GridSpec(ncols=1, nrows=nrows,\n height_ratios=ratio,\n hspace=0)\n axs = [fig.add_subplot(gs[i,0]) for i in range(nrows)]\n\n # Fill out top panel\n data_hist, _, data_center, data_width = _BinData(data, bins=2**Level)\n axs[0].bar(data_center, data_hist, align='center',\n width=data_width, color=Data_color)\n axs[0].text(x=.93, y=.63, s='Data', fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[0].transAxes)\n 
axs[0].set_yscale(scale)\n\n # If nsigma is provided\n if nsigma is not None:\n\n nsigCcoeffs = nsigma\n\n cut = '(No cut)'\n if nsigma_percent is not None:\n cut = str(nsigma_percent*100) + '%'\n if nsigma_min is not None:\n cut = r'$\\sigma_{min}$ = ' + str(nsigma_min)\n\n if hypothesis is not None:\n #TODO: error trap\n DeltaCoeff = _NSigmaFilter(data, hypothesis, nsigma, nsigma_min, nsigma_percent)\n ReconstructedData = InvHaarTransform(DeltaCoeff, normalize=False)\n if reconstruction_scaled is True:\n RecData = np.divide(ReconstructedData, np.sqrt(hypothesis))\n else:\n RecData = ReconstructedData\n rec_hist, _, rec_center, rec_width = _BinData(RecData, bins=2**Level)\n axs[1].plot(rec_center, rec_hist, 'o', markersize=3, color='#E67E22',\n label='Reconstruction ({})'.format(cut))\n axs[1].set_yscale('linear')\n axs[1].legend(edgecolor=\"black\", fancybox=False,\n handletextpad=0.0, handlelength=0, markerscale=0, fontsize=12)\n\n # If firsttrend, fill out the bottom panel with the first trend\n if firsttrend==True:\n bins = 1\n axs[-1].hist(x=range(bins), bins=bins, weights=FirstTrend,\n histtype=histtype, color=firsttrend_color)\n axs[-1].tick_params(axis='both', bottom=False, labelbottom=False)\n axs[-1].set_yscale(scale)\n axs[-1].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(0), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[-1].transAxes)\n\n # Fill out the rest of the pannels with the wavelet coefficients\n # If signal_only, start two panels below the top panel\n s = 2 if nsigma is not None else 1\n for l in range(Level):\n bins=2**(Level-l-1)\n coeffs = Ccoeffs[l]\n\n if logscale==True:\n # Plot the positive coefficients\n pos_ix = np.where(Ccoeffs[l]>0)\n pos_coeffs = np.zeros_like(coeffs)\n for i in pos_ix:\n pos_coeffs[i] = coeffs[i]\n axs[l+s].hist(x=range(bins), bins=bins,\n weights=pos_coeffs, histtype=histtype, color=coeffs_color)\n\n # Now plot the negative coefficients. 
The bars are hashed to distinguish the\n # pos and neg coefficients.\n neg_ix = np.where(Ccoeffs[l]<0)\n neg_coeffs = np.zeros_like(coeffs)\n for j in neg_ix:\n neg_coeffs[j] = np.absolute(coeffs[j])\n axs[l+s].hist(x=range(bins), bins=bins,\n weights=neg_coeffs, histtype=histtype, hatch='///', color=coeffs_color)\n\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev = Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(lev+1), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n else:\n axs[l+s].hist(x=range(bins), bins=bins, weights=coeffs, histtype=histtype, color=coeffs_color)\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev = Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(lev+1), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n if title is not None:\n fig.suptitle(title, fontsize=18, y=0.92)\n fig.text(x=0.5, y=0.1, s=xlabel, fontsize=14)\n if outputfile is not None:\n plt.savefig(outputfile, bbox_inches='tight')\n plt.show()", "def histogramFromSketch_M2M(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10,project_on_probabilitySimplex=True,reg_rho=0.01):\n\n ## 0) Parsing the inputs\n\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n\n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n # Parse m, d\n if isinstance(Phi,SimpleFeatureMap):\n Omega = Phi.Omega\n d = Phi.d\n m = Phi.m\n else:\n raise ValueError('The Phi argument does not match one of the supported formats.')\n \n ## 1) Construct the A matrix\n # Build a new sketch with all the difference of Omega\n Omega_diffs = np.empty((d,m**2))\n for i in range(m):\n for j in range(m):\n Omega_diffs[:,i*m+j] = Omega[:,i] - Omega[:,j]\n\n Phi_diffs = SimpleFeatureMap(\"complexExponential\", Omega_diffs,xi=Phi.xi,c_norm=Phi.c_norm)\n\n # Evaluate the box constraints Fourier transform thanks to this sketch function\n z_diffs_domain = fourierSketchOfBox(domain,Phi_diffs,nb_cat_per_dim)\n\n # And reshape (not sure if correct)\n A_compl = z_diffs_domain.reshape(m,m)\n\n # Stack real and imaginary components\n A = np.zeros((2*m,2*m))\n A[:m,:m] = A_compl.real\n A[:m,m:] = A_compl.imag\n A[m:,:m] = -A_compl.imag\n A[m:,m:] = A_compl.real\n \n # Regularize\n A += reg_rho*np.eye(2*m)\n\n box = domain.copy() # the box in which we do the learning\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n h = np.zeros(bins)\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n F = fourierSketchOfBox(box,Phi,nb_cat_per_dim)\n\n # Stack the b vector\n b = np.zeros(2*m)\n b[:m] = F.real\n b[m:] = -F.imag\n\n \n # ... and solve! 
\n a_ri = np.linalg.solve(A, b)\n a = a_ri[:m] + 1j*a_ri[m:]\n \n\n \n # Predict with the sketch\n #print(a)\n h[p] = np.real(np.dot(a,sketch))\n if project_on_probabilitySimplex:\n h = project_probabilitySimplex(h)\n return h", "def _derive_variance_(self):\n # Pure Photon Noise\n self._properties[\"var\"] = np.sqrt(self.rawdata*self.exposuretime) / self.exposuretime", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n 
axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def parameters_histograms(w, dw, a, da, b, db):\n w = w.cpu()\n dw = dw.cpu()\n a = a.cpu()\n da = da.cpu()\n b = b.cpu()\n db = db.cpu()\n \n fig = plt.figure(figsize=(10,6))\n ax = fig.add_subplot(231)\n ax.hist(w.reshape(1, w.shape[0] * w.shape[1]))\n ax.set_title('Weights', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(232)\n ax.hist(dw.reshape(1, dw.shape[0] * dw.shape[1]))\n ax.set_title('Weights variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(233)\n ax.hist(a)\n ax.set_title('Visible bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(234)\n ax.hist(da)\n ax.set_title('Visible bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(235)\n ax.hist(b)\n ax.set_title('Hidden bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(236)\n ax.hist(db)\n ax.set_title('Hidden bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.subplots_adjust(hspace=0.25)\n plt.show()\n plt.close('all')", "def fwhmwhisker_multiext(filename,sigma,band,zenith):\n hdu=pf.open(filename)\n e1=[]\n e2=[]\n fwhmw=[]\n whiskerw=[]\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n for i in range(Nobj):\n print i\n img = hdui.data[i][4:].reshape(160,160)\n imgrbin = rebin(img,(40,40))\n res=wfwhm(imgrbin,sigma)\n e1.append(res[0])\n e2.append(res[1])\n whiskerw.append(res[2]*0.27)\n fwhmw.append(res[3]*0.27)\n e1 = np.array(e1)\n e2 = np.array(e2)\n fwhmw = np.array(fwhmw)\n whiskerw = np.array(whiskerw)\n e1mean = e1.mean()\n e1std = e1.std()\n e2mean = e2.mean()\n e2std = e2.std()\n whiskerwmean = whiskerw.mean()\n whiskerwstd = whiskerw.std()\n fwhmwmean = fwhmw.mean()\n fwhmwstd = fwhmw.std()\n r50mean = np.mean(fwhmw/2.)\n r50std = np.std(fwhmw/2.)\n pl.figure(figsize=(15,10))\n pl.subplot(2,3,1)\n pl.hist(e1,bins=20,normed=True)\n pl.xlabel('e1')\n pl.title('mean: '+str(round(e1mean,6))+' std: '+str(round(e1std,5)))\n pl.subplot(2,3,2)\n pl.hist(e2,bins=20,normed=True)\n pl.xlabel('e2')\n pl.title('mean: '+str(round(e2mean,6))+' std: '+str(round(e2std,5)))\n pl.subplot(2,3,3)\n pl.hist(whiskerw,bins=20,normed=True)\n pl.xlabel('whisker')\n pl.title('mean: '+str(round(whiskerwmean,5))+' std: '+str(round(whiskerwstd,5)))\n pl.subplot(2,3,4)\n pl.hist(fwhmw,bins=20,normed=True)\n pl.xlabel('fwhm')\n pl.title('mean: '+str(round(fwhmwmean,5))+' std: '+str(round(fwhmwstd,5)))\n pl.subplot(2,3,5)\n pl.hist(fwhmw/2.,bins=20,normed=True)\n pl.xlabel('r50')\n pl.title('mean: '+str(round(r50mean,5))+' std: '+str(round(r50std,5)))\n pl.figtext(0.7,0.4,'band: '+band)\n pl.figtext(0.7,0.37,'zenith angle: '+zenith +' deg')\n pl.figtext(0.3,0.95,'Perfect focus/alignment, 0.7 arcsec fwhm circular 
seeing',fontsize=18,color='red')\n pl.savefig(filename[0:-6]+'png')\n np.savetxt(filename[0:-6]+'txt',[e1mean,e1std,e2mean,e2std,whiskerwmean,whiskerwstd,fwhmwmean,fwhmwstd,r50mean,r50std],fmt='%10.5f')\n pl.close()\n return '---done !-----'", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def preprocess_adata(adata, n_top_genes=5000):\n sc.pp.filter_cells(adata, min_genes=200)\n sc.pp.filter_genes(adata, min_cells=3)\n sc.pp.normalize_total(adata, target_sum=1e4)\n sc.pp.log1p(adata)\n sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes)\n adata.raw = adata\n adata = adata[:, adata.var.highly_variable]\n return adata", "def normalized_hist_dataframe(data_column, bin_number=50, output_dir='/var/tmp/'):\n db = celldatabase.load_hdf(\"/var/tmp/figuresdata/2019astrpi/direct_and_indirect_cells.h5\")\n # dbTuned = db.query(studyparams.TUNING_FILTER)\n D1DB = db.query(studyparams.D1_CELLS)\n nD1DB = db.query(studyparams.nD1_CELLS)\n D1DB = D1DB.replace([np.inf, -np.inf], np.nan)\n nD1DB = nD1DB.replace([np.inf, -np.inf], np.nan)\n D1DB = D1DB[D1DB[data_column].notnull()]\n nD1DB = nD1DB[nD1DB[data_column].notnull()]\n D1Hist, D1bins = np.histogram(D1DB[data_column], bins=bin_number, density=True)\n nD1Hist, nD1bins = np.histogram(nD1DB[data_column], bins=bin_number, density=True)\n center = (D1bins[:-1] + D1bins[1:])/2\n width = 0.7 * (D1bins[1] - D1bins[0])\n D1Median = np.median(D1DB[data_column])\n nD1Median = np.median(nD1DB[data_column])\n\n fig = plt.gcf()\n fig.clf()\n figFilename = \"{}\".format(data_column) # Do not include extension\n figFormat = 'png' # 'pdf' or 'svg'\n figSize = [5, 5]\n\n ax = fig.add_subplot()\n ax.bar(center, D1Hist, width=width, align='center', label='D1', alpha=0.5)\n ax.bar(center, nD1Hist, width=width, align='center', label='nD1', alpha=0.5)\n ax.legend()\n ax.set_xlabel('{} value'.format(data_column))\n ax.set_ylabel('Frequency')\n ax.set_title(data_column)\n ymin, ymax = ax.get_ybound()\n ax.vlines(D1Median, 
ymin, ymax, color=\"Green\")\n ax.vlines(nD1Median, ymin, ymax, color=\"Red\")\n\n extraplots.save_figure(figFilename, figFormat, figSize, output_dir, 'w')\n plt.show()\n return fig, ax", "def main(args):\n samples = TQSampleFolder.loadLazySampleFolder(args.input_file + \":\" + args.sample_folder)\n reader = TQSampleDataReader(samples)\n\n # this list contains 2-tuples with (\"CutName\", \"HistogramName\")\n hist_info = list()\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Signal_Rebin\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Top\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Other\", \"[ee+mm+em+me]\"))\n\n processes = list()\n processes.append(Process(\"sig\", r\"Signal\", \"/sig/{channel}/{campaign}/nonres\"))\n processes.append(Process(\"bkg\", r\"Background\", \"/bkg/{channel}/{campaign}/[prompt+nonprompt]\"))\n\n output_directory = \"results/mva_yields_soverb/\"\n ensure_directory(output_directory)\n output_file_name = os.path.splitext(os.path.basename(args.input_file))[0] + \".tex\"\n\n with LaTeXFile.from_rel_path(os.path.join(output_directory, output_file_name)) as tex:\n tex.document_settings.append(\"landscape\")\n tex.write_header()\n tex.begin_document()\n\n logging.info(\"Getting per-bin significances\")\n for cut_name, histogram_name, channel in hist_info:\n logging.info(\"Processing %s/%s\", cut_name, histogram_name)\n hists = dict()\n for process in processes:\n campaign = \"[c16a+c16d+c16e]\"\n hists[process.name] = reader.getHistogram(\n process.path.format(channel=channel, campaign=campaign), \"{}/{}\".format(cut_name, histogram_name)\n )\n\n table_data = list()\n sigs = list()\n hist_sig = hists[\"sig\"]\n hist_bkg = hists[\"bkg\"]\n for i in range(1, hist_sig.GetNbinsX() + 1):\n s = hist_sig.GetBinContent(i)\n b = hist_bkg.GetBinContent(i)\n\n if b != 0:\n # z = math.sqrt(2 * ((s + b) * math.log(1 + s / b) - s))\n z = s / math.sqrt(b)\n sigs.append(z)\n else:\n z = \"--\"\n table_data.append((i, z))\n logging.debug(\"Bin % 2d: %g\", i, z)\n table_data.append((\"Total\", math.sqrt(sum([z ** 2 for z in sigs]))))\n\n tex.write_table(\n table_data,\n [\"{}\", \"{:.4f}\"],\n [\"Bin\", \"Significance\"],\n \"{}/{}\".format(cut_name, histogram_name),\n format_rows=\"cc\",\n )\n\n tex.end_document()\n tex.write_make_file()", "def preprocessing(self):\n # Standardizing series names\n self.raw.columns = ['stress', 'strain', 'e']\n # Removing percentage format to strain values\n if self.strainPercent:\n self.raw['strain'] = self.raw['strain'].divide(100)\n # On-table (initial) void ratio\n self.e_0 = self.raw['e'].iloc[0]\n return", "def bin_histogram (modified_df, v_to_bin):\n for variable in v_to_bin:\n # Remove Nas\n df = modified_df[modified_df[variable].notnull()]\n # Create surv filter\n hist_filter = df[\"Survived\"] == 1\n # Create Histogram\n plt.hist([df[variable][hist_filter], df[variable][~hist_filter]],\n stacked=True, label=['Survived', 'Not Survived'], color=['g', 'r'])\n plt.legend()\n # Save and reset fig\n plt.savefig(variable+\"_histogram\")\n plt.clf()", "def _perturbation(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate((_scales, sp.zeros(1)))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales", "def create_histograms(PrimaryParticleName, LongVectorSignals, 
LongVectorSignalsCher,\n\tShortVectorSignals, ShortVectorSignalsCher, LongScinMaxFiber, LongCherMaxFiber, \n\tShortScinMaxFiber, ShortCherMaxFiber, EnergyTotContainer, MaxEnergyTotContainer):\n\n\t#Set ROOT histograms\n\tTH1LongScin = TH1F(\"LongScintillation\", PrimaryParticleName, 100, 0.0, LongScinMaxFiber+200.)\n\tTH1LongCher = TH1F(\"LongCherenkov\", PrimaryParticleName, 100, 0.0, LongCherMaxFiber+200.)\n\tTH1ShortScin = TH1F(\"ShortScintillation\", PrimaryParticleName, 100, 0.0, ShortScinMaxFiber+200.)\n\tTH1ShortCher = TH1F(\"ShortCherenkov\", PrimaryParticleName, 100, 0.0, ShortCherMaxFiber+200.)\n\tTH1EnergyTot = TH1F(\"EnergyTot\", PrimaryParticleName, 100, MaxEnergyTotContainer-10000., MaxEnergyTotContainer+500.) \n\n\t#Fill histograms in for loop\n\tfor index in range(len(LongVectorSignals)):\n\t\tTH1LongScin.Fill(LongVectorSignals[index])\n\t\tTH1LongCher.Fill(LongVectorSignalsCher[index])\n\t\tTH1ShortScin.Fill(ShortVectorSignals[index])\n\t\tTH1ShortCher.Fill(ShortVectorSignalsCher[index])\n\t\tTH1EnergyTot.Fill(EnergyTotContainer[index])\n\n\t#Draw + DrawOptions\n\tStyle = gStyle\n\tStyle.SetOptStat(1) #Show statistics\n\tStyle.SetLineWidth(1)\n\tXAxis = TH1LongScin.GetXaxis() #TH1LongScin\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1LongScin.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1LongScin.Draw()\n\tgPad.SaveAs(\"EnergyLongScin.eps\")\n\tXAxis = TH1LongCher.GetXaxis() #TH1LongCher\n\tXAxis.SetTitle(\"# Cher p.e.\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1LongCher.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1LongCher.Draw()\n\tgPad.SaveAs(\"CherpeLong.eps\")\n\tXAxis = TH1ShortScin.GetXaxis() #TH1ShortScin\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1ShortScin.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1ShortScin.Draw()\n\tgPad.SaveAs(\"EnergyShortScin.eps\")\n\tXAxis = TH1ShortCher.GetXaxis() #TH1ShortCher\n\tXAxis.SetTitle(\"# Cher p.e.\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1ShortCher.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1ShortCher.Draw()\n\tgPad.SaveAs(\"CherpeShort.eps\")\n\tXAxis = TH1EnergyTot.GetXaxis() #TH1EnergyTot\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1EnergyTot.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1EnergyTot.Draw()\n\tgPad.SaveAs(\"EnergyTot.eps\")", "def CL_histogram_MMD(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10):\n ## 0) Parsing the inputs\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n \n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n m = sketch.size\n # 1) Construct the A matrix\n A = 1j*np.zeros((m,bins)) # Pre-allocation\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n box = domain.copy()\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n A[:,p] = fourierSketchOfBox(box,Phi,nb_cat_per_dim) \n \n # 1.b) cast to real \n Ari = np.r_[A.real, A.imag]\n \n # 2) create b vector\n b = np.r_[sketch.real, sketch.imag]\n \n # 3) solve the optimization problem\n def _f_grad(x):\n r = Ari@x-b\n f = 0.5*np.linalg.norm(r)**2\n grad = Ari.T@r\n return (f,grad)\n \n # Starting point\n x0 = np.ones(bins)/bins\n # Linear constraints\n 
A_constr = np.zeros((bins,bins))\n l_constr = 0*np.ones(bins) # Positive constraints\n A_constr[:bins,:bins] = np.eye(bins)\n upper_bound = 5 # weird that it must be large\n u_constr = upper_bound*np.ones(bins) # Sum-to one constraints\n constr = LinearConstraint(A_constr,l_constr,u_constr)\n\n # Solve\n sol = minimize(_f_grad, x0, method='trust-constr', bounds=None, constraints=constr, jac=True, options={'verbose': 0})\n\n return project_probabilitySimplex(sol.x)", "def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)", "def FE_discretize_numeric_variables(train, bin_dict, test='', strategy='kmeans',verbose=0):\r\n df = copy.deepcopy(train)\r\n test = copy.deepcopy(test)\r\n num_cols = len(bin_dict)\r\n nrows = int((num_cols/2)+0.5)\r\n #print('nrows',nrows)\r\n if verbose:\r\n fig = plt.figure(figsize=(10,3*num_cols))\r\n for i, (col, binvalue) in enumerate(bin_dict.items()):\r\n new_col = col+'_discrete'\r\n if strategy == 'gaussian':\r\n kbd = GaussianMixture(n_components=binvalue, random_state=99)\r\n df[new_col] = 
kbd.fit_predict(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.predict(test[[col]]).astype(int)\r\n else:\r\n kbd = KBinsDiscretizer(n_bins=binvalue, encode='ordinal', strategy=strategy)\r\n df[new_col] = kbd.fit_transform(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.transform(test[[col]]).astype(int)\r\n if verbose:\r\n ax1 = plt.subplot(nrows,2,i+1)\r\n ax1.scatter(df[col],df[new_col])\r\n ax1.set_title(new_col)\r\n if not isinstance(test, str):\r\n return df, test\r\n else:\r\n return df", "def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()", "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def model_hist(xvar, yvar, modfuncs, nbins=95, crange=(-10.0, 10.0)):\n hists = [TH2D(\n 'hmodel{0}{1}'.format(c, i), 'hmodel{0}{1}'.format(c, i),\n nbins, crange[0], crange[1],\n nbins, crange[0], crange[1]\n ) for (i, c) in ic]\n for xbin in range(nbins):\n xlo = hists[0].GetXaxis().GetBinLowEdge(xbin+1)\n xup = hists[0].GetXaxis().GetBinUpEdge(xbin+1)\n for ybin in range(nbins):\n ylo = hists[0].GetXaxis().GetBinLowEdge(ybin+1)\n yup = hists[0].GetXaxis().GetBinUpEdge(ybin+1)\n name = 'bin_{0}_{1}'.format(xbin, ybin)\n xvar.setRange(name, xlo, xup)\n yvar.setRange(name, ylo, yup)\n for hist, modfunc in zip(hists, modfuncs):\n integral = modfunc.createIntegral(\n RooArgSet(xvar, yvar),\n RooFit.NormSet(RooArgSet(xvar, yvar)),\n RooFit.Range(name)\n ).getVal()\n hist.SetBinContent(xbin+1, ybin+1, integral)\n return hists", "def hist_of_numeric(X):\n figsize(10,3)\n for col in get_numeric(X):\n print(col)\n X[col].hist(bins=50)\n show()", "def _compute_meta_descriptors(self, outputs):\n for v in self._variances:\n outputs[v + '_meta_desc'] = self._compute_meta_descriptor(\n outputs['raw_' + v], self.vlad_layers[v])", "def create_general_hist(self, obj):\n hist = cv2.calcHist([obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n print cv2.normalize(hist).flatten()\n return cv2.normalize(hist).flatten()", "def ANN_Make_Binned_ROC_histograms(title,model, x_data, pT, CSV, bins, PU_range='full',addFeature=False):\n nbins = 60\n\n ANN_hist_list = []\n CSV_hist_list = []\n for bin_ in range(len(bins)-1):\n ANN_hist_list.append(rt.TH1D(\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"ANN_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),nbins,0,1))\n\n\tif addFeature == False:\n\t\tpred_y = model.predict(ANN_functional_shape(x_data))\n\telif addFeature == \"pT\":\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[pT/200])\n\telif addFeature == \"PV\":\n\t\tassert x_data.shape[1] == 21, \"wrong x_data shape: PV cannot be found\"\n\t\tpred_y = model.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\telse:\n\t\tprint \"invalid feature selection\"\n\t\treturn None\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\n for n,particle in enumerate(x_data):\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n if bin_numbers[n] == -100: continue\n 
ANN_hist_list[int(bin_numbers[n])].Fill(pred_y[n])\n CSV_hist_list[int(bin_numbers[n])].Fill(CSV[n])\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in ANN_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)", "def preprocess_vhb(batch, nr_samples=None, snr=None,\n magnification=None, bands_to_sortout=None):\n X, y = preprocess2(batch, nr_samples, snr,\n magnification, bands_to_sortout)\n\n return X, y[\"vhb\"].values", "def residual_hist(hdata, hmodel, scaling, crange=(-10.0, 10.0)):\n nbins = hdata[0].GetNbinsX()\n scDat = []\n scMod = []\n scRes = []\n for j, (i,c) in enumerate(ic):\n dat = TH2D(\n 'dataHist{0}{1}'.format(c, i), 'dataHist{0}{1}'.format(c, i),\n nbins, crange[0]*scaling, crange[1]*scaling,\n nbins, crange[0]*scaling, crange[1]*scaling\n )\n mod = TH2D(\n 'modelHist{0}{1}'.format(c, i), 'modelHist{0}{1}'.format(c, i),\n nbins, crange[0]*scaling, crange[1]*scaling,\n nbins, crange[0]*scaling, crange[1]*scaling\n )\n res = TH2D(\n 'residualHist{0}{1}'.format(c, i), 'residualHist{0}{1}'.format(c, i),\n nbins, crange[0]*scaling, crange[1]*scaling,\n nbins, crange[0]*scaling, crange[1]*scaling\n )\n for xbin in range(nbins):\n for ybin in range(nbins):\n m = hmodel[j].GetBinContent(xbin, ybin)\n mod.SetBinContent(xbin, ybin, m)\n d = hdata[j].GetBinContent(xbin, ybin)\n if d <= 0.0:\n continue\n dat.SetBinContent(xbin, ybin, d)\n if m < d:\n e = hdata[j].GetBinErrorLow(xbin, ybin)\n else:\n e = hdata[j].GetBinErrorUp(xbin, ybin)\n res.SetBinContent(xbin, ybin, (d-m)/e)\n scDat.append(dat)\n scMod.append(mod)\n scRes.append(res)\n return scDat, scMod, scRes", "def efficient_Make_Binned_ROC_histograms(title, data, bins, PU_range='full'):\n diff_ran = (-25,25)\n diff_bins = diff_ran[1]-diff_ran[0]\n ratio_ran = (0,10)\n ratio_bins = 60\n\n Diff_hist_list = []\n Ratio_hist_list = []\n CSV_hist_list = []\n ZeroDiv_list = []\n for bin_ in range(len(bins)-1):\n Diff_hist_list.append(rt.TH1D(\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),diff_bins,diff_ran[0],diff_ran[1]))\n Ratio_hist_list.append(rt.TH1D(\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,ratio_ran[0],ratio_ran[1]))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,0,1))\n ZeroDiv_list.append(0)\n\n for particle in data:\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n\n Diff_hist_list[bin_number].Fill(particle[8]-particle[5])\n CSV_hist_list[bin_number].Fill(particle[1])\n if particle[17] != 0:\n L4_L1 = particle[20]/particle[17]\n Ratio_hist_list[bin_number].Fill(L4_L1)\n else:\n ZeroDiv_list[bin_number] += 1\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in Diff_hist_list:\n hist.Write()\n for hist in Ratio_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)\n\n csv_file = open(\"Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title),\"wb\")\n writer = csv.writer(csv_file)\n writer.writerow(ZeroDiv_list)\n csv_file.close()\n print \"saved zero division 
occurences in Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title)", "def meanSysSignedHist(*hists): ### keep track of the signs somehow for systematics in cards\n central = hists[0]\n variations = hists[1:]\n if not variations:\n raise Exception(\"No Variations Given! %s, %s\"%(a, variations) )\n systs = [ ]\n sign = 1\n for var in variations:\n syst_hist = SignedSysHistFunc(central, var)\n syst_hist.Scale(sign)\n #syst_hist.SetBit( syst_hist.kIsAverage ) ## with this when hists are added they are averaged\n systs.append( syst_hist )\n sign *= -1\n #print systs\n #for sh in systs[1:] :\n # systsum.Add(sh)\n abssysts = [ th2Func(h, lambda x: abs(x) ) for h in systs ]\n #signsysts = [ th2Func(h, lambda x: abs(x)/x) for h in systs]\n\n abssystmean = abssysts[0].Clone()\n abssystmean.SetBit(abssystmean.kIsAverage)\n signedsum = systs[0].Clone()\n for abssyst in abssysts[1:]:\n abssyst.SetBit(abssyst.kIsAverage)\n abssystmean.Add( abssyst )\n for systh in systs[1:]:\n signedsum.Add(systh)\n signs = th2Func( signedsum, lambda x: abs(x) )\n signs.Divide( signedsum ) \n\n systmean = abssystmean.Clone()\n systmean.Multiply( signs )\n \n print 'made this change' \n #systmean = th2Func( systmean, lambda x: x if float(x) >0 or float(x)<0 else (0.0000001 if x==0 else 0 ) ) # Set to small value if 0, set to 0 if nan\n systmean = th2Func( systmean, lambda x: x if float(x) >0 or float(x)<0 else (0.0000001 if x==0 else 0 ) ) # Set to small value if 0, set to 0 if nan\n \n return systmean, systs", "def normaliseTraces(ds, verbose=False):\n normalisation = measureUnfoldedLevel(ds)\n if verbose:\n points = getIndexedTraces(ds)\n pyplot.figure(figsize=(9, 5.6))\n pyplot.hist2d(points[:,0], points[:,1], \n bins=(70*2, 50*2),\n range = [[0, 900], [-0.45, 0.05]],\n cmax = 100000/2 # clip max\n );\n pyplot.plot([0,700], [normalisation]*2, \"-r\")\n \n return ds.trace.copy() / np.abs(normalisation)", "def normalize(histogram):\n nbins = histogram.GetNbinsX()\n integral = histogram.Integral(1,nbins)\n newhist = histogram.Clone()\n newhist.Reset()\n for bin in range(1,nbins+1):\n ibinY = histogram.GetBinContent(bin)\n newhist.SetBinContent(bin,ibinY/integral)\n return newhist", "def preprocess(self, **kwargs):\n otypes = np.array(kwargs.get('observation_types', []))\n if np.array_equiv(\n otypes, self._observation_types) and self._preprocessed:\n return\n self._observation_types = otypes\n self._mags = np.array(kwargs.get('magnitudes', []))\n self._fds = np.array(kwargs.get('fluxdensities', []))\n self._cts = np.array(kwargs.get('countrates', []))\n self._e_u_mags = kwargs.get('e_upper_magnitudes', [])\n self._e_l_mags = kwargs.get('e_lower_magnitudes', [])\n self._e_mags = kwargs.get('e_magnitudes', [])\n self._e_u_fds = kwargs.get('e_upper_fluxdensities', [])\n self._e_l_fds = kwargs.get('e_lower_fluxdensities', [])\n self._e_fds = kwargs.get('e_fluxdensities', [])\n self._u_fds = kwargs.get('u_fluxdensities', [])\n self._e_u_cts = kwargs.get('e_upper_countrates', [])\n self._e_l_cts = kwargs.get('e_lower_countrates', [])\n self._e_cts = kwargs.get('e_countrates', [])\n self._u_cts = kwargs.get('u_countrates', [])\n self._upper_limits = np.array(kwargs.get('upperlimits', []),\n dtype=bool)\n self._observed = np.array(kwargs.get('observed', []), dtype=bool)\n self._o_types = self._observation_types[self._observed]\n\n # Magnitudes first\n # Note: Upper limits (censored data) currently treated as a\n # half-Gaussian, this is very approximate and can be improved upon.\n self._e_u_mags = [\n 
kwargs['default_upper_limit_error']\n if (e is None and eu is None and self._upper_limits[i]) else\n (kwargs['default_no_error_bar_error']\n if (e is None and eu is None) else (e if eu is None else eu))\n for i, (e, eu) in enumerate(zip(self._e_mags, self._e_u_mags))\n ]\n self._e_l_mags = [\n kwargs['default_upper_limit_error']\n if (e is None and el is None and self._upper_limits[i]) else\n (kwargs['default_no_error_bar_error']\n if (e is None and el is None) else (e if el is None else el))\n for i, (e, el) in enumerate(zip(self._e_mags, self._e_l_mags))\n ]\n\n # Ignore upperlimits for countrate if magnitude is present.\n self._upper_limits[self._observation_types[\n self._observed] == 'magcount'] = False\n self._e_u_cts = [\n c if (e is None and eu is None) else\n e if eu is None else eu\n for i, (c, e, eu) in enumerate(zip(\n self._cts, self._e_cts, self._e_u_cts))\n ]\n self._e_l_cts = [\n c if (e is None and el is None) else\n e if el is None else el\n for i, (c, e, el) in enumerate(zip(\n self._cts, self._e_cts, self._e_l_cts))\n ]\n\n # Now flux densities\n self._e_u_fds = [\n v if (e is None and eu is None and self._upper_limits[i]) else\n (v if (e is None and eu is None) else (e if eu is None else eu))\n for i, (e, eu, v) in enumerate(\n zip(self._e_fds, self._e_u_fds, self._fds))\n ]\n self._e_l_fds = [\n 0.0 if self._upper_limits[i] else (\n v if (e is None and el is None) else (e if el is None else el))\n for i, (e, el, v) in enumerate(\n zip(self._e_fds, self._e_l_fds, self._fds))\n ]\n self._fds = np.array([\n x / flux_density_unit(y) if x is not None else None\n for x, y in zip(self._fds, self._u_fds)\n ])\n self._e_u_fds = [\n x / flux_density_unit(y) if x is not None else None\n for x, y in zip(self._e_u_fds, self._u_fds)\n ]\n self._e_l_fds = [\n x / flux_density_unit(y) if x is not None else None\n for x, y in zip(self._e_l_fds, self._u_fds)\n ]\n\n self._preprocessed = True", "def _collect_params(self) -> np.ndarray:\n res = np.array([0.]*(self.dimensions))\n res[0] = self.model.rbf.variance\n res[1:-1] = self.model.rbf.lengthscale\n res[-1] = self.model.Gaussian_noise.variance\n return res", "def compare_histograms(df, df_norm, fignum, fields, binns):\n fig = plt.figure(num=fignum, figsize=(18,18))\n fig.suptitle('Histogram before and after normalization', fontsize=22)\n ax1 = fig.add_subplot(421, axisbg='0.94')\n ax2 = fig.add_subplot(422, axisbg='0.94')\n ax3 = fig.add_subplot(423, axisbg='0.94')\n ax4 = fig.add_subplot(424, axisbg='0.94')\n ax5 = fig.add_subplot(425, axisbg='0.94')\n ax6 = fig.add_subplot(426, axisbg='0.94')\n ax7 = fig.add_subplot(427, axisbg='0.94')\n ax8 = fig.add_subplot(428, axisbg='0.94')\n alphas = [0.33, 0.33, 0.6, 0.6, 0.28, 0.28, 0.6, 0.6]\n hues = ['g','y','g','y','g','y','g','y']\n all_axes = plt.gcf().axes\n # print list(enumerate(fields))\n for i, ax in list(enumerate(all_axes)):\n ax.set_ylabel(\"count\", fontsize=10)\n for ticklabel in ax.get_xticklabels() + ax.get_yticklabels():\n ticklabel.set_fontsize(14)\n g = np.int(math.ceil(np.float(i)/2))\n \n if (len(fields)*2-1) >= i:\n if i in (0,2,4,6):\n ax.hist(df[fields[i-g]].dropna().values, bins=binns[i-g], color=hues[i],alpha=alphas[i])\n print \" plot \" + str(df[fields[i-g]].name)\n ax.set_title(df[fields[i-g]].name, fontsize=20)\n #if (len(fields)*2) >= i: \n if i in (1,3,5,7):\n #try:\n ax.hist(df_norm[fields[i-g]].dropna().values, bins=binns[i-g], color=hues[i],alpha=alphas[i])\n ax.set_title(\"As normalized:\", fontsize=20)\n \n try: # Save the figure as one file\n filename 
= \"data/vis/histogram_compare\" + \"_\" + str(fignum) + \".png\"\n plt.savefig(filename)\n print \"= Vis Output: \", filename\n print\n except IOError:\n print \"WARNING: Failed to write out file: \", filename\n print\n plt.close(fig)", "def getAbsNormalizationFactor(deltaE_wkspace,min,max):\n global reducer\n van_mass=reducer.get_default_parameter('vanadium-mass') \n \n Integration(InputWorkspace=deltaE_wkspace,OutputWorkspace='van_int',RangeLower=min,RangeUpper=max,IncludePartialBins='1')\n input_ws = mtd[deltaE_wkspace]\n ei_monovan = input_ws.getRun().getLogData(\"Ei\").value\n data_ws=mtd['van_int']\n nhist = data_ws.getNumberHistograms()\n #print nhist\n\n signal1_sum = 0.0\n weight1_sum = 0.0 \n signal2_sum = 0.0\n weight2_sum = 0.0 \n signal3_sum = 0.0\n weight3_sum = 0.0 \n signal4_sum = 0.0\n weight4_sum = 0.0 \n\n \n ic=0;\n izerc=0;\n for i in range(nhist):\n try:\n det = data_ws.getDetector(i)\n except Exception:\n continue\n if det.isMasked():\n continue\n\n signal = data_ws.readY(i)[0]\n error = data_ws.readE(i)[0]\n \n if signal != signal: #ignore NaN\n continue\n if ((error<=0) or (signal<=0)): # ignore Inf (0 in error are probably 0 in sign\n izerc+=1\n continue\n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i -- this what Libisis had\n weight = 1.0/error\n signal1_sum += signal * weight\n weight1_sum += weight \n # Guess which minimizes the value sum(n_i-n)^2/Sigma_i^2\n weight2 = 1.0/(error*error)\n signal2_sum += signal * weight2\n weight2_sum += weight2 \n # Guess which assumes puassonian distribution with Err=Sqrt(signal) and calculates \n # the function: N_avrg = 1/(DetEfficiency_avrg^-1)*sum(n_i*DetEfficiency_i^-1)\n # where the DetEfficiency = WB_signal_i/WB_average WB_signal_i is the White Beam Vanadium \n # signal on i-th detector and the WB_average -- average WB vanadium signal. 
\n # n_i is the modified signal \n err_sq = error*error\n weight = err_sq/signal\n signal3_sum += err_sq\n weight3_sum += weight\n # Guess which estimatnes value sum(n_i^2/Sigma_i^2)/sum(n_i/Sigma_i^2) TGP suggestion from 12-2012\n signal4_sum += signal*signal/err_sq\n weight4_sum += signal/err_sq\n \n ic += 1 \n #print 'signal value =' ,signal\n #print 'error value =' ,error \n #print 'average ',signal_sum \n #---------------- Loop finished\n \n if( weight1_sum==0.0 or weight2_sum == 0.0 or weight3_sum == 0.0 or weight4_sum == 0.0) :\n print \"WB integral has been calculated incorrectrly, look at van_int workspace and input workspace: \",deltaE_wkspace\n raise IOError(\" divided by 0 weight\")\n \n integral_monovanLibISIS=signal1_sum / weight1_sum\n integral_monovanSigSq =signal2_sum / weight2_sum \n integral_monovanPuason =signal3_sum / weight3_sum \n integral_monovanTGP =signal4_sum / weight4_sum\n #integral_monovan=signal_sum /(wbVan_sum)\n van_multiplier = (float(reducer.van_rmm)/float(van_mass))\n absnorm_factorLibISIS = integral_monovanLibISIS * van_multiplier\n absnorm_factorSigSq = integral_monovanSigSq * van_multiplier \n absnorm_factorPuason = integral_monovanPuason * van_multiplier \n absnorm_factorTGP = integral_monovanTGP * van_multiplier \n #print 'Monovan integral :' ,integral_monovan \n \n if ei_monovan >= 210.0: \n xsection = 421 # vanadium cross-section in mBarn/sR (402 mBarn/Sr) (!!!modified to fit high energy limit?!!!)\n else: # old textbook cross-section for vanadium for ei=20mEv\n xsection = 400 + (ei_monovan/10) \n\n absnorm_factorLibISIS /= xsection\n absnorm_factorSigSq /= xsection \n absnorm_factorPuason /= xsection \n absnorm_factorTGP /= xsection \n \n sample_multiplier = (float(reducer.sample_mass)/float(reducer.sample_rmm))\n absnorm_factorLibISIS= absnorm_factorLibISIS *sample_multiplier\n absnorm_factorSigSq = absnorm_factorSigSq *sample_multiplier\n absnorm_factorPuason = absnorm_factorPuason *sample_multiplier\n absnorm_factorTGP = absnorm_factorTGP *sample_multiplier\n \n if (absnorm_factorLibISIS !=absnorm_factorLibISIS)|(izerc!=0): # It is an error, print diagnostics:\n if (absnorm_factorLibISIS !=absnorm_factorLibISIS):\n print '--------> Absolute normalization factor is NaN <----------------------------------------------'\n else:\n print '--------> Warning, Monovanadium has zero spectra <--------------------------------------------' \n print '--------> Processing workspace: ',deltaE_wkspace\n print '--------> Monovan Integration range : min=',min,' max=',max\n print '--------> Summarized: ',ic,' spectra with total value: ',signal2_sum, 'and total weight: ',weight2_sum\n print '--------> Dropped: ',izerc,' empty spectra'\n print '--------> Van multiplier: ',van_multiplier,' sample multiplier: ',sample_multiplier, 'and xsection: ',xsection \n print '--------> Abs norm factors: LibISIS: ',absnorm_factorLibISIS,' Sigma^2: ',absnorm_factorSigSq\n print '--------> Abs norm factors: Puasonian: ',absnorm_factorPuason, ' TGP: ',absnorm_factorTGP\n print '----------------------------------------------------------------------------------------------' \n else:\n DeleteWorkspace(Workspace=deltaE_wkspace)\n DeleteWorkspace(Workspace=data_ws)\n return (absnorm_factorLibISIS,absnorm_factorSigSq,absnorm_factorPuason,absnorm_factorTGP)", "def analyseHill(ekindicts):\n\n import pylab\n pylab.rc('text', usetex=True)\n f=pylab.figure()\n f.suptitle('n distributions- No linear (case 3)')\n i=1\n for e in ekindicts:\n ekindata = ekindicts[e]\n proteins = ekindata.keys()\n 
nvals = []\n for prot in proteins:\n edata = ekindata[prot]\n E = EkinProject(data=edata)\n for d in E.datasets:\n fdata = E.getMetaData(d)\n if fdata != None and fdata.has_key('model'):\n if fdata['model'] == 'Modified Hill':\n n=fdata['n']\n if n<5 and n>-5:\n nvals.append(n)\n print 'n=', n\n\n ax = f.add_subplot(2,2,i)\n n, b, patches = pylab.hist(nvals, 30, histtype='bar', alpha=0.8)\n std = round(numpy.std(nvals), 2)\n ave = round(numpy.mean(nvals), 2)\n ax.set_title(e +' mean= '+str(ave)+r' $\\sigma$= '+str(std))\n i+=1\n f.subplots_adjust(hspace=0.4)\n f.savefig('n_hist.png')\n return", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def getDataStats(vars, vardict):\n \n # for scale variables, make dictionary entries for min and max values\n # for categorical variables make dictionary entries for category lists based on value labels\n scaletemplate = r\"\"\"SCALE: linear(dim(1), min(%(themin)s), max(%(themax)s))\"\"\"\n cattemplate = r\"\"\"SCALE: cat(dim(1), include(%s))\"\"\"\n statsdict = {}\n datadict = {}\n scalevars = [v for v in vars if vardict[v].VariableLevel == \"scale\"]\n catvars = [v for v in vars if vardict[v].VariableLevel != \"scale\"]\n\n if scalevars:\n dsname = spss.ActiveDataset() # ensure activate dataset has a name\n if dsname == \"*\":\n dsname = \"D\" + str(random.random())\n spss.Submit(\"\"\"DATASET NAME %(dsname)s.\"\"\" % locals())\n\n # use AGGREGATE to calculate global min and max\n ads = \"S\"+ str(random.random())\n aggspecs = []\n for i, v in enumerate(scalevars):\n aggspecs.append(\"\"\"/V%(i)smin = MIN(%(v)s)\n/V%(i)smax=MAX(%(v)s)\"\"\" % locals())\n aggspecs = \"\\n\".join(aggspecs)\n spss.Submit(r\"\"\"DATASET DECLARE %(ads)s.\nAGGREGATE /OUTFILE=\"%(ads)s\"\n%(aggspecs)s.\nDATASET ACTIVATE %(ads)s.\"\"\" % locals())\n stats = spssdata.Spssdata(names=False).fetchall()\n spss.Submit(\"\"\"DATASET CLOSE %(ads)s.\n DATASET ACTIVATE %(dsname)s.\"\"\" % locals())\n \n for i, v in enumerate(scalevars):\n themin, themax = stats[0][i*2], stats[0][i*2+1]\n if themin is not None and themax is not None:\n statsdict[v] = scaletemplate % locals()\n datadict[v] = (themin, themax)\n \n for v in catvars:\n values = list(vardict[v].ValueLabels.keys())\n if values:\n vlvalues = ['\"' + item.replace('\"', '\\\\\"') + '\"' for item in values] # protect interior \" characters\n statsdict[v] = cattemplate % \",\".join(vlvalues)\n \n return statsdict, datadict", "def checkdyn():\n args = parse_args()\n config = ScatterConfig()\n paths = ScatterPath(args.infile)\n dyn = dyndat(paths)\n\n fig, axes = plt.subplots(1,1)\n nvib = list()\n for i in range(dyn.Nalgorithm):\n print(i)\n final_E_dist = dyn.final_Ek_dist + dyn.final_Ep_dist\n final_Ex_dist_filt = \\\n final_E_dist[i,:,0][np.where(np.logical_or(\n dyn.final_r_dist[i,:,1] < dyn.boundary_rmin[1],\n dyn.final_r_dist[i,:,1] > dyn.boundary_rmax[1])\n )]\n final_n_vib_dist = np.round((final_Ex_dist_filt / 0.008) - 0.5)\n nvib.append(final_n_vib_dist)\n\n ax = axes\n ax.hist(nvib, color=config.colors[:dyn.Nalgorithm],\n bins=tuple(range(0,21,1)),\n align='left', rwidth= 0.8,\n range=(0, 21), normed=True)\n ax.legend(dyn.algorithms)\n ax.set_xlim(0, 20)\n ax.set_xticks([i for i in range(0,21,1)])\n ax.set_xticklabels([str(int(i)) for i in range(0,21,1)])\n ax.set_ylim(0, 0.5)\n\n ## -- save fig -- ##\n 
saveto=args.savedir\n if not saveto and not HAVE_DISPLAY:\n saveto = './'\n\n if saveto:\n if not args.savedir.endswith('/'):\n args.savedir += '/'\n fig.savefig(args.savedir + savename + '.png', dpi=2 * fig.dpi)\n\n if HAVE_DISPLAY:\n plt.show()", "def ANN_efficiency_vs_PU(title, x_data, pT, CSV, model, ANN_Cuts, Ratio_Cuts, CSV_Cuts, bins, y_max, pT_Cut=200, BG=False, DrawTitle=False):\n assert x_data.shape[1]==21, \"x_data does not contain PV. Make sure it is made from a PU sample and has shape (x, 21).\"\n\tassert x_data.shape[0] == len(pT) == len(CSV), \"data inputs need to have the same length\"\n\tassert len(ANN_Cuts) == len(Ratio_Cuts) == len(CSV_Cuts) == len(bins)-1, \"cuts need to have the same length and be compatible with amount of bins\"\n\n ran = (0,80)\n nbins = 80\n import array\n\tif BG:\n\t\tbins_ = array.array('d',[0.0, 11.0]+range(19,41,8)+[42.0, 52.0, 80])\n\telse:\n \tbins_ = array.array('d',[0.0, 11.0]+range(15,41,4)+[42.0, 52.0, 58.0, 65.0, 80])\n\n\tif pT_Cut >= 1200:\n\t\tbins_ = array.array('d',[0.0, 20.0, 40.0, 80.0])\n\n\n #make histograms of efficiency vs PU\n AllJets_Hist = rt.TH1D(\"AllJets\",\"AllJets\",nbins,ran[0],ran[1])\n ANN_Hist = rt.TH1D(\"ANN\",\"ANN\",nbins,ran[0],ran[1])\n Ratio_Hist = rt.TH1D(\"Ratio\",\"Ratio\",nbins,ran[0],ran[1])\n CSV_Hist = rt.TH1D(\"CSV\",\"CSV\",nbins,ran[0],ran[1])\n\n\tAllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_Hist = ANN_Hist.Rebin(len(bins_)-1,\"ANN\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n \n\tpred_y = model.predict(ANN_functional_shape(x_data))\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\t\n\tfor i,pT_value in enumerate(pT):\n\t\t\tif pT_value < pT_Cut: continue\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJets_Hist.Fill(x_data[i,-1])\n\t\t\tif CSV[i] >= CSV_Cuts[bin_numbers[i]]: CSV_Hist.Fill(x_data[i,-1])\n\t if pred_y[i] >= ANN_Cuts[bin_numbers[i]]: ANN_Hist.Fill(x_data[i,-1])\n\t\t\tif x_data[i,12] != 0:\n\t\t\t\tL_R = x_data[i,15]/float(x_data[i,12])\n\t\t\t\tif L_R >= Ratio_Cuts[bin_numbers[i]]: Ratio_Hist.Fill(x_data[i,-1])\n\t \n\t'''\t\t\n AllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_Hist = ANN_Hist.Rebin(len(bins_)-1,\"ANN\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n\t'''\n #Make Graphs and draw them\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle == False: rt.gStyle.SetOptTitle(0)\n legend = rt.TLegend(0.1,0.9,0.35,0.75)\n ANN_Graph = rt.TGraphAsymmErrors()\n Ratio_Graph = rt.TGraphAsymmErrors()\n CSV_Graph = rt.TGraphAsymmErrors()\n if DrawTitle: Ratio_Graph.SetTitle(title+\"_vs_PU_pT{}{}\".format('jet',pT_Cut))\n ANN_Graph.Divide(ANN_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n Ratio_Graph.Divide(Ratio_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n CSV_Graph.Divide(CSV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n ANN_Graph.SetLineColor(3)\n Ratio_Graph.SetLineColor(2)\n CSV_Graph.SetLineColor(4)\n legend.AddEntry(ANN_Graph, \"ANN\", \"LEP\")\n legend.AddEntry(Ratio_Graph, \"L4/L1\", \"LEP\")\n legend.AddEntry(CSV_Graph, \"CSV\", \"LEP\")\n Ratio_Graph.GetXaxis().SetTitle(\"#PV\")\n if BG:\n\t\tRatio_Graph.GetYaxis().SetTitle('mistag rate')\n\telse:\t\n\t\tRatio_Graph.GetYaxis().SetTitle('efficiency')\n Ratio_Graph.GetYaxis().SetTitleOffset(1.5)\n\tRatio_Graph.SetMinimum(0.)\n Ratio_Graph.SetMaximum(y_max)\n Ratio_Graph.Draw()\n ANN_Graph.Draw(\"SAME\")\n 
CSV_Graph.Draw(\"SAME\")\n legend.Draw()\n canvas.SaveAs('Thesis_Plots/'+title+\"_vs_PU_pT{}{}.png\".format('jet',pT_Cut))", "def statistik(teilchen, x_abs):\n count = len(teilchen[0, :])\n R = len(teilchen[:, 0])\n erwartung = np.empty(count)\n varianz = np.empty(count)\n norm = np.empty(count)\n for i in range(count):\n orte = teilchen[:, i]\n orte = orte[orte < x_abs]\n erwartung[i] = np.mean(orte)\n varianz[i] = np.var(orte, ddof=1)\n norm[i] = len(orte)/R\n return erwartung, varianz, norm", "def defstuff():\n\t\n\tglobal PA, PB, col, col2, rng, xlimits, nbin, lPbw, WJK, outTab\n\t\n\tPA = ['Per1', 'Per2', 'Per3', 'Per4', 'Per5', 'Per6', 'Per7', 'Per8', 'Per9', 'Per10'] # Period columns for A sample\n\tPB = ['P_1', 'P_2', 'P_3'] # Period columns for B sample\n\t# logPB = ['logP_1', 'logP_2', 'logP_3'] \n\tcol = {1:'r', 2:'g', 3:'b'} \n\tcol2 = {1:'m', 2:'y', 3:'k'}\n\trng = (8,14) # Magnitude range\n\txlimits = (0.3 ,3.0) # X-axis plot limits\n\tbw = 0.01 # histogram bin width -- not global!\n\tnbin = (max(rng)-min(rng))/bw # How many bins for histogram.\n\n\t################# CAREFUL!!!!! #####################\n\tlPbw = 0.025 # log period bin width\n\t\n\toutTab = Table(np.zeros((len(B), 11)), names=('ID', 'WJK', 'est_mag', 'delta_mag', 'delta1', 'delta2', 'delta3', 'KDE_mag', 'KDEdelta_mag', 'sigma', 'nstar'), dtype=('string', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64', 'float64' ))", "def fn_photonflux_hist(file_name,folder,mean_photons_per_sec):\n import numpy as np\n import matplotlib.pyplot as plt\n from scipy.stats import lognorm\n from pylab import text\n \n n_molecules=len(mean_photons_per_sec)\n \n #Plot photon flux\n figure_name=file_name+'_photonsPerSecond'\n ax = plt.subplot(111)\n num_bins = np.linspace(int(min(mean_photons_per_sec)), int(max(mean_photons_per_sec)), int(np.sqrt(len(mean_photons_per_sec))*4))\n ax.hist(mean_photons_per_sec, bins=num_bins, density=True, color='darkorange',edgecolor='black')\n \n #Fit curve\n sigma,loc,mean = lognorm.fit(mean_photons_per_sec, floc=0)\n pdf = lognorm.pdf(num_bins, sigma, loc, mean) #sigma=shape, mu=np.log(scale)\n ax.plot(num_bins, pdf, 'k',linestyle='--')\n \n #Edit plot\n plt.xlabel('Photon flux ($s^{-1}$)', fontname='Arial', fontsize=12)\n plt.ylabel('Probability density', fontname='Arial', fontsize=12)\n plt.xticks(fontname='Arial', fontsize=12)\n plt.yticks(fontname='Arial', fontsize=12)\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n text(0.75, 0.95,'μ='+str(round(mean,2))+' photons $s^{-1}$',horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n text(0.40, 0.95,'N='+str(n_molecules),horizontalalignment='center', verticalalignment='center',transform = ax.transAxes,fontname='Arial', fontsize=12)\n plt.savefig(folder+'/Figures/PDFs'+ '/' + figure_name + '.pdf', dpi=500)\n plt.savefig(folder+'/Figures/PNGs'+ '/' + figure_name + '.png', dpi=500)\n \n return (plt.show())", "def get_prior_on_masses(likeli: LikelihoodGrid, plot_type, bin_edges):\n\n unit_sorted_samples = make_normalised_samples(likeli.n_samples)\n n_mu = len(likeli.muArray)\n\n n_mass_bins = len(bin_edges) - 1\n histogram_shape = (n_mass_bins, n_mass_bins)\n prior_histogram = np.zeros(histogram_shape)\n\n for i, mu in enumerate(likeli.muArray):\n for j, sigma in enumerate(likeli.sigmaArray):\n samples = get_sample_pseudorandom_data(mu, sigma, unit_sorted_samples)\n 
prior_histogram += get_prior_from_samples(samples, bin_edges, plot_type)\n\n print('Completed mean mass', i, ' of ', n_mu)\n\n return prior_histogram / np.sum(prior_histogram)", "def histogram_quartiles(cur,variable1, variable2, table):\n print(\"Sentiment distribution based on position of sentence in description\")\n sentence_scores = select(cur,variable1, table) # multiple list of strings\n sentence_mags = select(cur,variable2, table) # multiple list of strings\n \n quartileBottom_score = []\n quartileBottom_mag = []\n halfMiddle_score = []\n halfMiddle_mag = []\n quartileTop_score = []\n quartileTop_mag = []\n \n for i in range(len(sentence_scores)):\n sentence_score = eval(sentence_scores[i]) # simple list of floats\n sentence_mag = eval(sentence_mags[i])\n for i in range(len(sentence_score)):\n if i < round((0.25*len(sentence_score))):\n quartileBottom_score.append(sentence_score[i])\n quartileBottom_mag.append(sentence_mag[i])\n if i > round((0.75*len(sentence_score))):\n quartileTop_score.append(sentence_score[i])\n quartileTop_mag.append(sentence_mag[i])\n else:\n halfMiddle_score.append(sentence_score[i])\n halfMiddle_mag.append(sentence_mag[i])\n \n n_groups = 3\n means_score = (np.average(quartileBottom_score), np.average(halfMiddle_score), np.average(quartileTop_score))\n# std_score = (np.std(quartileBottom_score), np.std(halfMiddle_score), np.std(quartileTop_score))\n\n means_mag = (np.average(quartileBottom_mag), np.average(quartileTop_mag), np.average(quartileTop_mag))\n# std_mag = (np.std(quartileBottom_mag), np.std(quartileTop_mag), np.std(quartileTop_mag))\n fig, ax = plt.subplots()\n \n print(\"Means Sentiment Score: \", means_score)\n print(\"Means Magnitude: \", means_mag)\n \n index = np.arange(n_groups)\n bar_width = 0.35\n \n opacity = 0.4\n error_config = {'ecolor': '0.3'}\n \n rects1 = ax.bar(index, means_score, bar_width,\n alpha=opacity, color='b',\n error_kw=error_config,\n label='Sentiment')\n \n rects2 = ax.bar(index + bar_width, means_mag, bar_width,\n alpha=opacity, color='r',\n error_kw=error_config,\n label='Magnitude')\n \n# ax.set_xlabel('Quartiles')\n ax.set_ylabel('Scores')\n ax.set_title('Scores by sentiment and magnitude')\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(('Bottom quartile', 'Middle half', 'Top quartile')) \n ax.legend((rects1[0], rects2[0]), ('Sentiment', 'Magnitude'))\n \n \n fig.tight_layout()\n plt.show()", "def meanAdjust(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n postchis = []\n prechis = []\n model_complete = []\n meas_complete = []\n Bvec_complete = []\n Sol_complete = []\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) 
)\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n\n if numd < 2:\n continue\n\n Neq = np.eye(numZD,dtype=float) * 0.001\n Apart = np.zeros((numd,numZD))\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = 1.\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n \n prechis.append(np.sqrt(prechi/numd))\n postchis.append(np.sqrt(postchi/numd))\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd))\n model = np.dot(Apart,Sol)\n\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #overallPrechi = np.dot(data[:,3].T,data[:,3])\n numd = np.size(meas_complete)\n #print(\"OVERALL STATS:\", np.mean(prechis),np.mean(postchis),np.sqrt(overallPrechi/numD))\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n f = loglikelihood(meas_complete,model_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n stats['bic'] = bic\n\n return pwl_All, pwlSig_All,stats", "def raw_processing(self):\n well_dilution_code = {'e': 5, 'f': 6, 'g': 7, 'h': 8}\n\n for well in self.data_labels:\n x = 10 ** well_dilution_code[well[-1]]\n y = self.film_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n z = self.plank_count[self.data_labels.index(well)] * 5 * x * (20 / self.plated_volume)\n\n self.film_conc.append(y)\n self.plank_conc.append(z)", "def prep(self):\n print\n print 'Filtering rawdata to data as masked array...'\n# using 0 as flag\n# self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.rawdata[:self.nints,:, self.chans,:] == 0j)\n# using standard flags\n self.data = n.ma.masked_array(self.rawdata[:self.nints,:, self.chans,:], self.flags[:self.nints,:, self.chans,:] == 0) # mask of True for flagged data (flags=0 in tpipe, which is flags=False in Miriad and flags=True in MS)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real #dataph is summed and detected to form TP beam at phase center, multi-pol\n self.min = self.dataph.min()\n self.max = self.dataph.max()\n print 'Shape of data:'\n print self.data.shape\n print 'Dataph min, max:'\n print self.min, self.max\n\n self.freq = self.freq_orig[self.chans]\n\n # set up ur tracks (lol)\n self.dmtrack0 = {}\n self.twidths = {}\n for dmbin in xrange(len(self.dmarr)):\n self.dmtrack0[dmbin] = self.dmtrack(self.dmarr[dmbin],0) # track crosses high-freq channel in first integration\n self.twidths[dmbin] = 0\n for k in self.dmtrack0[dmbin][1]:\n self.twidths[dmbin] = max(self.twidths[dmbin], len(n.where(n.array(self.dmtrack0[dmbin][1]) == 
k)[0]))\n\n print 'Track width in time: '\n for dmbin in self.twidths:\n print 'DM=%.1f, twidth=%d. Iteration could step by %d/2.' % (self.dmarr[dmbin], self.twidths[dmbin], self.twidths[dmbin])", "def OF1_CalculateNormalizedHistogram(image):\n\n raw = OF1_CalculateRawHistogram(image)\n norm = np.zeros(256, np.float_)\n\n for i in range(256):\n norm[i] = raw[i] / image.size\n\n return norm", "def normalize(image, xbar, sigma):\n image = image.transpose(2, 0, 1) # Switch to channel-first\n mean, std = np.array(xbar), np.array(sigma)\n image = (image - mean[:, None, None]) / std[:, None, None]\n return image.transpose(1, 2, 0)", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def basicProcessing(volume, sigma, order, output, mode, truncate):\n\n\n #### Filters ###\n\n result = gaussian_filter(input=volume, sigma=sigma, order=order, output=output, mode=mode, truncate=truncate)\n\n val = threshold_otsu(result)\n print(\"val : {}\".format(val))\n\n mask = np.zeros(volume.shape, dtype=np.int8)\n mask[volume > val] = 1\n #mask = mask.astype(int)\n\n print(\"mask shape: {}\".format(mask.shape))\n print(mask)\n\n\n #### Morphological Operation ###\n\n # Opening removes small objects\n r1 = binary_opening(mask, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n # Closing removes small holes\n r2 = binary_closing(r1, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n\n # 3x3x3 structuring element with connectivity 4 or 8\n struct1 = generate_binary_structure(3, 1) # no diagonal elements\n #struct1 = generate_binary_structure(3, 2) # with diagonal elements\n ############struct1 = struct1.astype(int)\n print (struct1)\n\n\n #r3 = binary_dilation(r2).astype(int)\n r3 = binary_dilation(r2, structure=struct1).astype(int) # using a structure element\n\n # Erosion removes objects smaller than the structure\n r4 = binary_erosion(r3, structure=np.ones((3, 3, 3))).astype(np.int8)\n\n\n #### Measurements ###\n\n struct2 = np.ones((3, 3, 3), dtype=np.int8)\n labeled_array, num_features = label(r4, structure=struct2)\n\n #print(labeled_array)\n print(num_features)\n\n return labeled_array, num_features", "def whiskerStat_multiext(filename,sigma,noise=False,mag=None,exptime=None):\n hdu=pf.open(filename)\n data = []\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n Mcc=np.zeros(Nobj)\n Mrr = np.zeros(Nobj)\n Mrc = np.zeros(Nobj)\n r50 = np.zeros(Nobj)\n for i in range(Nobj):\n print i\n imgo = hdui.data[i][4:].reshape(160,160)\n psf = rebin(imgo,(40,40))\n if noise == True:\n gain = 0.21 # convert electrons to ADU\n zeropoint = 26.794176 # r band, from Nikolay\n objectphoton = exptime*10**(0.4*(zeropoint - mag))\n skyphoton = 8.460140*exptime\n bkg = skyphoton*gain\n img = (psf * objectphoton + skyphoton)*gain\n img = img + add_imageNoise(img) - bkg\n else:\n img = psf\n Mcc[i],Mrr[i],Mrc[i]=complex2ndMoments(img,sigma)\n r50[i] = mfwhm(img)[5]\n data.append([np.mean(Mcc),np.mean(Mrr),np.mean(Mrc),np.mean(r50)])\n data = np.array(data)\n datamean 
=np.array([robust_mean(data[:,0]),robust_mean(data[:,1]),robust_mean(data[:,2]),robust_mean(data[:,3])])\n #r50 = 0.5*2.35482*np.sqrt((datamean[0]+datamean[1])/2.)*0.27\n r50moffat = datamean[3]*0.27\n whk = ((datamean[0]-datamean[1])**2 + (2.*datamean[2])**2)**(0.25)*0.27\n phi = np.rad2deg(0.5*np.arctan2(2.*datamean[2],(datamean[0]-datamean[1])))\n datasubmean = data - datamean\n whkrms = (robust_mean((datasubmean[:,0] - datasubmean[:,1])**2 + 4.*datasubmean[:,2]**2))**(0.25)*0.27\n np.savetxt(filename[0:-6]+'txt',[r50moffat,whk,phi,whkrms,datamean[0],datamean[1],datamean[2]],fmt='%10.5f')\n return '---done !-----'", "def monitor_normalize(sansdata,mon0=1e8):\n monitor=sansdata.metadata['run.moncnt']\n result=sansdata.data*mon0/monitor\n res=SansData()\n res.data=result\n res.metadata=deepcopy(sansdata.metadata)\n res.qx=copy(sansdata.qx)\n res.qy=copy(sansdata.qy)\n res.theta=copy(sansdata.theta)\n return res", "def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use", "def distribution_sentences_histo(cur, variable1, variable2, table):\n print(\"Sentiment distribution of sentences in bottom quartile (beginning of text) and top quartile (end of text) in description\")\n # Retrieve data from DB\n # sentences scores are stored as string list \n sentence_scores = select(cur,variable1, table) # multiple list of strings\n sentence_mags = select(cur,variable2, table) # multiple list of strings\n \n quartileBottom_score = []\n quartileBottom_mag = []\n# halfMiddle_score = []\n# halfMiddle_mag = []\n quartileTop_score = []\n quartileTop_mag = []\n \n \n for i in range(len(sentence_scores)):\n sentence_score = eval(sentence_scores[i]) # simple list of floats\n sentence_mag = eval(sentence_mags[i])\n for i in range(len(sentence_score)):\n if i < round((0.25*len(sentence_score))):\n quartileBottom_score.append(sentence_score[i])\n quartileBottom_mag.append(sentence_mag[i])\n if i > round((0.75*len(sentence_score))):\n quartileTop_score.append(sentence_score[i])\n quartileTop_mag.append(sentence_mag[i])\n# else:\n# halfMiddle_score.append(sentence_score[i])\n# halfMiddle_mag.append(sentence_mag[i])\n \n \n # create an empty figure object\n fig = plt.figure()\n # create a single axis on that figure\n ax = fig.add_subplot(1,1,1)\n # histogram the data and label the axes\n ax.set_xlabel(\"Sentiment Score\")\n ax.set_ylabel(\"Number of loans\")\n fig.suptitle('Distribution of bottom quartile sentences sentiment')\n ax.hist(quartileBottom_score)\n plt.show()\n \n # create an empty figure object\n fig = plt.figure()\n # create a single axis on that figure\n ax = fig.add_subplot(1,1,1)\n # histogram the data and label the axes\n ax.set_xlabel(\"Sentiment Score\")\n ax.set_ylabel(\"Number of loans\")\n fig.suptitle('Distribution of top quartile sentences sentiment')\n ax.hist(quartileTop_score)\n plt.show()", "def rescale(ds,normalization):\n norm_constant = float(normalization)/ds.max()\n ds *= norm_constant\n info_string = \"Intensities were then multiplied by %f to give a maximum intensity of %f.\" % (norm_constant,float(normalization))\n ds.add_metadata(\"_pd_proc_info_data_reduction\",info_string,append=True)", "def 
compute_and_plot_mass_hist(raw_images, gen_sample_raw, display=True, ax=None, log=True, lim=lim_hist, confidence=None, ylim=None, fractional_difference=False, algo='relative', loc=1, **kwargs):\n# raw_max = 250884 \n# lim = [np.log10(1), np.log10(raw_max/3)]\n y_real, y_fake, x = stats.mass_hist_real_fake(raw_images, gen_sample_raw, log=log, lim=lim, mean=False)\n# l2, logel2, l1, logel1 = stats.diff_vec(np.mean(y_real, axis=0), np.mean(y_fake, axis=0))\n# rel_diff = None\n# if confidence is not None:\n# rel_diff = stats.relative_diff(y_real, y_fake).mean()\n# if display:\n# print('Log l2 Mass histogram loss: {}\\n'\n# 'L2 Peak Mass histogram: {}\\n'\n# 'Log l1 Mass histogram loss: {}\\n'\n# 'L1 Mass histogram loss: {}'.format(logel2, l2, logel1, l1))\n npix = np.prod(raw_images.shape[1:])\n d = safe_fd(y_real,y_fake, npix)\n score = fd2score(d)\n if display:\n print('Mass Frechet Distance: {}\\n'\n 'Mass Score : {}\\n'.format(d, score))\n \n plot_cmp(x, y_fake, y_real, title='Mass histogram', xlabel='Number of particles', ylabel='Pixel count', ax=ax, xscale='log' if log else 'linear', shade=True, confidence=confidence, ylim=ylim, fractional_difference=fractional_difference, algorithm=algo, loc=loc)\n return score", "def stats_preprocessing(self):\n output = {'before_tot':[],\n 'before_unique':[],\n 'after_tot':[],\n 'after_unique':[]}\n for i in range(len(self.table)):\n description_raw = self.table.description.iloc[i].split(' ')\n clean_txt = self.table.clean_text.iloc[i].split(' ')\n\n output['before_tot'].append(len(description_raw))\n output['before_unique'].append(len(set(description_raw)))\n output['after_tot'].append(len(clean_txt))\n output['after_unique'].append(len(set(clean_txt)))\n \n print(\"\"\"Before preprocessing a description had on average {0} words with standard deviation {1}. \\n\nMoreover, the average of unique words was {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['before_tot']), 2), round(stdev(output['before_tot']), 2), \n round(mean(output['before_unique']), 2), round(stdev(output['before_unique'])), 2))\n \n print(\"\"\"\\nAfter preprocessing a description has on average {0} words with standard deviation {1}. 
\\n \nThe average of unique words is now {2} and the standard deviation {3}.\"\"\"\\\n .format(round(mean(output['after_tot']), 2), round(stdev(output['after_tot']), 2), \n round(mean(output['after_unique']),2), round(stdev(output['after_unique']), 2)))\n\n return output", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def make_histograms(df, suffix, fignum, fields, binns):\n fig = plt.figure(num=fignum, figsize=(18,18))\n fig.suptitle('Histograms of ' + str(suffix) + ' features', fontsize=22)\n ax1 = fig.add_subplot(421, axisbg='0.94')\n ax2 = fig.add_subplot(422, axisbg='0.94')\n ax3 = fig.add_subplot(423, axisbg='0.94')\n ax4 = fig.add_subplot(424, axisbg='0.94')\n ax5 = fig.add_subplot(425, axisbg='0.94')\n ax6 = fig.add_subplot(426, axisbg='0.94')\n ax7 = fig.add_subplot(427, axisbg='0.94')\n ax8 = fig.add_subplot(428, axisbg='0.94')\n alphas = [0.33, 0.33, 0.6, 0.6, 0.28, 0.28, 0.6, 0.6]\n hues = ['g','b','b','g','g','b','b','g']\n all_axes = plt.gcf().axes\n for i, ax in list(enumerate(all_axes)):\n ax.set_ylabel(\"count\", fontsize=10)\n for ticklabel in ax.get_xticklabels() + ax.get_yticklabels():\n ticklabel.set_fontsize(14)\n if (len(fields) - 1) >= i:\n if suffix == \"raw\":\n transformed = df[fields[i]].dropna().values\n elif suffix == \"log10\":\n transformed = np.log10(df[fields[i]].dropna().values)\n elif suffix == \"log\":\n transformed = np.log(df[fields[i]].dropna().values)\n \n #try:\n ax.hist(transformed, bins=binns[i], color=hues[i],alpha=alphas[i])\n ax.set_title(df[fields[i]].name, fontsize=20)\n #except:\n # print \"WARNING: An error occurred in composing {} Figure %d\".format(str(suffix)) % fignum\n # return\n \n try: # Save the figure as one file\n filename = \"data/vis/histogram\" + \"_\" + str(fignum) + \"_\" + str(suffix) + \".png\"\n plt.savefig(filename)\n print \"= Vis Output: \", filename\n except IOError:\n print \"WARNING: Failed to write out file: \", filename\n print\n plt.close(fig)", "def histmaker(\r\n #filename\r\n filename='hist/monotonic',\r\n # increment number, control flag, increment, temperature\r\n nstep=None, ictrl=None, eqincr=None, temp=278.,\r\n #velocity gradient control flag (1:known, 0:unknown)\r\n iudot=[[1,1,1],[1,0,1],[1,1,0]],\r\n #velocity gradient tensor\r\n udot=[[1.,0.,0.],[0.,-0.5,0.],[0.,0.,-0.5]],\r\n #Stress control flag (1:known, 0:unknown)\r\n iscau=[0,1,1,0,0,0],\r\n #Cauchy stress tensor\r\n scauchy=[0.,0.,0.,0.,0.,0.,], mode=None\r\n ):\r\n \r\n FILE = open(filename,'w')\r\n FILE.writelines('%i %i %f %f '%(nstep, ictrl, eqincr, temp))\r\n FILE.writelines(' nsteps ictrl eqincr temp\\n')\r\n FILE.writelines('* boundary conditions *\\n')\r\n for i in range(3):\r\n for j in range(3):\r\n FILE.writelines('%8i '%(iudot[i][j]))\r\n FILE.writelines('\\n')\r\n FILE.writelines('\\n')\r\n\r\n for i in range(3):\r\n for j in range(3):\r\n FILE.writelines('%15.9f '%(udot[i][j]))\r\n FILE.writelines('\\n')\r\n FILE.writelines('\\n')\r\n\r\n \r\n FILE.writelines('%15i %15i %15i\\n'%(iscau[0],iscau[5],iscau[4]))\r\n FILE.writelines('%15s %15i %15i\\n'%(' ', iscau[1],iscau[3]))\r\n FILE.writelines('%15s %15s %15i\\n\\n'%(' ', ' ', iscau[2]))\r\n \r\n FILE.writelines('%15i %15i %15i\\n'%(scauchy[0], scauchy[5], scauchy[4]))\r\n FILE.writelines('%15s %15i %15i\\n'%(' ', scauchy[1], scauchy[3]))\r\n FILE.writelines('%15s %15s 
%15i\\n'%(' ', ' ', scauchy[2]))", "def histopi(data):\n dataset = discrete_dataset(data)\n theoretical_dataset = [theoretical_effective(dataset)]*10\n observed = plt.bar(numpy.arange(len(dataset)) - 0.4, dataset, color=\"blue\", width=0.4)\n theoretical = plt.bar(numpy.arange(len(theoretical_dataset)), theoretical_dataset, color=\"deepskyblue\", width=0.4)\n plt.legend([observed, theoretical], [\"effectifs observes\", \"effectifs theoriques\"])\n plt.xlabel('pi digits')\n plt.ylabel('occurrence')\n plt.axis([-0.7, 9.7, 0, 130000])\n plt.savefig(\"report/khi2_histopi.png\", bbox_inches='tight')\n return dataset", "def pm_histogram(fig, ax, data, title, dwarf_pmra=None, dwarf_pmdec=None, cut=None, colorbar=True, append_title=\"\"):\n if cut is not None:\n ra, dec, pmra, pmdec, parallax, = cut_on_parallax(*data, cut)\n else:\n ra, dec, pmra, pmdec, parallax, _ = data\n\n # bin data from gaia in 2d histogram\n bound = 5\n bins = np.linspace(-bound, bound, num=20*bound)\n counts, xedges, yedges, im = ax.hist2d(pmra, pmdec, bins=(bins, bins), vmin=0, cmap='gnuplot')\n print(title, str(counts.max()))\n title = fix_names(title)\n # plot pm motion of dwarf from simbad\n if dwarf_pmra is not None:\n dwarf_pmra, dwarf_pmdec = fix_pms(title, dwarf_pmra, dwarf_pmdec)\n ax.plot(dwarf_pmra, dwarf_pmdec, marker='X', markersize=10, color='xkcd:white', alpha=1)\n\n ax.set_title(title + append_title)\n ax.set_xlabel(r\"Right ascension proper motion [mas/yr])\")\n ax.set_ylabel(r\"Declination proper motion [mas/yr]\")\n\n cbar = colorbar_for_subplot(fig, ax, cm.gnuplot, image=im)\n cbar.ax.set_ylabel(\"Bin counts\", rotation=270, labelpad=10)\n\n return counts, xedges, yedges, im", "def preprocess_pipeline(self):\n if self.trim_size > 0:\n self.df = trim_initial_timeseries(\n df=self.df,\n trim_size=self.trim_size,\n aornums=self.df.aornum.unique()\n )\n\n if self.timebinsize > 0:\n med_df, std_df = bin_df_time(self.df, timebinsize=self.timebinsize)\n\n # Option 1\n self.df = med_df.copy()\n\n \"\"\"\n # Option 2\n self.df = med_df.copy()\n for colname in df.columns:\n if 'noise' in colname:\n std_colname = colname.replace('noise', 'flux')\n self.df[colname] = std_df[std_colname]\n \"\"\"\n\n del med_df, std_df\n\n if self.df is None:\n tso_data = load_from_wanderer(\n planet_name=self.planet_name,\n channel=self.channel,\n aor_dir=self.aor_dir,\n aper_key='gaussian_fit_annular_mask_rad_2.5_0.0',\n centering_key=self.centering_key\n )\n else:\n tso_data = load_from_df(\n self.df,\n aper_key=self.aper_key,\n centering_key=self.centering_key\n )\n\n isfinite = np.isfinite(tso_data.times)\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.fluxes))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.flux_errs))\n # isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.aornums))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.ycenters))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.xcenters))\n isfinite = np.bitwise_and(isfinite, np.isfinite(tso_data.npix))\n\n times = tso_data.times[isfinite]\n fluxes = tso_data.fluxes[isfinite]\n flux_errs = tso_data.flux_errs[isfinite]\n aornums = tso_data.aornums[isfinite]\n ycenters = tso_data.ycenters[isfinite]\n xcenters = tso_data.xcenters[isfinite]\n npix = tso_data.npix[isfinite]\n\n med_flux = np.median(fluxes)\n flux_errs = flux_errs / med_flux\n fluxes = fluxes / med_flux\n\n arg_times = times.argsort()\n fluxes = fluxes[arg_times]\n flux_errs = flux_errs[arg_times]\n aornums = aornums[arg_times]\n times = times[arg_times]\n 
ycenters = ycenters[arg_times]\n xcenters = xcenters[arg_times]\n npix = npix[arg_times]\n\n if self.standardise_centers:\n # Center by assuming eclipse is near center\n ycenter = (ycenter - ycenter.mean()) / ycenter.std()\n xcenter = (xcenter - xcenter.mean()) / xcenter.std()\n\n if self.standardise_times:\n # Center by assuming eclipse is near center\n times = times - times.mean()\n\n if self.standardise_fluxes:\n # Center by assuming eclipse is near center\n med_flux = np.median(fluxes)\n std_flux = scale.mad(fluxes)\n\n idxkeep = np.abs(fluxes - med_flux) < self.n_sig * std_flux\n\n self.tso_data = ExoplanetTSOData(\n times=times[idxkeep],\n fluxes=fluxes[idxkeep],\n flux_errs=flux_errs[idxkeep],\n aornums=aornums[idxkeep],\n ycenters=ycenters[idxkeep],\n xcenters=xcenters[idxkeep],\n npix=npix[idxkeep]\n )\n\n # # TODO: Confirm if this is still required\n # self.tso_data.times = self.tso_data.times\n # self.tso_data.fluxes = self.tso_data.fluxes\n # self.tso_data.flux_errs = self.tso_data.flux_errs\n # self.tso_data.aornums = self.tso_data.aornums", "def hist_2_panel(path: str, outfile: str, kernel: str, s_above=5):\n sns.set(style=\"white\", color_codes=True, font_scale=1)\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n fig.suptitle(SUB_TITLE, y=0.97)\n plt.subplots_adjust(wspace=0.3)\n\n sig = np.load('{}/sig_{}.npy'.format(path, kernel))\n is_peak = sig > s_above\n sig_finite_flat = sig[np.isfinite(sig)].flatten()\n\n bins = 20\n\n axes[0].hist(sig_finite_flat, bins=bins)\n axes[0].set_title('%e pixels' % len(sig_finite_flat))\n axes[0].set_ylabel('number of pixels')\n\n axes[1].hist(sig_finite_flat, bins=bins, density=True)\n axes[1].set_title('sig > %0.1f= %d pixels' % (s_above, np.sum(is_peak)))\n axes[1].set_ylabel('normalized density of pixels')\n\n # norm distribution\n mu, variance = 0., 1.\n sigma = np.sqrt(variance)\n xmin, xmax = sig_finite_flat.min(), sig_finite_flat.max()\n x = np.linspace(mu + xmin * sigma, mu + xmax * sigma, 100)\n axes[1].plot(x, stats.norm.pdf(x, mu, sigma), lw=3)\n axes[1].set_xlim([xmin, 10])\n axes[1].set_ylim([1e-10, 1])\n\n for u in range(2):\n axes[u].set_xlabel('significance in %s' % kernel)\n axes[u].set_yscale('log')\n\n _filename = \"{}-{}.png\".format(outfile, kernel)\n plt.savefig(_filename, bbox_inches='tight', dpi=100)", "def plot_histplots(\n df: pd.DataFrame,\n var_type: str = \"quant\",\n drop_cols: list = None,\n figsize=(15, 20),\n sub_col=3,\n ticksize=15,\n div: int = 1,\n subplot=True,\n) -> sns.histplot:\n\n assert var_type == \"quant\" or \"qual\", \"var_type has to be either 'quant' or 'qual'.\"\n\n def print_error():\n print(f\"Input var_type: {var_type} is invalid.\")\n print(\"Valide var_type can only be 'quant' or 'qual'.\")\n return\n\n def print_col():\n print(f\"Number of {var_type}itaive columns: {df.shape[1]}\")\n return\n\n def create_fig():\n # create figure and axes based on the number of columns of the dataframe\n _, axes = plt.subplots(\n ceil(len(df.columns) / sub_col), sub_col, figsize=figsize\n )\n y = 0 # set counter\n return axes, y\n\n if not subplot:\n # plt.figure(figsize=figsize)\n if var_type == \"quant\":\n sns.histplot(x=df)\n elif var_type == \"qual\":\n sns.histplot(y=df)\n else:\n print_error()\n\n else:\n # drop unnecessary columns\n if drop_cols:\n df = df.drop(drop_cols, axis=1)\n\n # create relative dataframe according to the var_type\n if var_type == \"quant\":\n # keep only quantitative features\n df = create_quanti_df(df)\n print_col()\n axes, y = create_fig()\n # plot histplot for 
each column of data\n for col in df.columns:\n i, j = divmod(y, sub_col)\n # sns.histplot(x=df[col], ax=axes[i, j]).set_title(col, fontsize=20)\n sns.histplot(x=df[col][: int(len(df) / div)], ax=axes[i, j]).set_title(\n col, fontsize=20\n )\n y += 1\n elif var_type == \"qual\":\n # keep only qualitatve features\n df = create_quali_df(df)\n print_col()\n axes, y = create_fig()\n # plot histplot for each column of data\n for col in df.columns:\n i, j = divmod(y, sub_col)\n ax = axes[i, j]\n sns.histplot(y=df[col], ax=ax)\n ax.set_title(col, fontsize=20)\n ax.tick_params(axis=\"y\", which=\"major\", labelsize=ticksize)\n y += 1\n else:\n print_error()\n\n plt.tight_layout()\n plt.show()\n return", "def profile_mass(df,variable_xaxis, sign, peak, edge_left, edge_right, pdf_key):\n\n if sign == 1:\n keyword = 'signal'\n if sign == 0:\n keyword = 'background'\n\n df = df[(df[variable_xaxis] < edge_right) & (df[variable_xaxis] > edge_left)]\n\n for var in df.columns:\n if var != variable_xaxis:\n\n fig, axs = plt.subplots(figsize=(20, 15))\n\n bin_means, bin_edges, binnumber = b_s(df[variable_xaxis],df[var], statistic='mean', bins=25)\n bin_std, bin_edges, binnumber = b_s(df[variable_xaxis],df[var], statistic='std', bins=25)\n bin_count, bin_edges, binnumber = b_s(df[variable_xaxis],df[var], statistic='count',bins= 25)\n bin_width = (bin_edges[1] - bin_edges[0])\n bin_centers = bin_edges[1:] - bin_width/2\n\n nan_ind = np.where(np.isnan(bin_means))\n bin_centers = np.delete(bin_centers, nan_ind)\n bin_means = np.delete(bin_means, nan_ind)\n bin_count = np.delete(bin_count, nan_ind)\n bin_std = np.delete(bin_std , nan_ind)\n\n\n plt.errorbar(x=bin_centers, y=bin_means, yerr=(bin_std/np.sqrt(bin_count)), linestyle='none', marker='.',mfc='red', ms=10)\n\n\n\n plt.title('Mean of ' +var+ ' plotted versus bin centers of '+variable_xaxis+ \\\n '('+keyword+')', fontsize=25)\n plt.xlabel('Mass', fontsize=25)\n plt.ylabel(\"Mean of each bin with the SEM ($\\dfrac{bin\\ std}{\\sqrt{bin\\ count}}$) of bin\", fontsize=25)\n\n\n plt.vlines(x=peak,ymin=bin_means.min(),ymax=bin_means.max(), color='r', linestyle='-')\n\n\n fig.tight_layout()\n plt.savefig(pdf_key,format='pdf')\n\n pdf_key.close()", "def CreateHistFactoryFromMeasurement(measurement_dict, options=None):\n \n channel_list = measurement_dict[\"channel_list\"]\n measurement_info = measurement_dict[\"measurement_info\"]\n\n # Get the name of the sample\n # that is interpreted as signal\n signal_sample = str(measurement_info[\"signal_name\"])\n SigmaVarName = \"Sigma_\" + signal_sample + \"_OverSM\"; \n\n meas = ROOT.RooStats.HistFactory.Measurement(\"meas\", \"meas\")\n meas.SetPOI( SigmaVarName )\n meas.SetLumi( 1.0 )\n meas.SetLumiRelErr( float(measurement_info[\"lumi_uncertainty\"]) )\n meas.SetExportOnly( False )\n \n for chan_dict in channel_list:\n chan = ROOT.RooStats.HistFactory.Channel( str(chan_dict[\"name\"]) )\n chan.SetData( float(chan_dict['data']) )\n # chan.SetStatErrorConfig( 0.05, \"Poisson\" )\n \n for sample_dict in chan_dict[\"samples\"]:\n sample_name = sample_dict[\"name\"]\n sample = ROOT.RooStats.HistFactory.Sample( str(sample_name) )\n for syst in sample_dict[\"systematics\"]: \n sample.AddOverallSys( str(syst[\"name\"]), float(syst[\"FracDown\"]), float(syst[\"FracUp\"]) )\n sample.SetValue( float(sample_dict['value']) )\n if sample_name == signal_sample:\n sample.AddNormFactor( SigmaVarName, 1, 0, 3 )\n chan.AddSample( sample )\n \n meas.AddChannel( chan )\n \n # Now, print and do the fit\n meas.PrintTree();\n\n # Fit the 
workspace\n wspace = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast.MakeCombinedModel( meas );\n combined_config = wspace.obj(\"ModelConfig\");\n simData = wspace.data(\"obsData\");\n constrainedParams = combined_config.GetNuisanceParameters();\n POIs = combined_config.GetParametersOfInterest();\n \n # RooCmdArg(\"Minos\",kTRUE,0,0,0,0,0,&minosArgs,0)\n\n model = combined_config.GetPdf();\n fit_result = model.fitTo(simData, ROOT.RooCmdArg(\"Minos\",True,0,0,0,\"\",\"\",ROOT.RooArgSet(wspace.var(SigmaVarName)),0), \n ROOT.RooCmdArg(\"PrintLevel\",1), \n ROOT.RooCmdArg(\"Save\",True));\n\n # Get the Likelihood curve\n POI = wspace.var(SigmaVarName)\n png_string = CreateProfileLikelihoodPlot(model, simData, POI)\n\n # Get the Fitted Bins\n fitted_bins = getFittedBinHeights(combined_config, simData)\n\n # Delete the model\n wspace.IsA().Destructor( wspace )\n meas.IsA().Destructor( meas )\n\n return (fit_result, fitted_bins, png_string)", "def mi(self, lhs, rhs, cond=None):\n\t\tbins = np.amax(data, axis=0) # read levels for each variable\n\t\tif len(bins) == 1:\n\t\t\thist,_ = np.histogramdd(data, bins=(bins)) # frequency counts\n\t\t\tPx = hist/hist.sum()\n\t\t\tMI = -1 * np.sum( Px * np.log( Px ) )\n\t\t\treturn round(MI, 4)\n\t\t\t\n\t\tif len(bins) == 2:\n\t\t\thist,_ = np.histogramdd(data, bins=bins[0:2]) # frequency counts\n\n\t\t\tPxy = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPx = np.sum(Pxy, axis = 1) # P(X,Z)\n\t\t\tPy = np.sum(Pxy, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxPy = np.outer(Px,Py)\n\t\t\tPxy += 1e-7\n\t\t\tPxPy += 1e-7\n\t\t\tMI = np.sum(Pxy * np.log(Pxy / (PxPy)))\n\t\t\treturn round(MI,4)\n\t\telif len(bins) > 2 and conditional==True:\n\t\t\t# CHECK FOR > 3 COLUMNS -> concatenate Z into one column\n\t\t\tif len(bins) > 3:\n\t\t\t\tdata = data.astype('str')\n\t\t\t\tncols = len(bins)\n\t\t\t\tfor i in range(len(data)):\n\t\t\t\t\tdata[i,2] = ''.join(data[i,2:ncols])\n\t\t\t\tdata = data.astype('int')[:,0:3]\n\n\t\t\tbins = np.amax(data,axis=0)\n\t\t\thist,_ = np.histogramdd(data, bins=bins) # frequency counts\n\n\t\t\tPxyz = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPz = np.sum(Pxyz, axis = (0,1)) # P(Z)\n\t\t\tPxz = np.sum(Pxyz, axis = 1) # P(X,Z)\n\t\t\tPyz = np.sum(Pxyz, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxy_z = Pxyz / (Pz+1e-7) # P(X,Y | Z) = P(X,Y,Z) / P(Z)\n\t\t\tPx_z = Pxz / (Pz+1e-7) # P(X | Z) = P(X,Z) / P(Z)\t\n\t\t\tPy_z = Pyz / (Pz+1e-7) # P(Y | Z) = P(Y,Z) / P(Z)\n\n\t\t\tPx_y_z = np.empty((Pxy_z.shape)) # P(X|Z)P(Y|Z)\n\t\t\tfor i in range(bins[0]):\n\t\t\t\tfor j in range(bins[1]):\n\t\t\t\t\tfor k in range(bins[2]):\n\t\t\t\t\t\tPx_y_z[i][j][k] = Px_z[i][k]*Py_z[j][k]\n\t\t\tPxyz += 1e-7\n\t\t\tPxy_z += 1e-7\n\t\t\tPx_y_z += 1e-7\n\t\t\tMI = np.sum(Pxyz * np.log(Pxy_z / (Px_y_z)))\n\t\t\t\n\t\t\treturn round(MI,4)\n\t\telif len(bins) > 2 and conditional == False:\n\t\t\tdata = data.astype('str')\n\t\t\tncols = len(bins)\n\t\t\tfor i in range(len(data)):\n\t\t\t\tdata[i,1] = ''.join(data[i,1:ncols])\n\t\t\tdata = data.astype('int')[:,0:2]\n\n\t\t\thist,_ = np.histogramdd(data, bins=bins[0:2]) # frequency counts\n\n\t\t\tPxy = hist / hist.sum()# joint probability distribution over X,Y,Z\n\t\t\tPx = np.sum(Pxy, axis = 1) # P(X,Z)\n\t\t\tPy = np.sum(Pxy, axis = 0) # P(Y,Z)\t\n\n\t\t\tPxPy = np.outer(Px,Py)\n\t\t\tPxy += 1e-7\n\t\t\tPxPy += 1e-7\n\t\t\tMI = np.sum(Pxy * np.log(Pxy / (PxPy)))\n\t\t\treturn round(MI,4)", "def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = 
np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * weight_c\n hist2d += np.outer(c, c) * weight_c\n \n # calculate the frequency and the correlations \n ci_mean = hist1d / Z\n cij = hist2d / Z\n cij_corr = cij - np.outer(ci_mean, ci_mean)\n \n ci_var = np.diag(cij_corr)\n return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,\n 'cov': cij_corr}", "def preprocess(data,scale):\n ##log_transformation\n #data['log_sale_price'] = np.log(data['sale_price'])\n #data['log_lot_area'] = np.log(data['lot_area'])\n #data['house_age'] = data['year_sold']- data['year_built']\n \n y = data['stay']\n \n #sales['log_sale_price'] = np.log(sales['sale_price'])\n #sales['log_lot_area'] = np.log(sales['lot_area'])\n #sales['house_age'] = sales['year_sold']- sales['year_built']\n data_dummy = data.copy()\n \n #dummy coding\n data_scale = pd.get_dummies(data_dummy).drop(columns = ['stay'])\n\n \n #scale the value\n if scale == True:\n S = StandardScaler().fit(data_scale)\n data_scale = S.transform(data_scale)\n \n return y, data_scale", "def envSysSignedHist(*hists): ### keep track of the signs somehow for systematics in cards\n central = hists[0]\n variations = hists[1:]\n if not variations:\n raise Exception(\"No Variations Given! %s, %s\"%(a, variations) )\n systs = [ ]\n sign = 1\n for var in variations:\n syst_hist = SignedSysHistFunc(central, var)\n syst_hist.Scale(sign)\n #syst_hist.SetBit( syst_hist.kIsAverage ) ## with this when hists are added they are averaged\n systs.append( syst_hist )\n sign *= -1\n #print systs\n #for sh in systs[1:] :\n # systsum.Add(sh)\n abssysts = [ th2Func(h, lambda x: abs(x) ) for h in systs ]\n\n nx = central.GetNbinsX()\n ny = central.GetNbinsY()\n envhist = central.Clone()\n for x in xrange(nx):\n for y in xrange(ny):\n systvals = [ systhist.GetBinContent(x+1, y+1 ) for systhist in abssysts ]\n v = max(systvals)\n envhist.SetBinContent(x+1,y+1, v)\n return envhist, systs", "def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")", "def test_1d_cut():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test_cut.ft\")\n assert data.shape == (2766,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -12123.67\n assert round(data[1],2) == -8979.31\n assert round(data[100],2) == -7625.30\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[278.59, 10.03])", "def __init__(self, x, bin_edges, Nsamp):\n raw_vals, bin_edges = np.histogram(x, bins=bin_edges, normed=False)\n self.bin_edges = bin_edges\n self.bin_widths = np.diff(self.bin_edges)\n self.bin_centers = 0.5*(self.bin_edges[:-1] + self.bin_edges[1:])\n \n P, low, high = np.array([BinomialErrors(v, Nsamp) for v in raw_vals]).T\n self.raw_vals = P\n self.raw_low = low\n self.raw_high = high\n self.complete_vals = None\n self.malm_vals = None\n return", "def view_marginals_raw(data, label=''):\n variables = ['sao2', 'heartrate', 'respiration', 'systemicmean']\n\n num_gradations = 25\n # for cutoff in the gradations, what fraction of samples (at a given time point) fall into that cutoff bracket?\n grid = np.zeros(shape=(16, num_gradations, 4))\n grid = np.zeros(shape=(16, num_gradations, 4))\n assert data.shape[-1] == 4\n ranges = []\n for var in range(4):\n # allow for a different range per variable (if zoom)\n low = np.min(data[:, :, var])\n high = np.max(data[:, :, var])\n 
ranges.append([low, high])\n gradations = np.linspace(low, high, num_gradations)\n for (i, cutoff) in enumerate(gradations):\n # take the mean over samples\n frac = ((data[:, :, var] > low) & (data[:, :, var] <= cutoff)).mean(axis=0)\n low = cutoff\n grid[:, i, var] = frac\n\n fig, axarr = plt.subplots(nrows=4, ncols=1, sharex=True)\n axarr[0].imshow(grid[:, :, 0].T, origin='lower', aspect=0.5, cmap='magma_r')\n axarr[1].imshow(grid[:, :, 1].T, origin='lower', aspect=0.5, cmap='magma_r')\n axarr[2].imshow(grid[:, :, 2].T, origin='lower', aspect=0.5, cmap='magma_r')\n axarr[3].imshow(grid[:, :, 3].T, origin='lower', aspect=0.5, cmap='magma_r')\n\n for (var, ax) in enumerate(axarr):\n labels = np.round(np.linspace(ranges[var][0], ranges[var][1], num_gradations)[1::4], 0)\n ax.set_yticks(np.arange(num_gradations)[1::4])\n ax.set_yticklabels(labels)\n ax.set_ylabel(variables[var])\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('none')\n ax.set_adjustable('box-forced')\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.grid(b=True, color='black', alpha=0.2, linestyle='--')\n\n axarr[-1].set_xticks(np.arange(16)[::2])\n\n plt.tight_layout(pad=0.0, w_pad=-5.0, h_pad=0.1)\n plt.savefig(\"./experiments/eval/eICU_marginals_\" + label + \".png\")\n\n return True", "def preprocess(data):\n # Data Preprocessing\n data['GDP_scaled']=preprocessing.scale(data['GDP'])\n data['CLPRB_scaled']=preprocessing.scale(data['CLPRB'])\n data['EMFDB_scaled']=preprocessing.scale(data['EMFDB'])\n data['ENPRP_scaled']=preprocessing.scale(data['ENPRP'])\n data['NGMPB_scaled']=preprocessing.scale(data['NGMPB'])\n data['PAPRB_scaled']=preprocessing.scale(data['PAPRB'])\n data['PCP_scaled']=preprocessing.scale(data['PCP'])\n data['ZNDX_scaled']=preprocessing.scale(data['ZNDX'])\n data['OP_scaled']=preprocessing.scale(data['Nominal Price'])\n data['OP2_scaled']=preprocessing.scale(data['Inflation Adjusted Price'])\n\n return data", "def diff_bmf(mass_arr, volume, cvar_err, sim_bool, h1_bool): \n if sim_bool:\n mass_arr = np.log10(mass_arr)\n \n if not h1_bool:\n # changing from h=0.7 to h=1\n mass_arr = np.log10((10**mass_arr) / 2.041)\n \n if survey == 'eco':\n bin_min = np.round(np.log10((10**9.4) / 2.041), 1)\n bin_max = np.round(np.log10((10**11.8) / 2.041), 1)\n bins = np.linspace(bin_min, bin_max, 7)\n \n if survey == 'resolvea':\n bin_min = np.round(np.log10((10**9.4) / 2.041), 1)\n bin_max = np.round(np.log10((10**11.5) / 2.041), 1)\n bins = np.linspace(bin_min, bin_max, 7) \n\n if survey == 'resolveb':\n bin_min = np.round(np.log10((10**9.1) / 2.041), 1)\n bin_max = np.round(np.log10((10**11.5) / 2.041), 1)\n bins = np.linspace(bin_min, bin_max, 7) \n \n\n # Unnormalized histogram and bin edges\n counts, edg = np.histogram(mass_arr, bins=bins) # paper used 17 bins\n dm = edg[1] - edg[0] # Bin width\n maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. 
bin centers\n # Normalized to volume and bin width\n err_poiss = np.sqrt(counts) / (volume * dm)\n\n phi = counts / (volume * dm) # not a log quantity\n return maxis, phi, err_poiss, bins, counts", "def unnormalize_multivariate_data(normed_data, scaling_values):\n data = np.zeros(normed_data.shape, dtype=normed_data.dtype)\n for i in range(normed_data.shape[-1]):\n data[:, :, :, i] = normed_data[:, :, :, i] * scaling_values.loc[i, \"std\"] + scaling_values.loc[i, \"mean\"]\n return data", "def plotResultsNoNoise(inputfile, title, bins=10):\n path = datetime.datetime.now().isoformat()\n os.mkdir(path)\n path += '/'\n\n results = cPickle.load(open(inputfile))\n #copy input to the path\n try:\n shutil.copy2(inputfile, path+inputfile)\n except:\n pass\n\n print '\\n\\n\\n\\nFitted centre:'\n\n e = results['eclean'] - results['eCTI']\n e1 = results['e1clean'] - results['e1CTI']\n e2 = results['e2clean'] - results['e2CTI']\n x = results['xclean'] - results['xCTI']\n y = results['yclean'] - results['yCTI']\n r2 = (results['R2clean'] - results['R2CTI']) / results['R2clean']\n meane = np.mean(e)\n meane1 = np.mean(e1)\n meane2 = np.mean(e2)\n meanx = np.mean(x)\n meany = np.mean(y)\n meanr2 = np.mean(r2)\n\n print 'Delta e, e_1, e_2:', meane, meane1, meane2\n #print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)\n ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)\n ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)\n ax.axvline(x=meane, color='b', label='%.2e' % meane)\n ax.axvline(x=meane1, color='r', label='%.2e' % meane1)\n ax.axvline(x=meane2, color='g', label='%.2e' % meane2)\n ax.set_xlabel(r'$\\delta e$ [w/o - w/ CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityDeltaFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta e_{1}$')\n ax.set_ylabel(r'$\\delta e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')\n ax.scatter(results['e1CTI'], results['e2CTI'], s=8, color='r', marker='o', alpha=0.4, label='CTI')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n ax.set_xlabel(r'$e_{1}$')\n ax.set_ylabel(r'$e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'e1vse2FittedCentre.pdf')\n plt.close()\n\n print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(r2, bins=bins, color='b', label='$R^{2}$')\n ax.axvline(x=meanr2,color='b', label='%.2e' % meanr2)\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'sizeDeltaFittedCentre.pdf')\n plt.close()\n\n print 'delta x: mean, std ', meanx, np.std(x)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(x, bins=bins, color='b', label='X Centre')\n ax.axvline(x=meanx,color='b', label='%.2e' % meanx)\n ax.set_xlabel(r'$\\delta X - X_{CTI}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'xDeltaFittedCentre.pdf')\n plt.close()\n\n print 'delta y: mean, std 
', meany, np.std(y)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(y, bins=bins, color='b', label='Y Centre')\n ax.axvline(x=meany,color='b', label='%.2e' % meany)\n ax.set_xlabel(r'$\\delta Y - Y_{CTI}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'yDeltaFittedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta X$')\n ax.set_ylabel(r'$\\delta Y$')\n plt.legend(shadow=True, fancybox=True, scatterpoints=1)\n plt.savefig(path+'coordinatesFittedCentre.pdf')\n plt.close()\n\n print '\\n\\n\\n\\nFixed centre:'\n\n e = results['eclean'] - results['eCTIfixed']\n e1 = results['e1clean'] - results['e1CTIfixed']\n e2 = results['e2clean'] - results['e2CTIfixed']\n x = results['xclean'] - results['xCTIfixed']\n y = results['yclean'] - results['yCTIfixed']\n r2 = (results['R2clean'] - results['R2CTIfixed']) / results['R2clean']\n meane = np.mean(e)\n meane1 = np.mean(e1)\n meane2 = np.mean(e2)\n meanx = np.mean(x)\n meany = np.mean(y)\n meanr2 = np.mean(r2)\n\n print 'Delta e, e_1, e_2:', meane, meane1, meane2\n #print 'std e, e_1, e_2:', np.std(e), np.std(e1), np.std(e2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(e, bins=bins, color='b', label='$e$', alpha=0.5)\n ax.hist(e1, bins=bins, color='r', label='$e_{1}$', alpha=0.5)\n ax.hist(e2, bins=bins, color='g', label='$e_{2}$', alpha=0.5)\n ax.axvline(x=meane, color='b', label='%.2e' % meane)\n ax.axvline(x=meane1, color='r', label='%.2e' % meane1)\n ax.axvline(x=meane2, color='g', label='%.2e' % meane2)\n ax.set_xlabel(r'$\\delta e$ [w/o - w/ CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityDeltaFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(e1, e2, s=8, color='r', marker='o', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta e_{1}$')\n ax.set_ylabel(r'$\\delta e_{2}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'ellipticityFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(results['e1clean'], results['e2clean'], s=8, color='k', marker='s', alpha=0.1, label='no CTI')\n ax.scatter(results['e1CTIfixed'], results['e2CTIfixed'], s=8, color='r', marker='o', alpha=0.4, label='CTI')\n ax.set_xlabel(r'$e_{1}$')\n ax.set_ylabel(r'$e_{2}$')\n ax.set_xlim(-1, 1)\n ax.set_ylim(-1, 1)\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'e1vse2FixedCentre.pdf')\n plt.close()\n\n print 'delta R2 / R2: mean, std ', meanr2, np.std(r2)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(r2, bins=bins, color='b', label='$R^{2}$')\n ax.axvline(x=meanr2, color='b', label='%.2e' % meanr2)\n ax.set_xlabel(r'$\\frac{\\delta R^{2}}{R^{2}_{ref}}$ [w/o - w CTI]')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'sizeDeltaFixedCentre.pdf')\n plt.close()\n\n print 'delta x: mean, std ', meanx, np.std(x)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(x, bins=bins, color='b', label='X Centre')\n ax.axvline(x=meanx, color='b', label='%.2e' % meanx)\n ax.set_xlabel(r'$X - X_{CTI}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'xDeltaFixedCentre.pdf')\n plt.close()\n\n print 'delta y: mean, std ', meany, np.std(y)\n fig = plt.figure()\n ax = 
fig.add_subplot(111)\n ax.set_title(title)\n ax.hist(y, bins=bins, color='b', label='Y Centre')\n ax.axvline(x=meany, color='b', label='%.2e' % meany)\n ax.set_xlabel(r'$Y - Y_{CTI}$')\n plt.legend(shadow=True, fancybox=True)\n plt.savefig(path+'yDeltaFixedCentre.pdf')\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title)\n ax.scatter(x, y, s=15, color='k', marker='s', alpha=0.5, label='w/o - w/ CTI')\n ax.set_xlabel(r'$\\delta X$')\n ax.set_ylabel(r'$\\delta Y$')\n plt.legend(shadow=True, fancybox=True, scatterpoints=1)\n plt.savefig(path+'coordinatesFixedCentre.pdf')\n plt.close()", "def preprocess(self, fs, signal):\n # NUMERO TOTAL DE MUESTRAS\n Y, frq, ceps = self.calculate_spectrum_cepstrum(signal, fs)\n # Hacemos lista de (decibeles(Y), frecuencia(x)) tuples\n esp_frecuencia_pairs = [(Y[i], frq[i]) for i in range(len(Y))]\n # APLICAMOS FILTRO DE FRECUENCIAS\n esp_frecuencia_pairs = [(Y[i], frq[i]) for i in range(\n len(Y)) if frq[i] > self.min_frq and frq[i] < self.max_frq]\n # FRECUENCIAS FILTRADAS\n esp_aux = np.array(esp_frecuencia_pairs)\n frq = esp_aux[:, 1]\n # ESPECTRO DE POTENCIA\n Y = esp_aux[:, 0]\n # ORDENAMOS\n esp_frecuencia_pairs.sort()\n esp_frecuencia_pairs.reverse()\n # OBTENEMOS TODAS LAS CARACTERISTICAS RELEVANTES\n result = self.calculate_all_spec_props(\n frq, Y, ceps, esp_frecuencia_pairs)\n return result", "def hist_qual(self, fontsize=16, bins=100):\n self.rewind()\n data = [x.QUAL for x in self._vcf_reader]\n pylab.hist(data, bins=bins)\n pylab.grid(True)\n pylab.xlabel(\"Variant quality\", fontsize=fontsize)", "def standardize_data(data):\n return (data - np.mean(data, axis=0)) / (np.std(data, axis=0) + 10 ** -16)", "def ANN_efficiency_vs_PU_pT_PV(title, x_data, pT, CSV, model_noPT, model_withPT, model_withPV, ANN_noPT_Cuts, ANN_withPT_Cuts, ANN_withPV_Cuts, Ratio_Cuts, CSV_Cuts, bins, y_max, pT_Cut=200, BG=False, DrawTitle=False, LargeLegend=False):\n assert x_data.shape[1]==21, \"x_data does not contain PV. 
Make sure it is made from a PU sample and has shape (x, 21).\"\n\tassert x_data.shape[0] == len(pT) == len(CSV), \"data inputs need to have the same length\"\n\tassert len(ANN_noPT_Cuts) == len(ANN_withPT_Cuts) == len(ANN_withPV_Cuts) == len(Ratio_Cuts) == len(CSV_Cuts) == len(bins)-1, \"cuts need to have the same length and be compatible with amount of bins\"\n\n ran = (0,80)\n nbins = 80\n import array\n\tif BG:\n\t\tbins_ = array.array('d',[0.0, 11.0]+range(19,41,8)+[42.0, 52.0, 80])\n\telse:\n \tbins_ = array.array('d',[0.0, 11.0]+range(15,41,4)+[42.0, 52.0, 58.0, 65.0, 80])\n\n\tif pT_Cut >= 1200:\n\t\tbins_ = array.array('d',[0.0, 20.0, 40.0, 80.0])\n\n\n #make histograms of efficiency vs PU\n AllJets_Hist = rt.TH1D(\"AllJets\",\"AllJets\",nbins,ran[0],ran[1])\n ANN_noPT_Hist = rt.TH1D(\"ANN_noPT\",\"ANN_noPT\",nbins,ran[0],ran[1])\n\tANN_withPT_Hist = rt.TH1D(\"ANN_withPT\",\"ANN_withPT\",nbins,ran[0],ran[1])\n\tANN_withPV_Hist = rt.TH1D(\"ANN_withPV\",\"ANN_withPV\",nbins,ran[0],ran[1])\n Ratio_Hist = rt.TH1D(\"Ratio\",\"Ratio\",nbins,ran[0],ran[1])\n CSV_Hist = rt.TH1D(\"CSV\",\"CSV\",nbins,ran[0],ran[1])\n\n\tAllJets_Hist = AllJets_Hist.Rebin(len(bins_)-1,\"AllJets\",bins_)\n ANN_noPT_Hist = ANN_noPT_Hist.Rebin(len(bins_)-1,\"ANN_noPT\",bins_)\n\tANN_withPT_Hist = ANN_withPT_Hist.Rebin(len(bins_)-1,\"ANN_withPT\",bins_)\n\tANN_withPV_Hist = ANN_withPV_Hist.Rebin(len(bins_)-1,\"ANN_withPV\",bins_)\n Ratio_Hist = Ratio_Hist.Rebin(len(bins_)-1,\"Ratio\",bins_)\n CSV_Hist = CSV_Hist.Rebin(len(bins_)-1,\"CSV\",bins_)\n \n\tpred_y_noPT = model_noPT.predict(ANN_functional_shape(x_data))\n\tpred_y_withPT = model_withPT.predict(ANN_functional_shape(x_data)+[pT/200.])\n\tpred_y_withPV = model_withPV.predict(ANN_functional_shape(x_data)+[x_data[:,-1]/10.])\n\n\tbin_numbers = ANN_bin_selection(pT,bins)\n\t\n\tfor i,pT_value in enumerate(pT):\n\t\t\tif pT_value < pT_Cut: continue\n\t if bin_numbers[i] == -100: continue\n\t\t\tAllJets_Hist.Fill(x_data[i,-1])\n\t\t\tif CSV[i] >= CSV_Cuts[bin_numbers[i]]: CSV_Hist.Fill(x_data[i,-1])\n\t if pred_y_noPT[i] >= ANN_noPT_Cuts[bin_numbers[i]]: ANN_noPT_Hist.Fill(x_data[i,-1])\n\t\t\tif pred_y_withPT[i] >= ANN_withPT_Cuts[bin_numbers[i]]: ANN_withPT_Hist.Fill(x_data[i,-1])\n\t\t\tif pred_y_withPV[i] >= ANN_withPV_Cuts[bin_numbers[i]]: ANN_withPV_Hist.Fill(x_data[i,-1])\n\n\t\t\tif x_data[i,12] != 0:\n\t\t\t\tL_R = x_data[i,15]/float(x_data[i,12])\n\t\t\t\tif L_R >= Ratio_Cuts[bin_numbers[i]]: Ratio_Hist.Fill(x_data[i,-1])\n \n\t#Make Graphs and draw them\n canvas = rt.TCanvas('canvas','canvas',600,600)\n\tif DrawTitle == False: rt.gStyle.SetOptTitle(0)\n\tif LargeLegend:\n\t\tlegend = rt.TLegend(0.1,0.9,0.4,0.7)\n\telse:\n \tlegend = rt.TLegend(0.1,0.9,0.35,0.75)\n ANN_noPT_Graph = rt.TGraphAsymmErrors()\n\tANN_withPT_Graph = rt.TGraphAsymmErrors()\n\tANN_withPV_Graph = rt.TGraphAsymmErrors()\n Ratio_Graph = rt.TGraphAsymmErrors()\n CSV_Graph = rt.TGraphAsymmErrors()\n if DrawTitle: Ratio_Graph.SetTitle(title+\"_vs_PU_pT{}{}\".format('jet',pT_Cut))\n ANN_noPT_Graph.Divide(ANN_noPT_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n\tANN_withPT_Graph.Divide(ANN_withPT_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n\tANN_withPV_Graph.Divide(ANN_withPV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n Ratio_Graph.Divide(Ratio_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n CSV_Graph.Divide(CSV_Hist,AllJets_Hist,\"cl=0.683 b(1,1) mode\")\n ANN_noPT_Graph.SetLineColor(3)\n 
ANN_withPT_Graph.SetLineColor(6)\n\tANN_withPV_Graph.SetLineColor(7)\n\tRatio_Graph.SetLineColor(2)\n CSV_Graph.SetLineColor(4)\n #legend.AddEntry(ANN_noPT_Graph, \"ANN without p_{T}/PV\", \"LEP\")\n\tlegend.AddEntry(ANN_noPT_Graph, \"ANN without p_{T}\", \"LEP\")\n legend.AddEntry(ANN_withPT_Graph, \"ANN with p_{T}\", \"LEP\")\n\t#legend.AddEntry(ANN_withPV_Graph, \"ANN with PV\", \"LEP\")\n\tlegend.AddEntry(Ratio_Graph, \"L4/L1\", \"LEP\")\n legend.AddEntry(CSV_Graph, \"CSV\", \"LEP\")\n Ratio_Graph.GetXaxis().SetTitle(\"#PV\")\n if BG:\n\t\tRatio_Graph.GetYaxis().SetTitle('mistag rate')\n\telse:\t\n\t\tRatio_Graph.GetYaxis().SetTitle('efficiency')\n Ratio_Graph.GetYaxis().SetTitleOffset(1.5)\n\tRatio_Graph.SetMinimum(0.)\n Ratio_Graph.SetMaximum(y_max)\n Ratio_Graph.Draw()\n ANN_noPT_Graph.Draw(\"SAME\")\n\tANN_withPT_Graph.Draw(\"SAME\")\n\t#ANN_withPV_Graph.Draw(\"SAME\")\n CSV_Graph.Draw(\"SAME\")\n legend.Draw()\n canvas.SaveAs('Thesis_Plots/'+title+\"_vs_PU_pT{}{}.png\".format('jet',pT_Cut))", "def tabulate_histogram(self):\n\n # Generate a table of uniform variates\n from mitsuba.core import Float, Vector2f, Vector2u, Float32, \\\n UInt64, PCG32\n\n rng = PCG32(initseq=ek.arange(UInt64, self.sample_count))\n\n samples_in = getattr(mitsuba.core, 'Vector%if' % self.sample_dim)()\n for i in range(self.sample_dim):\n samples_in[i] = rng.next_float32() if Float is Float32 \\\n else rng.next_float64()\n\n self.pdf_start = time.time()\n\n # Invoke sampling strategy\n samples_out = self.sample_func(samples_in)\n\n if type(samples_out) is tuple:\n weights_out = samples_out[1]\n samples_out = samples_out[0]\n else:\n weights_out = Float(1.0)\n\n # Map samples into the parameter domain\n xy = self.domain.map_backward(samples_out)\n\n # Sanity check\n eps = self.bounds.extents() * 1e-4\n in_domain = ek.all((xy >= self.bounds.min - eps) &\n (xy <= self.bounds.max + eps))\n if not ek.all(in_domain):\n self._log('Encountered samples outside of the specified '\n 'domain: %s' % str(ek.compress(xy, ~in_domain)))\n self.fail = True\n\n # Normalize position values\n xy = (xy - self.bounds.min) / self.bounds.extents()\n xy = Vector2u(ek.clamp(xy * Vector2f(self.res), 0,\n Vector2f(self.res - 1)))\n\n # Compute a histogram of the positions in the parameter domain\n self.histogram = ek.zero(Float, ek.hprod(self.res))\n\n ek.scatter_add(\n target=self.histogram,\n index=xy.x + xy.y * self.res.x,\n source=weights_out\n )\n\n self.pdf_end = time.time()\n\n histogram_min = ek.hmin(self.histogram)\n if not histogram_min >= 0:\n self._log('Encountered a cell with negative sample '\n 'weights: %f' % histogram_min)\n self.fail = True\n\n self.histogram_sum = ek.hsum(self.histogram) / self.sample_count\n if self.histogram_sum > 1.1:\n self._log('Sample weights add up to a value greater '\n 'than 1.0: %f' % self.histogram_sum)\n self.fail = True", "def data_preprocessing_TA(X):\n \n #Removing the mean and scaling the data\n X_prep=StandardScaler().fit_transform(X)\n #do here your preprocessing\n return X_prep", "def main():\n strikes, dips, normals, slip = generate_normal_ss_data(330, 60, n=500, porp=1)\n #strikes, dips, normals, slip = generate_normal_data(330, 60, n=500, porp=10)\n sigma = invert_plane_stress(normals, slip)\n plot(sigma, strikes, dips)\n plt.show()", "def SAS(sampled_data, n_min=1, n_max=50):\n x_max = max(sampled_data)\n x_min = min(sampled_data)\n N_MIN = n_min\n N_MAX = n_max\n N = range(N_MIN,N_MAX)\n N = np.array(N)\n D = (x_max-x_min)/N\n C = np.zeros(shape=(np.size(D),1))\n \n 
plt.figure()\n temp_graph = plt.subplot(1,1,1)\n for i in range(np.size(N)):\n edges = np.linspace(x_min,x_max,N[i]+1)\n ki = temp_graph.hist(sampled_data,edges)\n ki = ki[0]\n k = np.mean(ki)\n v = sum((ki-k)**2)/N[i]\n C[i] = (2*k-v)/((D[i])**2)\n \n temp_graph.cla()\n cmin = min(C)\n idx = np.where(C==cmin)\n idx = int(idx[0])\n optD = D[idx]\n \n fig = plt.figure()\n plt.title(\"Shimazaki and Shinomoto's choice optimization process\")\n plt.plot(D,C,'.b',optD,cmin,'*r')\n \n return N[idx]+1", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def mag_postprocessing(variables):\n return variables" ]
[ "0.64501214", "0.5601664", "0.5529052", "0.55120814", "0.54677695", "0.5449569", "0.5407651", "0.53764164", "0.5360028", "0.5337787", "0.53310555", "0.5325349", "0.5292115", "0.529134", "0.5287892", "0.5283185", "0.5263711", "0.5261005", "0.5248296", "0.52357006", "0.52268076", "0.52246106", "0.5220019", "0.5214336", "0.5212858", "0.5193024", "0.51807946", "0.5169738", "0.51696086", "0.5169486", "0.5164296", "0.5161555", "0.51601195", "0.5159217", "0.5144758", "0.51417875", "0.51290333", "0.51272047", "0.51227427", "0.5122392", "0.5113952", "0.5111325", "0.51042664", "0.5103936", "0.51010305", "0.50983876", "0.5091333", "0.50794345", "0.50783557", "0.5071098", "0.5061789", "0.505786", "0.5045415", "0.5038896", "0.50363654", "0.5034727", "0.5034541", "0.5033383", "0.50263184", "0.502236", "0.50194037", "0.50179845", "0.5009857", "0.4998881", "0.49963966", "0.49902397", "0.4989457", "0.49886045", "0.49715826", "0.49697277", "0.49678344", "0.49605924", "0.49540237", "0.49466962", "0.49446315", "0.49420917", "0.4938612", "0.49379203", "0.49349993", "0.49254444", "0.49228126", "0.49227858", "0.49201244", "0.492001", "0.4919977", "0.49177867", "0.49152204", "0.49142563", "0.49126974", "0.4908331", "0.49022713", "0.4897055", "0.4892634", "0.4890898", "0.48797306", "0.48779154", "0.48748383", "0.48734158", "0.48731324", "0.48660612" ]
0.59027475
1
takes the preprocessed variation histograms and makes the final summed histograms; symmetrises effects in bins
from ROOT import TFile


def make_sys_sum(fileName, systematics, variations):
    # "variations" is accepted for interface compatibility but not used here

    inputFile = TFile(fileName)

    # Clone the nominal histogram to inherit its binning, then clear the contents
    histTotSysUp = inputFile.Get("Nominal").Clone()
    histTotSysDown = inputFile.Get("Nominal").Clone()
    histTotSysUp.Reset()
    histTotSysDown.Reset()
    histTotSysUp.SetDirectory(0)
    histTotSysDown.SetDirectory(0)

    for ibin in range(0, histTotSysUp.GetNbinsX()):
        totSysUp = 0.0
        # add the per-systematic relative shifts in quadrature
        for systematic in systematics:
            histSysUpName = systematic + "_UP"
            histSysUp = inputFile.Get(histSysUpName)
            sysUp = 1.0 - histSysUp.GetBinContent(ibin+1)
            totSysUp = totSysUp + (sysUp**2.0)

        totSysUp = totSysUp**0.5
        # symmetrise: apply the same total shift up and down around 1
        histTotSysUp.SetBinContent(ibin+1, 1.0 + totSysUp)
        histTotSysDown.SetBinContent(ibin+1, 1.0 - totSysUp)

    return (histTotSysUp, histTotSysDown)
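A minimal usage sketch for the function above, assuming PyROOT is available and the input ROOT file contains a "Nominal" histogram plus one "<SYST>_UP" histogram per systematic; the file name and systematic labels below are illustrative placeholders, not values taken from this record.

from ROOT import TCanvas

systematics = ["JES", "BTAG", "PDF"]   # hypothetical systematic names
variations = [""]                       # accepted by the signature but unused

histUp, histDown = make_sys_sum("HypToppTResults.root", systematics, variations)

# Each bin holds 1 +/- the quadrature sum of the per-systematic shifts,
# i.e. a symmetrised relative envelope around the nominal prediction.
canvas = TCanvas("cEnvelope", "total systematic envelope", 800, 600)
histUp.SetLineColor(2)
histDown.SetLineColor(4)
histUp.Draw("HIST")
histDown.Draw("HIST SAME")
canvas.SaveAs("totalSystematicEnvelope.png")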
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_histograms(df, df_norm, fignum, fields, binns):\n fig = plt.figure(num=fignum, figsize=(18,18))\n fig.suptitle('Histogram before and after normalization', fontsize=22)\n ax1 = fig.add_subplot(421, axisbg='0.94')\n ax2 = fig.add_subplot(422, axisbg='0.94')\n ax3 = fig.add_subplot(423, axisbg='0.94')\n ax4 = fig.add_subplot(424, axisbg='0.94')\n ax5 = fig.add_subplot(425, axisbg='0.94')\n ax6 = fig.add_subplot(426, axisbg='0.94')\n ax7 = fig.add_subplot(427, axisbg='0.94')\n ax8 = fig.add_subplot(428, axisbg='0.94')\n alphas = [0.33, 0.33, 0.6, 0.6, 0.28, 0.28, 0.6, 0.6]\n hues = ['g','y','g','y','g','y','g','y']\n all_axes = plt.gcf().axes\n # print list(enumerate(fields))\n for i, ax in list(enumerate(all_axes)):\n ax.set_ylabel(\"count\", fontsize=10)\n for ticklabel in ax.get_xticklabels() + ax.get_yticklabels():\n ticklabel.set_fontsize(14)\n g = np.int(math.ceil(np.float(i)/2))\n \n if (len(fields)*2-1) >= i:\n if i in (0,2,4,6):\n ax.hist(df[fields[i-g]].dropna().values, bins=binns[i-g], color=hues[i],alpha=alphas[i])\n print \" plot \" + str(df[fields[i-g]].name)\n ax.set_title(df[fields[i-g]].name, fontsize=20)\n #if (len(fields)*2) >= i: \n if i in (1,3,5,7):\n #try:\n ax.hist(df_norm[fields[i-g]].dropna().values, bins=binns[i-g], color=hues[i],alpha=alphas[i])\n ax.set_title(\"As normalized:\", fontsize=20)\n \n try: # Save the figure as one file\n filename = \"data/vis/histogram_compare\" + \"_\" + str(fignum) + \".png\"\n plt.savefig(filename)\n print \"= Vis Output: \", filename\n print\n except IOError:\n print \"WARNING: Failed to write out file: \", filename\n print\n plt.close(fig)", "def parameters_histograms(w, dw, a, da, b, db):\n w = w.cpu()\n dw = dw.cpu()\n a = a.cpu()\n da = da.cpu()\n b = b.cpu()\n db = db.cpu()\n \n fig = plt.figure(figsize=(10,6))\n ax = fig.add_subplot(231)\n ax.hist(w.reshape(1, w.shape[0] * w.shape[1]))\n ax.set_title('Weights', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(232)\n ax.hist(dw.reshape(1, dw.shape[0] * dw.shape[1]))\n ax.set_title('Weights variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(233)\n ax.hist(a)\n ax.set_title('Visible bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(234)\n ax.hist(da)\n ax.set_title('Visible bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(235)\n ax.hist(b)\n ax.set_title('Hidden bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(236)\n ax.hist(db)\n ax.set_title('Hidden bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.subplots_adjust(hspace=0.25)\n plt.show()\n plt.close('all')", "def bin_histogram (modified_df, v_to_bin):\n for variable in v_to_bin:\n # Remove Nas\n df = modified_df[modified_df[variable].notnull()]\n # Create surv filter\n hist_filter = df[\"Survived\"] == 1\n # Create Histogram\n plt.hist([df[variable][hist_filter], df[variable][~hist_filter]],\n stacked=True, label=['Survived', 'Not Survived'], color=['g', 'r'])\n plt.legend()\n # Save and reset fig\n plt.savefig(variable+\"_histogram\")\n plt.clf()", "def make_histograms(df, suffix, fignum, fields, binns):\n fig = plt.figure(num=fignum, figsize=(18,18))\n fig.suptitle('Histograms of ' + str(suffix) + ' features', fontsize=22)\n ax1 = fig.add_subplot(421, axisbg='0.94')\n 
ax2 = fig.add_subplot(422, axisbg='0.94')\n ax3 = fig.add_subplot(423, axisbg='0.94')\n ax4 = fig.add_subplot(424, axisbg='0.94')\n ax5 = fig.add_subplot(425, axisbg='0.94')\n ax6 = fig.add_subplot(426, axisbg='0.94')\n ax7 = fig.add_subplot(427, axisbg='0.94')\n ax8 = fig.add_subplot(428, axisbg='0.94')\n alphas = [0.33, 0.33, 0.6, 0.6, 0.28, 0.28, 0.6, 0.6]\n hues = ['g','b','b','g','g','b','b','g']\n all_axes = plt.gcf().axes\n for i, ax in list(enumerate(all_axes)):\n ax.set_ylabel(\"count\", fontsize=10)\n for ticklabel in ax.get_xticklabels() + ax.get_yticklabels():\n ticklabel.set_fontsize(14)\n if (len(fields) - 1) >= i:\n if suffix == \"raw\":\n transformed = df[fields[i]].dropna().values\n elif suffix == \"log10\":\n transformed = np.log10(df[fields[i]].dropna().values)\n elif suffix == \"log\":\n transformed = np.log(df[fields[i]].dropna().values)\n \n #try:\n ax.hist(transformed, bins=binns[i], color=hues[i],alpha=alphas[i])\n ax.set_title(df[fields[i]].name, fontsize=20)\n #except:\n # print \"WARNING: An error occurred in composing {} Figure %d\".format(str(suffix)) % fignum\n # return\n \n try: # Save the figure as one file\n filename = \"data/vis/histogram\" + \"_\" + str(fignum) + \"_\" + str(suffix) + \".png\"\n plt.savefig(filename)\n print \"= Vis Output: \", filename\n except IOError:\n print \"WARNING: Failed to write out file: \", filename\n print\n plt.close(fig)", "def makeHist(data, bins, wgt=None, factor=1.0, pdf=False):\n n_arr, bins = np.histogram(data, bins, weights=wgt)\n ctr_bins = centerOfBins(bins)\n \n if pdf == True:\n n_arr = asFloat(n_arr) / (float(sum(n_arr)) * (bins[1:] - bins[:-1]))\n else:\n n_arr = asFloat(n_arr) * factor\n \n return n_arr, ctr_bins", "def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}", "def hog_histograms(*args, **kwargs): # real signature unknown\n pass", "def create_histograms(PrimaryParticleName, LongVectorSignals, LongVectorSignalsCher,\n\tShortVectorSignals, ShortVectorSignalsCher, LongScinMaxFiber, LongCherMaxFiber, \n\tShortScinMaxFiber, ShortCherMaxFiber, EnergyTotContainer, MaxEnergyTotContainer):\n\n\t#Set ROOT histograms\n\tTH1LongScin = TH1F(\"LongScintillation\", PrimaryParticleName, 100, 0.0, LongScinMaxFiber+200.)\n\tTH1LongCher = TH1F(\"LongCherenkov\", PrimaryParticleName, 100, 0.0, LongCherMaxFiber+200.)\n\tTH1ShortScin = TH1F(\"ShortScintillation\", PrimaryParticleName, 100, 0.0, ShortScinMaxFiber+200.)\n\tTH1ShortCher = TH1F(\"ShortCherenkov\", PrimaryParticleName, 100, 0.0, ShortCherMaxFiber+200.)\n\tTH1EnergyTot = TH1F(\"EnergyTot\", PrimaryParticleName, 100, MaxEnergyTotContainer-10000., MaxEnergyTotContainer+500.) 
\n\n\t#Fill histograms in for loop\n\tfor index in range(len(LongVectorSignals)):\n\t\tTH1LongScin.Fill(LongVectorSignals[index])\n\t\tTH1LongCher.Fill(LongVectorSignalsCher[index])\n\t\tTH1ShortScin.Fill(ShortVectorSignals[index])\n\t\tTH1ShortCher.Fill(ShortVectorSignalsCher[index])\n\t\tTH1EnergyTot.Fill(EnergyTotContainer[index])\n\n\t#Draw + DrawOptions\n\tStyle = gStyle\n\tStyle.SetOptStat(1) #Show statistics\n\tStyle.SetLineWidth(1)\n\tXAxis = TH1LongScin.GetXaxis() #TH1LongScin\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1LongScin.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1LongScin.Draw()\n\tgPad.SaveAs(\"EnergyLongScin.eps\")\n\tXAxis = TH1LongCher.GetXaxis() #TH1LongCher\n\tXAxis.SetTitle(\"# Cher p.e.\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1LongCher.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1LongCher.Draw()\n\tgPad.SaveAs(\"CherpeLong.eps\")\n\tXAxis = TH1ShortScin.GetXaxis() #TH1ShortScin\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1ShortScin.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1ShortScin.Draw()\n\tgPad.SaveAs(\"EnergyShortScin.eps\")\n\tXAxis = TH1ShortCher.GetXaxis() #TH1ShortCher\n\tXAxis.SetTitle(\"# Cher p.e.\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1ShortCher.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1ShortCher.Draw()\n\tgPad.SaveAs(\"CherpeShort.eps\")\n\tXAxis = TH1EnergyTot.GetXaxis() #TH1EnergyTot\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1EnergyTot.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1EnergyTot.Draw()\n\tgPad.SaveAs(\"EnergyTot.eps\")", "def genHistArrays(df,csname,bins=50):\n #initiate matrix which will contain values of histograms\n allpixV = np.zeros((df.shape[0],bins*3))\n #attain histograms\n hists = df['SKImage'].apply(lambda x: getHists(x,bins))\n \n #Generate column names for result dataframe\n fullnames = []\n for chs in ['CH1', 'CH2', 'CH3']:\n fullnames.extend([chs+'-'+str(j) for j in range(bins)])\n fullnames = [csname+'-'+str(j) for j in fullnames]\n \n #extract histograms\n for rowi, histArr in enumerate(hists):\n allpixV[rowi,:] = np.array(histArr).flatten()\n \n return allpixV,fullnames", "def get_extended_hist(img, sid_bin_edges):\n extended_bin_edges = np.append(sid_bin_edges.numpy(), float('inf'))\n img_hist, _ = np.histogram(img, bins=extended_bin_edges)\n return img_hist", "def histogramFromSketch_M2M(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10,project_on_probabilitySimplex=True,reg_rho=0.01):\n\n ## 0) Parsing the inputs\n\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n\n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n # Parse m, d\n if isinstance(Phi,SimpleFeatureMap):\n Omega = Phi.Omega\n d = Phi.d\n m = Phi.m\n else:\n raise ValueError('The Phi argument does not match one of the supported formats.')\n \n ## 1) Construct the A matrix\n # Build a new sketch with all the difference of Omega\n Omega_diffs = np.empty((d,m**2))\n for i in range(m):\n for j in range(m):\n Omega_diffs[:,i*m+j] = Omega[:,i] - Omega[:,j]\n\n Phi_diffs = SimpleFeatureMap(\"complexExponential\", Omega_diffs,xi=Phi.xi,c_norm=Phi.c_norm)\n\n # Evaluate the box constraints Fourier transform thanks to this sketch function\n z_diffs_domain = fourierSketchOfBox(domain,Phi_diffs,nb_cat_per_dim)\n\n # And reshape (not 
sure if correct)\n A_compl = z_diffs_domain.reshape(m,m)\n\n # Stack real and imaginary components\n A = np.zeros((2*m,2*m))\n A[:m,:m] = A_compl.real\n A[:m,m:] = A_compl.imag\n A[m:,:m] = -A_compl.imag\n A[m:,m:] = A_compl.real\n \n # Regularize\n A += reg_rho*np.eye(2*m)\n\n box = domain.copy() # the box in which we do the learning\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n h = np.zeros(bins)\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n F = fourierSketchOfBox(box,Phi,nb_cat_per_dim)\n\n # Stack the b vector\n b = np.zeros(2*m)\n b[:m] = F.real\n b[m:] = -F.imag\n\n \n # ... and solve! \n a_ri = np.linalg.solve(A, b)\n a = a_ri[:m] + 1j*a_ri[m:]\n \n\n \n # Predict with the sketch\n #print(a)\n h[p] = np.real(np.dot(a,sketch))\n if project_on_probabilitySimplex:\n h = project_probabilitySimplex(h)\n return h", "def normalize(histogram):\n nbins = histogram.GetNbinsX()\n integral = histogram.Integral(1,nbins)\n newhist = histogram.Clone()\n newhist.Reset()\n for bin in range(1,nbins+1):\n ibinY = histogram.GetBinContent(bin)\n newhist.SetBinContent(bin,ibinY/integral)\n return newhist", "def super_hist(self, data_list, alpha=0.5, log_scale=True, bins=45):\r\n\r\n fig, _ = mp.subplots(1, 1, figsize=(15, 10), constrained_layout=True)\r\n\r\n names = []\r\n for data in data_list:\r\n plot_data = data[data.Day_First_N_Infections != \"None\"]\r\n column_data = plot_data[\"Day_First_N_Infections\"].values\r\n sns.distplot(column_data,\r\n kde=False,\r\n bins=bins,\r\n hist_kws={\r\n \"linewidth\": 1,\r\n \"alpha\": alpha,\r\n \"edgecolor\": 'black',\r\n \"log\": log_scale\r\n })\r\n\r\n mp.legend(loc='upper left', fontsize=20)\r\n mp.xlabel(\"Days from outbreak to case number \" + str(data_list[0].N) +\r\n \" in county\",\r\n fontsize=18)\r\n mp.ylabel(\"Frequency\", fontsize=18)\r\n\r\n fig.savefig(\"hist_N\" + str(data_list[0].N) + \"_\" + \"_\".join(names) +\r\n \".png\")", "def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):\n varHists = []\n\n linkStr = \"_\"\n variations = [\"\"]\n\n for variation in variations:\n if xsecType == \"normalised\":\n xsecType = \"normalized\"\n if xsecLevel == \"particle\":\n xsecLevel = \"pseudo\"\n path = directory_base + xsecType + \"_\" + xsecLevel + directory_tail + systematic + linkStr + variation + \"/combinedUnfolded/Hyp\" + observable + \"Results.txt\"\n #print \"directory = \" + str(path)\n inputfile = open(path, 'r').readlines()\n bins = []\n for line in inputfile:\n bins.append(float(line.split( )[3]))\n bins.append(float(line.split( )[5]))\n bins = sorted(bins)\n binsArray = array('f',bins)\n histNameUp = systematic + \"_UP\" \n histNameDown = systematic + \"_DOWN\" \n histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)\n histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)\n histUpFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n histDownFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n \n ibin = 0\n\n for line in inputfile:\n nomBin = histNominal.GetBinContent(ibin+1)\n nomBinCenter = histNominal.GetBinCenter(ibin+1)\n unc = float(line.split( )[7])\n# if systematic == \"MASS\":\n# unc = unc/(3.0)\n# if systematic == \"PSFSRSCALE\":\n# unc = unc/(sqrt(2.0))\n\n histUp.SetBinContent(ibin+1, 1.0 + unc)\n histDown.SetBinContent(ibin+1,1.0 - unc)\n ibin = ibin + 1 \n\n histUpVis = histUp.Clone()\n 
histDownVis = histDown.Clone()\n histUpFinal = histUp.Clone()\n histDownFinal = histDown.Clone()\n\n if systematic == \"PDF\":\n histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)\n\n return (histUpFinal, histDownFinal)", "def histograms(probs, actual, bins=100):\n actual = actual.astype(np.bool)\n edges, step = np.linspace(0., 1., bins, retstep=True, endpoint=False)\n idx = np.digitize(probs, edges) - 1\n top = np.bincount(idx, weights=actual, minlength=bins)\n bot = np.bincount(idx, weights=(~actual), minlength=bins)\n return top, bot, edges, step", "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "def create_fixed_hist(self):\n hist = cv2.calcHist([self.obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n self.hist = cv2.normalize(hist).flatten()\n print self.hist", "def hist(data):\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.hold(True)\n for x in xrange(len(data[:,0,0])):\n counts, edges = np.histogram(data[x,:,:],bins=100)\n centers = [(edges[i]+edges[i+1])/2.0 for i,v in enumerate(edges[:-1])]\n ax1.plot(centers,counts)\n plt.hold(False)\n\n plt.show(block=False)\n\n # return fig", "def CL_histogram_MMD(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10):\n ## 0) Parsing the inputs\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n \n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n m = sketch.size\n # 1) Construct the A matrix\n A = 1j*np.zeros((m,bins)) # Pre-allocation\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n box = domain.copy()\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n A[:,p] = fourierSketchOfBox(box,Phi,nb_cat_per_dim) \n \n # 1.b) cast to real \n Ari = np.r_[A.real, A.imag]\n \n # 2) create b vector\n b = np.r_[sketch.real, sketch.imag]\n \n # 3) solve the optimization problem\n def _f_grad(x):\n r = Ari@x-b\n f = 0.5*np.linalg.norm(r)**2\n grad = Ari.T@r\n return (f,grad)\n \n # Starting point\n x0 = np.ones(bins)/bins\n # Linear constraints\n A_constr = np.zeros((bins,bins))\n l_constr = 0*np.ones(bins) # Positive constraints\n A_constr[:bins,:bins] = np.eye(bins)\n upper_bound = 5 # weird that it must be large\n u_constr = upper_bound*np.ones(bins) # Sum-to one constraints\n constr = LinearConstraint(A_constr,l_constr,u_constr)\n\n # Solve\n sol = minimize(_f_grad, x0, method='trust-constr', bounds=None, constraints=constr, jac=True, options={'verbose': 0})\n\n return project_probabilitySimplex(sol.x)", "def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()", "def histo ( self ,\n xbins = 20 , xmin = None , xmax = None ,\n ybins = 20 , ymin = None , ymax = None ,\n hpars = () , \n histo = None ,\n integral = False ,\n errors = False , \n density = False ) :\n \n \n histos = self.make_histo ( xbins = xbins , xmin = xmin , xmax = xmax ,\n ybins = ybins , ymin = ymin , ymax = ymax ,\n hpars = hpars ,\n histo = histo )\n\n # loop over the historgam bins \n for ix,iy,x,y,z in histo.items() :\n\n xv , xe = x.value() , x.error()\n yv , ye = y.value() , y.error()\n \n # value at the bin center \n 
c = self ( xv , yv , error = errors ) \n\n if not integral : \n histo[ix,iy] = c\n continue\n\n # integral over the bin \n v = self.integral( xv - xe , xv + xe , yv - ye , yv + ye )\n \n if errors :\n if 0 == c.cov2 () : pass\n elif 0 != c.value() and 0 != v : \n v = c * ( v / c.value() )\n \n histo[ix,iy] = v \n\n ## coovert to density historgam, if requested \n if density : histo = histo.density()\n \n return histo", "def hist_save(self, d, bin1, name, no):\n\t\tfor i in range(0,no):\n\t\t\ts = d[:,i]\n\t\t\tplt.hist(s, bin1, normed=True, color='c')\t# Extracting the parameters from the histogram\n\t\t\tplt.title('Probability Distribution Fnction of %s' %name, fontsize=20)\n\t\t\tplt.xlabel(\"Filter tap values\", fontsize=20)\n\t\t\tplt.ylabel(\"Probability Distribution\", fontsize=20)\n#\t\t\tplt.xlim(0,0.10)\n\t\t\tplt.ylim(0,100)\n#\t\t\tplt.legend(fontsize = 'xx-large')\n\t\t\tplt.savefig('/home/abhishek/Results/comparison_all_sets/Curve fitting/test/set_1/hist_%s_index_%d' %(name,i))\n\t\t\tplt.close()", "def _compute_histogram(self, x, momentum):\n num_bins = self.histogram.size(0)\n x_detached = x.detach()\n self.bin_width = (self._max_val - self._min_val) / (num_bins - 1)\n lo = torch.floor((x_detached - self._min_val) / self.bin_width).long()\n hi = (lo + 1).clamp(min=0, max=num_bins - 1)\n hist = x.new_zeros(num_bins)\n alpha = (\n 1.0\n - (x_detached - self._min_val - lo.float() * self.bin_width)\n / self.bin_width\n )\n hist.index_add_(0, lo, alpha)\n hist.index_add_(0, hi, 1.0 - alpha)\n hist = hist / (hist.sum() + 1e-6)\n self.histogram = (1.0 - momentum) * self.histogram + momentum * hist", "def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()", "def addHistogram2D(self, name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y):\n\t\tself.histograms[ name ] = ROOT.TH2F(name, title, n_bins_x, minimum_x, maximum_x, n_bins_y, minimum_y, maximum_y)", "def _make_histogram(\n dict_,\n data,\n bins=25,\n show_output=False,\n figsize=(10, 6),\n fontsize=15,\n plot_title=False,\n):\n indicator = dict_[\"ESTIMATION\"][\"indicator\"]\n\n treated = data[[indicator, \"prop_score\"]][data[indicator] == 1].values\n untreated = data[[indicator, \"prop_score\"]][data[indicator] == 0].values\n\n treated = treated[:, 1].tolist()\n untreated = untreated[:, 1].tolist()\n\n # Make the histogram using a list of lists\n fig = plt.figure(figsize=figsize)\n hist = plt.hist(\n [treated, untreated],\n bins=bins,\n weights=[\n np.ones(len(treated)) / len(treated),\n np.ones(len(untreated)) / len(untreated),\n ],\n density=0,\n alpha=0.55,\n label=[\"Treated\", \"Unreated\"],\n )\n\n if show_output is True:\n plt.tick_params(axis=\"both\", labelsize=14)\n plt.legend(loc=\"upper right\", prop={\"size\": 14})\n plt.xticks(np.arange(0, 1.1, step=0.1))\n plt.grid(axis=\"y\", alpha=0.25)\n plt.xlabel(\"$P$\", fontsize=fontsize)\n plt.ylabel(\"$f(P)$\", fontsize=fontsize)\n\n if plot_title is True:\n plt.title(r\"Support of $P(\\hat{Z})$ for $D=1$ and $D=0$\")\n\n else:\n plt.close(fig)\n\n return hist, treated, untreated", "def n_particles_bins(DG, bins=[0, 0.5, 3, 10, 100]):\n radii = fid.rss(DG.gas['Coordinates'][()])\n hist, bin_edges = np.histogram(radii, bins)\n\n return hist, bin_edges", "def pm_histogram(fig, ax, data, title, dwarf_pmra=None, dwarf_pmdec=None, cut=None, colorbar=True, append_title=\"\"):\n 
if cut is not None:\n ra, dec, pmra, pmdec, parallax, = cut_on_parallax(*data, cut)\n else:\n ra, dec, pmra, pmdec, parallax, _ = data\n\n # bin data from gaia in 2d histogram\n bound = 5\n bins = np.linspace(-bound, bound, num=20*bound)\n counts, xedges, yedges, im = ax.hist2d(pmra, pmdec, bins=(bins, bins), vmin=0, cmap='gnuplot')\n print(title, str(counts.max()))\n title = fix_names(title)\n # plot pm motion of dwarf from simbad\n if dwarf_pmra is not None:\n dwarf_pmra, dwarf_pmdec = fix_pms(title, dwarf_pmra, dwarf_pmdec)\n ax.plot(dwarf_pmra, dwarf_pmdec, marker='X', markersize=10, color='xkcd:white', alpha=1)\n\n ax.set_title(title + append_title)\n ax.set_xlabel(r\"Right ascension proper motion [mas/yr])\")\n ax.set_ylabel(r\"Declination proper motion [mas/yr]\")\n\n cbar = colorbar_for_subplot(fig, ax, cm.gnuplot, image=im)\n cbar.ax.set_ylabel(\"Bin counts\", rotation=270, labelpad=10)\n\n return counts, xedges, yedges, im", "def dp_hist ( data, num_bins=10, epsilon=1.0, delta=0.1, histtype = 'continuous' ):\n\n import numpy as np\n\n if epsilon < 0.0:\n print('ERROR: Epsilon should be positive.')\n return\n elif delta < 0.0 or delta > 1.0:\n print('ERROR: Delta should be bounded in [0,1].')\n return\n else:\n if histtype == 'discrete':\n num_bins = len( np.unique(data) )\n hist_counts = [0] * num_bins\n data_min = min(data)\n data_max = max(data)\n bin_edges = np.linspace(data_min, data_max, num_bins+1)\n interval = (data_max - data_min) + 0.000000000001\n \n for kk in data:\n loc = (kk - data_min) / interval\n index = int(loc * num_bins)\n hist_counts[index] += 1.0\n\n if delta==0:\n noise = np.random.laplace(loc = 0, scale = 1.0/epsilon, size = (1,len(hist_counts)))\n else:\n sigma = (1.0/epsilon)*np.sqrt(2*np.log(1.25/delta))\n noise = np.random.normal(0.0, sigma, len(hist_counts))\n\n hist_array=np.asarray(hist_counts)\n noise_array=np.asarray(noise)\n dp_hist_counts = hist_array+noise_array\n\n return ( dp_hist_counts, bin_edges )", "def calculateHistogram(self):\n \n # Define color map\n colors = [ (255,0,0),(0,255,0),(0,0,255) ]\n # Define empty image to plot histogram in\n plot_to_fill = np.zeros((280,400,3))\n # Define bins of the histogram\n bins = np.arange(256).reshape(256,1)\n \n # Boucle sur les canaux\n for channel, color in enumerate(colors):\n # Calcul de l'histogramme\n hist_item = cv2.calcHist(self.frame,[channel],None,[256],[0,256])\n # Normalisation\n cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX)\n # Conversion\n hist = np.int32(np.around(hist_item))\n pts = np.int32(np.column_stack((bins, hist)))\n cv2.polylines(plot_to_fill, [pts], False, color)\n # Mettre dans le bon sens\n histplot = np.flipud(plot_to_fill)\n histplot = np.uint8(histplot)\n \n # Conversion en objet QPixelMap\n self.histplot_qpix = self.convertToQPixelmap(histplot)", "def hist_of_numeric(X):\n figsize(10,3)\n for col in get_numeric(X):\n print(col)\n X[col].hist(bins=50)\n show()", "def h3(data, bins=None, **kwargs):\n return histogramdd(data, bins, **kwargs)", "def test_make_histograms(self):\r\n raw_lengths = [90, 100, 110, 110, 130, 135]\r\n pre_lengths = [100, 110, 105, 130, 135]\r\n post_lengths = [130, 135]\r\n raw_hist, pre_hist, post_hist, bin_edges = \\\r\n make_histograms(raw_lengths, pre_lengths, post_lengths)\r\n assert_almost_equal(pre_hist, array([0, 2, 1, 0, 2]))\r\n assert_almost_equal(post_hist, array([0, 0, 0, 0, 2]))\r\n assert_almost_equal(bin_edges, array([90, 100, 110, 120, 130, 140]))", "def end_hist(pulse, trap):\n all_trial_n, all_trial_n_ave = 
trap.sideband_cool_sch(pulse)\n n_max = np.amax(all_trial_n)\n hist_xar = sp.arange(n_max + 1) - 0.5\n \n # fig, ax = plt.subplots()\n plt.hist(all_trial_n[:, -1], bins = hist_xar)\n plt.xlabel('Phonon State')\n plt.ylabel('Distribution')\n # return ax", "def efficient_Make_Binned_ROC_histograms(title, data, bins, PU_range='full'):\n diff_ran = (-25,25)\n diff_bins = diff_ran[1]-diff_ran[0]\n ratio_ran = (0,10)\n ratio_bins = 60\n\n Diff_hist_list = []\n Ratio_hist_list = []\n CSV_hist_list = []\n ZeroDiv_list = []\n for bin_ in range(len(bins)-1):\n Diff_hist_list.append(rt.TH1D(\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4-L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),diff_bins,diff_ran[0],diff_ran[1]))\n Ratio_hist_list.append(rt.TH1D(\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"L4_L1_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,ratio_ran[0],ratio_ran[1]))\n CSV_hist_list.append(rt.TH1D(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1]),ratio_bins,0,1))\n ZeroDiv_list.append(0)\n\n for particle in data:\n if PU_range != 'full':\n if particle[-1]<PU_range[0] or particle[-1]>PU_range[1]: continue\n bin_number = FCM.bin_selection(particle,bins)\n if bin_number == -100: continue\n\n Diff_hist_list[bin_number].Fill(particle[8]-particle[5])\n CSV_hist_list[bin_number].Fill(particle[1])\n if particle[17] != 0:\n L4_L1 = particle[20]/particle[17]\n Ratio_hist_list[bin_number].Fill(L4_L1)\n else:\n ZeroDiv_list[bin_number] += 1\n\n tfile = rt.TFile(\"Thesis_Plots/root_files/{}_histograms.root\".format(title),\"recreate\")\n for hist in Diff_hist_list:\n hist.Write()\n for hist in Ratio_hist_list:\n hist.Write()\n for hist in CSV_hist_list:\n hist.Write()\n print \"saved histograms in Thesis_Plots/root_files/{}_histograms.root\".format(title)\n\n csv_file = open(\"Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title),\"wb\")\n writer = csv.writer(csv_file)\n writer.writerow(ZeroDiv_list)\n csv_file.close()\n print \"saved zero division occurences in Thesis_Plots/root_files/{}_ZeroDiv.csv\".format(title)", "def sym_histograms(self, X, masks=None):\n if masks is None:\n histograms, updates = theano.map(self.sym_histogram, sequences=(X,))\n else:\n histograms, updates = theano.map(self.sym_histogram, sequences=(X, masks))\n return histograms", "def build_hist(concept_values: np.ndarray, num_bins: int = 100) -> np.ndarray:\n hist, _ = np.histogram(concept_values, bins=num_bins, range=(0., 1.), density=True)\n return hist", "def add_histogram(self, tag, values, global_step=None, bins='tensorflow'):\n values = make_np(values)\n self.vis.histogram(make_np(values), opts={'title': tag})", "def histogram_equalize(im_orig):\n\n color_flag = False\n image = im_orig\n\n\n if len(im_orig.shape) == 3: #RGB image\n color_flag = True\n y_im = rgb2yiq(im_orig)\n image = y_im[:, :, 0]\n\n image *= NORMALIZE\n hist_orig, bins = np.histogram(image, range(BINS))\n hist_cum = np.cumsum(hist_orig) #cumulative distribution function\n\n cum = ((hist_cum - hist_cum.min()) / ( hist_cum.max() - hist_cum.min())) * NORMALIZE\n\n im_eq = cum[image.astype(np.uint8)]\n\n hist_eq, bins = np.histogram(im_eq, range(BINS)) #before getting back to float64 does the histogram)\n\n im_eq /= NORMALIZE\n im_eq = im_eq.astype(np.float64)\n\n\n if color_flag:\n y_im[:, :, 0] = im_eq\n im_eq = yiq2rgb(y_im)\n\n im_eq = im_eq.clip(0,1)\n return [im_eq, hist_orig, hist_eq]", "def normalized_hist(data1, data2, ax, color1, color2, bin_number=50):\n D1Hist, D1bins = 
np.histogram(data1, bins=bin_number, density=True)\n nD1Hist, nD1bins = np.histogram(data2, bins=bin_number, density=True)\n center = (D1bins[:-1] + D1bins[1:])/2\n width = 0.7 * (D1bins[1] - D1bins[0])\n medianD1 = np.median(data1)\n medianD2 = np.median(data2)\n\n ax.bar(center, D1Hist, width=width, align='center', label='D1', alpha=0.5, color=color1)\n ax.bar(center, nD1Hist, width=width, align='center', label='nD1', alpha=0.5, color=color2)\n ax.legend()\n ymin, ymax = ax.get_ybound()\n ax.vlines(medianD1, ymin, ymax, color=color1)\n ax.vlines(medianD2, ymin, ymax, color=color2)\n return ax", "def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()", "def normalized_hist_dataframe(data_column, bin_number=50, output_dir='/var/tmp/'):\n db = celldatabase.load_hdf(\"/var/tmp/figuresdata/2019astrpi/direct_and_indirect_cells.h5\")\n # dbTuned = db.query(studyparams.TUNING_FILTER)\n D1DB = db.query(studyparams.D1_CELLS)\n nD1DB = db.query(studyparams.nD1_CELLS)\n D1DB = D1DB.replace([np.inf, -np.inf], np.nan)\n nD1DB = nD1DB.replace([np.inf, -np.inf], np.nan)\n D1DB = D1DB[D1DB[data_column].notnull()]\n nD1DB = nD1DB[nD1DB[data_column].notnull()]\n D1Hist, D1bins = np.histogram(D1DB[data_column], bins=bin_number, density=True)\n nD1Hist, nD1bins = np.histogram(nD1DB[data_column], bins=bin_number, density=True)\n center = (D1bins[:-1] + D1bins[1:])/2\n width = 0.7 * (D1bins[1] - D1bins[0])\n D1Median = np.median(D1DB[data_column])\n nD1Median = np.median(nD1DB[data_column])\n\n fig = plt.gcf()\n fig.clf()\n figFilename = \"{}\".format(data_column) # Do not include extension\n figFormat = 'png' # 'pdf' or 'svg'\n figSize = [5, 5]\n\n ax = fig.add_subplot()\n ax.bar(center, D1Hist, width=width, align='center', label='D1', alpha=0.5)\n ax.bar(center, nD1Hist, width=width, align='center', label='nD1', alpha=0.5)\n ax.legend()\n ax.set_xlabel('{} value'.format(data_column))\n ax.set_ylabel('Frequency')\n ax.set_title(data_column)\n ymin, ymax = ax.get_ybound()\n ax.vlines(D1Median, ymin, ymax, color=\"Green\")\n ax.vlines(nD1Median, ymin, ymax, color=\"Red\")\n\n extraplots.save_figure(figFilename, figFormat, figSize, output_dir, 'w')\n plt.show()\n return fig, ax", "def plot_histograms(p_hist, p_hbins, title, figure_path=None):\n\n base_fig_size = 7\n h_fig = base_fig_size\n w_fig = base_fig_size * 4\n\n fig = plt.figure(figsize=(w_fig, h_fig))\n fig.suptitle(title)\n iplot = 0\n\n p_Nx, p_Ny = np.amax(p_hbins, axis=1) + 1\n\n p_hist = np.reshape(p_hist, (4, p_Ny, p_Nx))\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Amp (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[0])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Phase (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[1])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Real (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = 
p_plot.imshow(np.rot90(np.log10(p_hist[2])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n iplot += 1\n p_plot_title = 'Spectral BEHistogram Imag (log10 of counts)'\n p_plot = fig.add_subplot(1, 4, iplot, title=p_plot_title)\n p_im = p_plot.imshow(np.rot90(np.log10(p_hist[3])), interpolation='nearest')\n p_plot.axis('tight')\n fig.colorbar(p_im, fraction=0.1)\n\n if figure_path:\n plt.savefig(figure_path, format='png')\n\n return fig", "def hist(self,geo,pfile):\n\n # Create histogram of box data, rounding to nearest integers if temperature\n boxdata = self.img.flatten()\n imin = int(round(min(boxdata))) - 1\n imax = int(round(max(boxdata))) + 1\n ni = imax-imin+1 # number of bins to plot\n h = np.zeros(ni,dtype=int) # initialise with zeros\n for val in boxdata: # assign each image value to a bin\n i = int(round(val)) - imin \n h[i] += 1\n n = sum(h) # total number of values binned\n h = h * 100.0/n # convert no.in bins to %frequency\n plt.figure(WINDOW_HIST,figsize=(4,4))\n plt.clf()\n # Create title for histogram plot\n ttl = self.desc + '\\n' + \\\n 'Box: X=' + str(self.ix-self.mbox) + ':' \\\n + str(self.ix) + ':' \\\n + str(self.ix+self.mbox) + \\\n ', Y=' + str(self.iy-self.mbox) + ':' \\\n + str(self.iy) + ':' \\\n + str(self.iy+self.mbox)\n plt.title(ttl)\n plt.ylabel(\"% Frequency\")\n tdisp = self.label in ( 'T9', 'T10', 'TS' )\n if tdisp: plt.xlabel(\"Pixel Temperature [K]\")\n else: plt.xlabel(\"Pixel Value [0:255]\")\n xval = np.arange(imin,imax+1,dtype=int)\n # Set colour of histogram according to channel\n plt.bar(xval,h,color=plot_colours.get(self.label,'gray'))\n x0,x1 = plt.xlim()\n y0,y1 = plt.ylim()\n boxmean = np.mean(boxdata)\n boxsd = np.std(boxdata)\n midpix = self.img[self.mbox,self.mbox]\n plt.plot( boxmean+[0,0], [y0,y1], ':', color='black' )\n plt.errorbar ( boxmean, 0.9*y1, xerr=boxsd, color='black', \n capsize=4 )\n plt.plot ( midpix, 0.9*y1, 's', color='black', \n markerfacecolor='none' ) \n plt.tight_layout()\n if boxmean > 0.5 * ( x1 + x0 ): xt = x0 + 0.4 * ( x1 - x0 )\n else: xt = x0 + 0.95*(x1-x0)\n yt = y0 + 0.95*(y1-y0)\n yd = 0.05*(y1-y0)\n text = 'Mean = {:6.2f}'.format(boxmean)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n text = 'S.D. = {:6.2f}'.format(boxsd)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n text = 'NPix = {:6n}'.format(n)\n plt.text(xt,yt,text,ha=\"right\")\n yt -= yd\n if tdisp: text = 'MidPix = {:6.2f}'.format(midpix)\n else: text = 'MidPix = {:6n}'.format(midpix)\n plt.text(xt,yt,text,ha=\"right\")\n if geo.cal:\n lat,lon,zen = geo.locate(self.ix,self.iy) \n text = 'Lat = {:6.2f}'.format(lat)\n yt -= yd\n plt.text(xt,yt,text,ha=\"right\") \n text = 'Lon = {:6.2f}'.format(lon)\n yt -= yd\n plt.text(xt,yt,text,ha=\"right\") \n if pfile: \n file = input ( \"Save to file (<CR>=hist.pdf): \" ) or \"hist.pdf\"\n plt.savefig(file)", "def histogram(self):\n if np.size(self.stats['Counts']): # don't do anything to an empty list\n if np.size(self.bins) and not self.redo:\n return self.bins, self.occs, self.thresh\n elif np.size(self.bin_array) > 0: \n self.occs, self.bins = np.histogram(self.stats['Counts'], self.bin_array) # fixed bins. 
\n else:\n try:\n lo, hi = min(self.stats['Counts'])*0.97, max(self.stats['Counts'])*1.02\n # scale number of bins with number of files in histogram and with separation of peaks\n num_bins = int(15 + self.ind//100 + (abs(hi - abs(lo))/hi)**2*15) \n self.occs, self.bins = np.histogram(self.stats['Counts'], bins=np.linspace(lo, hi, num_bins+1)) # no bins provided by user\n except: \n self.occs, self.bins = np.histogram(self.stats['Counts'])\n else: self.occs, self.bins = np.zeros(10), np.arange(0,1.1,0.1)\n return self.bins, self.occs, self.thresh", "def featuresHist_colors(self, **kwargs):\n # Selecting bins automatically:\n bins_onpower = np.arange(self.onpower_train.min().values[0],\n self.onpower_train.max().values[0],\n (self.onpower_train.max().values[0] -\n self.onpower_train.min().values[0]) / 50)\n\n bins_offpower = np.arange(self.offpower_train.min().values[0],\n self.offpower_train.max().values[0],\n (self.offpower_train.max().values[0] -\n self.offpower_train.min().values[0]) / 50)\n\n bins_duration = np.arange(self.duration_train.min().values[0],\n self.duration_train.max().values[0],\n (self.duration_train.max().values[0] -\n self.duration_train.min().values[0]) / 50)\n\n # If a bin has been specified update the bin sizes.\n # Updating bins with specified values.\n for key in kwargs:\n if key == 'bins_onpower':\n bins_onpower = kwargs[key]\n elif key == 'bins_offpower':\n bins_offpower = kwargs[key]\n elif key == 'bins_duration':\n bins_duration = kwargs[key]\n else:\n print(\"Non valid kwarg\")\n\n # Plot:\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(311)\n ax2 = fig1.add_subplot(312)\n ax3 = fig1.add_subplot(313)\n\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n ax1.hist(\n self.onpower_train[start:end].onpower.values, bins=bins_onpower, alpha=0.5)\n ax2.hist(\n self.offpower_train[start:end].offpower.values, bins=bins_offpower, alpha=0.5)\n ax3.hist(\n self.duration_train[start:end].duration.values, bins=bins_duration, alpha=0.5)\n\n ax1.set_title(\"Feature: Onpower\")\n ax1.set_xlabel(\"Watts\")\n ax1.set_ylabel(\"Counts\")\n\n ax2.set_title(\"Feature: Offpower\")\n ax2.set_xlabel(\"Watts\")\n ax2.set_ylabel(\"Counts\")\n\n ax3.set_title(\"Feature: Duration\")\n ax3.set_xlabel(\"Seconds\")\n ax3.set_ylabel(\"Counts\")", "def histogramintegrals(self):\n return {}", "def interactive_histograms(adata, keys=['n_counts', 'n_genes'],\n bins=100, min_bins=1, max_bins=1000,\n tools='pan, reset, wheel_zoom, save',\n groups=None, fill_alpha=0.4,\n palette=Set1[9] + Set2[8] + Set3[12],\n legend_loc='top_right', display_all=True,\n *args, **kwargs):\n\n from itertools import product\n from functools import reduce\n from bokeh.plotting import figure, show, ColumnDataSource\n from bokeh.models.widgets import CheckboxGroup\n from bokeh.models.widgets.buttons import Button\n from bokeh.models import Slider\n from bokeh.models.callbacks import CustomJS\n from bokeh.io import output_notebook\n from bokeh.layouts import layout, column, row\n\n from copy import copy\n from numpy import array_split, ceil\n output_notebook()\n\n if min_bins < 1:\n raise ValueError(f'Expected min_bins >= 1, got min_bins={min_bins}.')\n if max_bins < min_bins:\n raise ValueError(f'Expected min_bins <= max_bins, got min_bins={min_bins}, max_bins={max_bins}.')\n if not (bins >= min_bins and bins <= max_bins):\n raise ValueError(f'Expected min_bins <= bins <= max_bins, got 
min_bins={min_bins}, bins={bins}, max_bins={max_bins}.')\n\n # check the input\n for key in keys:\n if key not in adata.obs.keys() and \\\n key not in adata.var.keys() and \\\n key not in adata.var_names:\n raise ValueError(f'The key `{key}` does not exist in adata.obs, adata.var or adata.var_names.')\n\n def _create_adata_groups():\n if groups is None:\n return [('all',)], [adata]\n\n combs = list(product(*[set(adata.obs[g]) for g in groups]))\n adatas= [adata[reduce(lambda l, r: l & r,\n (adata.obs[k] == v for k, v in zip(groups, vals)), True)]\n for vals in combs] + [adata]\n\n if display_all:\n combs += [('all',)]\n adatas += [adata]\n\n return combs, adatas\n\n # group_v_combs contains the value combinations\n # used for grupping\n group_v_combs, adatas = _create_adata_groups()\n n_plots = len(group_v_combs)\n checkbox_group = CheckboxGroup(active=list(range(n_plots)), width=200)\n\n for key in keys:\n # create histogram\n cols, legends, callbacks = [], [], []\n plot_map = dict()\n slider = Slider(start=min_bins, end=max_bins, value=bins, step=1,\n title='Bins')\n\n fig = figure(*args, tools=tools, **kwargs)\n\n plot_ids = []\n for j, (ad, group_vs) in enumerate(zip(adatas, group_v_combs)):\n\n if ad.n_obs == 0:\n continue\n\n plot_ids.append(j)\n color = palette[len(plot_ids) - 1]\n\n if key in ad.obs.keys():\n orig = ad.obs[key]\n hist, edges = np.histogram(orig, density=True, bins=bins)\n elif key in ad.var.keys():\n orig = ad.var[key]\n hist, edges = np.histogram(orig, density=True, bins=bins)\n else:\n orig = ad[:, key].X\n hist, edges = np.histogram(orig, density=True, bins=bins)\n\n # original data, used for recalculation of histogram in JS code\n orig = ColumnDataSource(data=dict(values=orig))\n # data that we update in JS code\n source = ColumnDataSource(data=dict(hist=hist, l_edges=edges[:-1], r_edges=edges[1:]))\n\n legend = ', '.join(': '.join(map(str, gv)) for gv in zip(groups, group_vs)) \\\n if groups is not None else 'all'\n legends.append(legend)\n # create figure\n p = fig.quad(source=source, top='hist', bottom=0,\n left='l_edges', right='r_edges',\n fill_color=color, legend=legend,\n line_color=\"#555555\", fill_alpha=fill_alpha)\n\n # create callback and slider\n callback = CustomJS(args=dict(source=source, orig=orig), code=_inter_hist_js_code)\n callback.args['bins'] = slider\n callbacks.append(callback)\n\n # add the current plot so that we can set it\n # visible/invisible in JS code\n plot_map[f'p_{j}'] = p\n\n # slider now updates all values\n slider.js_on_change('value', *callbacks)\n plot_map['cb'] = checkbox_group\n\n button = Button(label='Toggle All', button_type='primary')\n code_t='\\n'.join(f'p_{p_id}.visible = false;' for i, p_id in enumerate(plot_ids))\n code_f ='\\n'.join(f'p_{p_id}.visible = true;' for i, p_id in enumerate(plot_ids))\n button.callback = CustomJS(\n args=plot_map,\n code=f'''if (cb.active.length == {len(plot_map) - 1}) {{\n console.log(cb.active);\n cb.active = Array();\n {code_t};\n }} else {{\n console.log(cb.active);\n cb.active = Array.from(Array({len(plot_map) - 1}).keys());\n {code_f};\n }}'''\n )\n\n checkbox_group.callback = CustomJS(\n args=plot_map,\n code='\\n'.join(f'p_{p_id}.visible = cb.active.includes({i});' for i, p_id in enumerate(plot_ids))\n )\n checkbox_group.labels = legends\n\n fig.legend.location = legend_loc\n fig.xaxis.axis_label = key\n fig.yaxis.axis_label = 'normalized frequency'\n fig.plot_width = kwargs.get('plot_width', 400)\n fig.plot_height = kwargs.get('plot_height', 400)\n\n 
cols.append(column(slider, button, row(fig, checkbox_group)))\n\n\n # transform list of pairs of figures and sliders into list of lists, where\n # each sublist has length <= 2\n # note that bokeh does not like np.arrays\n grid = list(map(list, array_split(cols, ceil(len(cols) / 2))))\n\n show(layout(children=grid, sizing_mode='fixed', ncols=2))", "def equalise_hist(image, bin_count=256):\n # TODO: your histogram equalization code\n #define arrays\n image = img_as_ubyte(image)\n row,col = image.shape\n new_image = np.zeros((row,col),dtype='uint8') \n\n # compute the value of each grayscale,and save in image_hist \n image_hist = np.bincount(image.flatten(), minlength=(bin_count))\n\n # normalise n[]\n norm_arr = (np.cumsum(image_hist)/(image.size))*(bin_count-1)\n norm_arr = norm_arr.astype('uint8')\n \n #Compute a normalized cumulative histogram\n for x in range(row):\n for y in range(col):\n new_image[x,y] = norm_arr[image[x,y]]\n \n return new_image", "def compute_histogram(im, block_factor=3, color_space='HSV'):\n\n # Shape = rows and columns\n remainder_rows = im.shape[0] % block_factor\n remainder_cols = im.shape[1] % block_factor\n\n im_block = cv2.copyMakeBorder(im, block_factor - remainder_rows, 0, block_factor - remainder_cols, 0,\n cv2.BORDER_CONSTANT)\n\n windowsize_r = int(im_block.shape[0] / block_factor)\n windowsize_c = int(im_block.shape[1] / block_factor)\n\n # print(im_block.shape)\n # print(str(windowsize_r)+' '+str(windowsize_c))\n # cv2.imshow(\"fullImg\", im_block)\n\n hist = []\n for r in range(0, im_block.shape[0], windowsize_r):\n for c in range(0, im_block.shape[1], windowsize_c):\n hist_blocks = []\n window = im_block[r:r + windowsize_r, c:c + windowsize_c]\n if color_space == 'GRAY':\n window_gray = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)\n hist_block = cv2.calcHist([window_gray], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'RGB':\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'HSV':\n window = cv2.cvtColor(window, cv2.COLOR_BGR2HSV)\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n \n hist.append(hist_blocks)\n\n return hist", "def yieldhist(self):\n labels = [\"initial\"] + [f\"N - {i}\" for i in self._names] + [\"N\"]\n if not self._delayed_mode:\n h = hist.Hist(hist.axis.Integer(0, len(labels), name=\"N-1\"))\n h.fill(numpy.arange(len(labels)), weight=self._nev)\n\n else:\n h = hist.dask.Hist(hist.axis.Integer(0, len(labels), 
name=\"N-1\"))\n for i, weight in enumerate(self._masks, 1):\n h.fill(dask_awkward.full_like(weight, i, dtype=int), weight=weight)\n h.fill(dask_awkward.zeros_like(weight))\n\n return h, labels", "def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()", "def _determine_histogram_bins(self, ma_maps):\n if isinstance(ma_maps, list):\n ma_values = self.masker.transform(ma_maps)\n elif isinstance(ma_maps, np.ndarray):\n ma_values = ma_maps.copy()\n else:\n raise ValueError(f\"Unsupported data type '{type(ma_maps)}'\")\n\n # Determine bins for null distribution histogram\n # Remember that numpy histogram bins are bin edges, not centers\n # Assuming values of 0, .001, .002, etc., bins are -.0005-.0005, .0005-.0015, etc.\n INV_STEP_SIZE = 100000\n step_size = 1 / INV_STEP_SIZE\n max_ma_values = np.max(ma_values, axis=1)\n # round up based on resolution\n max_ma_values = np.ceil(max_ma_values * INV_STEP_SIZE) / INV_STEP_SIZE\n max_poss_ale = self.compute_summarystat(max_ma_values)\n # create bin centers\n hist_bins = np.round(np.arange(0, max_poss_ale + (1.5 * step_size), step_size), 5)\n self.null_distributions_[\"histogram_bins\"] = hist_bins", "def create_general_hist(self, obj):\n hist = cv2.calcHist([obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n print cv2.normalize(hist).flatten()\n return cv2.normalize(hist).flatten()", "def make_and_save_histogramsX(pred_steerings, real_steerings,\n img_name = \"histogramsX.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(np.min(pred_steerings), np.min(real_steerings))\n bins = np.linspace(min_h, max_h, num=50)\n plt.hist(pred_steerings, bins=bins, alpha=0.5, label='Predicted', color='b')\n plt.hist(real_steerings, bins=bins, alpha=0.5, label='Real', color='r')\n #plt.title('Steering angle')\n plt.legend(fontsize=10)\n plt.savefig(img_name, bbox_inches='tight')", "def show_histogram(im):\n\n if im.ndim == 2:\n # Input image is single channel\n plt.hist(im.flatten(), 256, range=(0, 250), fc='k')\n plt.show()\n\n elif im.ndim == 3:\n # Input image is three channels\n fig = plt.figure()\n fig.add_subplot(311)\n plt.hist(im[..., 0].flatten(), 256, range=(0, 250), fc='b')\n fig.add_subplot(312)\n plt.hist(im[..., 1].flatten(), 256, range=(0, 250), fc='g')\n fig.add_subplot(313)\n plt.hist(im[..., 2].flatten(), 256, range=(0, 250), fc='r')\n plt.show()", "def histogram(self, mask=None, extrema=None):\r\n uni, counts = self._getcolors()\r\n return [l for l in counts]", "def normalise_histogram(histogram):\n total_sum = np.sum(histogram)\n for i in range(len(histogram)):\n histogram[i] /= total_sum\n\n return histogram", "def test_num_bins(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df1 = pd.DataFrame({'A': [0, 2, 4, 5, 7, 9, 11, 13, 13, 15]})\n df2 = pd.DataFrame({'A': [2, 4, 4, 6, 8, 7, 10, 14, 17, 19]})\n\n # building 1d-, 2d-, and 3d-histogram (iteratively)\n hist2 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist3 = hg.SparselyBin(origin=0.0, binWidth=1.0, quantity=unit('A'))\n hist4 = hg.Bin(num=20, low=0.0, high=20., quantity=unit('A'))\n hist5 = hg.Bin(num=20, low=0.0, high=20., quantity=unit('A'))\n hist6 = hg.Bin(num=201, low=0.0, 
high=1.005)\n\n # fill them\n hist2.fill.numpy(df1)\n hist3.fill.numpy(df2)\n hist4.fill.numpy(df1)\n hist5.fill.numpy(df2)\n\n assert hist2.num_bins() == 16\n assert hist3.num_bins() == 18\n assert hist4.num_bins() == 20\n assert hist5.num_bins() == 20\n assert hist6.num_bins() == 201\n\n assert hist2.num_bins(low=10, high=25) == 15\n assert hist3.num_bins(low=10, high=25) == 15\n assert hist4.num_bins(low=10, high=25) == 10\n assert hist5.num_bins(low=10, high=25) == 10\n assert hist6.num_bins(low=0.2089, high=0.9333) == 146\n\n assert hist2.num_bins(low=-10, high=28) == 38\n assert hist3.num_bins(low=-10, high=28) == 38\n assert hist4.num_bins(low=-10, high=28) == 20\n assert hist5.num_bins(low=-10, high=28) == 20\n assert hist6.num_bins(low=0.205, high=0.935) == 146", "def _generate_histograms(self):\n\n def get_xbins(xcolname):\n \"\"\"Returns the 'xbins' dictinary for plotly's 'Histrogram()' method.\"\"\"\n\n xmin, xmax = (float(\"inf\"), -float(\"inf\"))\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n xmin = min(xmin, xdata.min())\n xmax = max(xmax, xdata.max())\n\n return {\"size\" : (xmax - xmin) / 1000}\n\n xcolnames = Trivial.list_dedup(self.hist + self.chist)\n hist_set = set(self.hist)\n chist_set = set(self.chist)\n\n for xcolname in xcolnames:\n if xcolname in hist_set:\n ycolname = \"Count\"\n pinfo = self._add_pinfo(xcolname, ycolname, is_hist=True)\n _LOG.info(\"Generating histogram: %s vs %s.\", xcolname, ycolname)\n gobjs = []\n xbins = get_xbins(xcolname)\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n try:\n gobj = plotly.graph_objs.Histogram(x=xdata, name=res.reportid, xbins=xbins,\n opacity=self._opacity)\n except Exception as err:\n raise Error(f\"failed to create histogram \"\n f\"'{ycolname}-vs-{xcolname}':\\n{err}\")\n gobjs.append(gobj)\n\n self._create_diagram(gobjs, pinfo)\n\n if xcolname in chist_set:\n ycolname = \"Percentile\"\n _LOG.info(\"Generating cumulative histogram: %s vs %s.\", xcolname, ycolname)\n pinfo = self._add_pinfo(xcolname, ycolname, is_hist=True)\n gobjs = []\n if xcolname not in hist_set:\n xbins = get_xbins(xcolname)\n for res in self.rsts:\n xdata = self._base_unit(res.df, xcolname)\n try:\n gobj = plotly.graph_objs.Histogram(x=xdata, name=res.reportid, xbins=xbins,\n cumulative=dict(enabled=True),\n histnorm=\"percent\",\n opacity=self._opacity)\n except Exception as err:\n raise Error(f\"failed to create cumulative histogram \"\n f\"'{ycolname}-vs-{xcolname}':\\n{err}\")\n gobjs.append(gobj)\n\n self._create_diagram(gobjs, pinfo)", "def _make_histogram(values, bins):\n values = values.reshape(-1)\n counts, limits = np.histogram(values, bins=bins)\n limits = limits[1:]\n\n sum_sq = values.dot(values)\n return HistogramProto(min=values.min(),\n max=values.max(),\n num=len(values),\n sum=values.sum(),\n sum_squares=sum_sq,\n bucket_limit=limits,\n bucket=counts)", "def addHistogram1D(self, name, title, n_bins, minimum, maximum):\n\t\tself.histograms[ name ] = ROOT.TH1F(name, title, n_bins, minimum, maximum)", "def histograms(self, *args, **kwargs):\n return _image.image_histograms(self, *args, **kwargs)", "def get_histogram(self):\n\n for bin in range(self.bins.size):\n bin_inf = self.bins[bin]\n try: bin_sup = self.bins[bin + 1]\n except IndexError: bin_sup = self.vmax\n self.hist[bin] = np.sum(\n (self.values >= bin_inf)*(self.values < bin_sup))\n\n binned_values = np.sum(self.hist)\n if binned_values == 0: return self.hist # no binned value\n else: self.hist /= np.sum(self.hist)\n return 
self.hist", "def histopi(data):\n dataset = discrete_dataset(data)\n theoretical_dataset = [theoretical_effective(dataset)]*10\n observed = plt.bar(numpy.arange(len(dataset)) - 0.4, dataset, color=\"blue\", width=0.4)\n theoretical = plt.bar(numpy.arange(len(theoretical_dataset)), theoretical_dataset, color=\"deepskyblue\", width=0.4)\n plt.legend([observed, theoretical], [\"effectifs observes\", \"effectifs theoriques\"])\n plt.xlabel('pi digits')\n plt.ylabel('occurrence')\n plt.axis([-0.7, 9.7, 0, 130000])\n plt.savefig(\"report/khi2_histopi.png\", bbox_inches='tight')\n return dataset", "def _hist(xs, bins=100, range=None, stats=('entries', 'mean', 'rms'),\n xylabels = (), stats_xypos=(0.1, 0.7),\n *args, **kargs):\n if (range==None):\n range = (np.min(xs), np.max(xs))\n cc = hst.hist(xs, bins=bins, range=range, *args, **kargs);\n if (not stats):\n return cc\n ys, xedges = np.histogram(xs, bins, range=range)\n ns = len(xs)\n sel = np.logical_and(xs >= range[0], xs <= range[1])\n nos, mean, rms = len(xs[sel]), np.mean(xs[sel]), np.std(xs[sel])\n epsilon = (1.*nos)/(1.*ns)\n ss = ''\n if ('total entries') in stats:\n ss += 'total entries {0:d} \\n'.format(ns)\n if ('entries') in stats:\n ss += 'entries {0:d} \\n'.format(nos)\n if ('mean') in stats:\n ss += 'mean {0:.3f} \\n'.format(mean)\n if ('rms') in stats:\n ss += 'rms {0:.3f} \\n'.format(rms)\n xp, yp = _xypos(xedges, ys, xf=stats_xypos[0], yf=stats_xypos[1])\n ##plt.set_label(ss)\n # plt.gca().set_label(ss)\n # plt.legend()\n plt.text(xp, yp, ss)\n return cc", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plotHistogram(a):\n plt.figure(figsize=(10,5))\n plt.subplot(1,2,1)\n plt.imshow(a)\n plt.axis('off')\n histo = plt.subplot(1,2,2)\n histo.set_ylabel('Count')\n histo.set_xlabel('Pixel Intensity')\n n_bins = 30\n plt.hist(a[:,:,0].flatten(), bins= n_bins, lw = 0, color='r', alpha=0.5);\n plt.hist(a[:,:,1].flatten(), bins= n_bins, lw = 0, color='g', alpha=0.5);\n plt.hist(a[:,:,2].flatten(), bins= n_bins, lw = 0, color='b', alpha=0.5);", "def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)", "def histograma(p):\n img = read_img(p)\n show_histograma(img.reshape((-1)))", "def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n 
histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK", "def get_histogram(self):\n\n values_array = np.array(self.values)\n for bin0 in range(self.bins[0].size):\n bin_inf0 = self.bins[0][bin0]\n try: bin_sup0 = self.bins[0][bin0 + 1]\n except IndexError: bin_sup0 = self.vmax[0]\n values = values_array[\n (values_array[:, 0] >= bin_inf0)\n *(values_array[:, 0] < bin_sup0)][:, 1]\n for bin1 in range(self.bins[1].size):\n bin_inf1 = self.bins[1][bin1]\n try: bin_sup1 = self.bins[1][bin1 + 1]\n except IndexError: bin_sup1 = self.vmax[1]\n self.hist[bin0*self.Nbins[1] + bin1, 2] = (\n np.sum((values >= bin_inf1)*(values < bin_sup1)))\n\n if np.sum(self.hist[:, 2]) > 0: # there are binned values\n self.hist[:, 2] /= np.sum(self.hist[:, 2])\n return self.hist", "def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):\n varHists = []\n linkStr = \"\"\n singlePointSystematics = [\"ERDON\", \"ERDONRETUNE\", \"GLUONMOVETUNE\", \"BFRAG_PETERSON\"]\n\n sPS = 0\n\n if any(singlePointSystematic in systematic for singlePointSystematic in singlePointSystematics):\n sPS = 1\n\n linkStr = \"_\"\n variations = [\"\"]\n for variation in variations:\n path = directory_base + xsec_type + \"_\" + xsec_level + directory_tail + systematic + linkStr + variation + \"/combinedUnfolded/Hyp\" + observable + \"Results.txt\"\n inputfile = open(path, 'r').readlines()\n bins = []\n for line in inputfile:\n bins.append(float(line.split( )[3]))\n bins.append(float(line.split( )[5]))\n bins = sorted(bins)\n binsArray = array('f',bins)\n histNameUp = systematic + \"_UP\" \n histNameDown = systematic + \"_DOWN\" \n histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)\n histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)\n histUpFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n histDownFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n \n ibin = 0\n\n for line in inputfile:\n nomBin = histNominal.GetBinContent(ibin+1)\n nomBinCenter = histNominal.GetBinCenter(ibin+1)\n unc = float(line.split( )[7])\n if systematic == \"DY\":\n print \"DY UP = \" + str(1.0 + unc)\n print \"DY DOWN = \" + str(1.0 - unc)\n\n\n histUp.SetBinContent(ibin+1, 1.0 + unc)\n histDown.SetBinContent(ibin+1,1.0 - unc)\n ibin = ibin + 1 \n\n histUpVis = histUp.Clone()\n histDownVis = histDown.Clone()\n histUpFinal = histUp.Clone()\n histDownFinal = histDown.Clone()\n\n if systematic == \"PDF\":\n histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)\n\n return (histUpFinal, histDownFinal)", "def rebin_plot(histogram, bins_array):\n newname = histogram.GetName()+'_rebinned'\n newplot = histogram.Rebin(len(bins_array)-1, newname, bins_array)\n newplot.SetDirectory(0)\n\n #print \"found overflow for\", newname, \"of\", overflow\n #newplot.SetBinContent(newplot.GetNbinsX(),newplot.GetBinContent(newplot.GetNbinsX())+newplot.GetBinContent(newplot. GetNbinsX()+1))\n #newplot.SetBinError(newplot.GetNbinsX(),math.sqrt(newplot.GetBinError(newplot.GetNbinsX())**2 + newplot. 
GetBinError(newplot.GetNbinsX()+1)**2 ) )\n #newplot.SetBinContent(newplot.GetNbinsX()+1,0) # Set overflow to 0\n\n return newplot", "def channel_histograms(df, xlim=None, n_bins=100, save_fig_filename=None):\n sns.set(font_scale=3)\n plt.rcParams['patch.linewidth'] = 0\n plt.rcParams['patch.edgecolor'] = 'none'\n n_rows = int(np.ceil(df.columns.shape[0] / 4))\n _, axes = plt.subplots(n_rows, 4, sharex=True)\n if xlim is None:\n pass\n else:\n plt.xlim(xlim)\n axes = axes.ravel()\n for i, col in enumerate(df.columns):\n figsize = (4 * 4, n_rows * 3)\n df[col].hist(bins=n_bins, ax=axes[i],\n figsize=figsize, label=col, grid=False)\n axes[i].set_title(col, x=0.5, y=0.7)\n if save_fig_filename is None:\n plt.show()\n else:\n plt.savefig(save_fig_filename)\n plt.close()\n sns.set(font_scale=1)", "def hist_shifts(key,conn,fun,range_bn = None, fig = None):\n\n # get file name/comp_num\n (comp_num,fname) = conn.execute(\"select comp_key,fout from comps\\\n where comp_key = ? and function = 'Iden'\",(key,)).fetchone()\n\n # open file/group\n\n F = h5py.File(fname,'r')\n\n nbins = 100\n\n if range_bn is None:\n bin_edges = np.linspace(-2,2,nbins + 1)\n else:\n bin_edges = np.linspace(*(range_bn + (nbins + 1,)))\n bin_counts = np.zeros(nbins)\n # extract the relevant data\n for fr in F:\n if fr == 'parameters':\n continue\n bin_counts += fun(F,fr,comp_num,bin_edges)\n \n # plot\n istatus = lplts.non_i_plot_start()\n if fig is None:\n (fig,ax) = lplts.set_up_plot()\n else:\n ax = fig.get_axes()[0]\n \n sh = ax.step(bin_edges[:-1],bin_counts/np.sum(bin_counts))\n\n if ax.get_legend() is None:\n print 'attempt to set leg'\n ax.legend([sh],[F.attrs['Exposure']],loc = 3)\n else:\n #leg =aff ax.get_legend()\n pass\n lplts.non_i_plot_stop(istatus)\n # clean up\n F.close()\n del F\n\n return fig", "def hist_weights(p1, p2, z, zbins, n_chop=4, truncated=True):\n if not truncated:\n ixs = (z >= zbins[0]) & (z < zbins[-1])\n z = z[ixs]\n p1, p2 = p1[ixs], p2[ixs]\n\n n_zbins = len(zbins) - 1\n\n # Left closed, right open partitioning\n z0_bins = zbins\n z0_bins[-1] += 0.001\n z_ind = np.digitize(z, z0_bins)\n\n chop1 = np.linspace(min(p1), max(p1), n_chop)\n chop2 = np.linspace(min(p2), max(p2), n_chop)\n\n # CREATING A 3D DATACUBE OF WEIGHTS\n cube = np.zeros((n_zbins, n_chop - 1, n_chop - 1))\n\n for i in range(n_zbins):\n ind = (z >= zbins[i]) & (z < zbins[i + 1])\n cube[i] = np.histogram2d(p1[ind], p2[ind], bins=(chop1, chop2))[0]\n\n # Trim bins with no objects\n # Outer - parameter; Inner - redshift\n for i in range(n_chop - 1):\n for j in range(n_chop - 1):\n # Sets all bins to 0 if any one bin has no objects in it\n if 0 in cube[:, i, j]:\n cube[:, i, j] = 0\n\n cube_sum = np.sum(cube, axis=0)\n\n # A. 
NORMALIZED WEIGHTS ACROSS ALL REDSHIFTS\n p0_bins, p1_bins = chop1, chop2\n\n # <-- Required since histogram2d and digitize have different\n # binning schemes\n p0_bins[-1] += 0.001\n p1_bins[-1] += 0.001\n\n foo = np.digitize(p1, p0_bins)\n blah = np.digitize(p2, p1_bins)\n\n weight_mat = cube_sum / cube\n weight_mat[np.isnan(weight_mat)] = 0\n\n # To obtain consistent weights across all redshifts\n weight_mat /= np.linalg.norm(weight_mat, axis=(1, 2))[:, None, None]\n\n # Final histogram weights to be applied\n h_weights = weight_mat[z_ind - 1, foo - 1, blah - 1]\n\n # # To verify that the histogram rebinning has been done correctly\n # for i in range(n_zbins):\n # ind = (z >= zbins[i]) & (z < zbins[i + 1])\n # plt.figure()\n # plt.hist2d(p1[ind], p2[ind], bins=(chop1, chop2), weights=h_weights[ind], normed=True)[0]\n # plt.colorbar()\n # plt.show()\n\n return h_weights", "def histeq(im,nbr_bins=256):\r\n # Calculate histogram of images\r\n imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)\r\n cdf = imhist.cumsum() # cumulative distribution function\r\n cdf = 255 * cdf / cdf[-1] # 归一化\r\n # Using the linear interpolation of cumulative distribution function, the new pixel value is calculated.\r\n im2 = interp(im.flatten(),bins[:-1],cdf)\r\n return im2.reshape(im.shape), cdf", "def fwhmwhisker_multiext(filename,sigma,band,zenith):\n hdu=pf.open(filename)\n e1=[]\n e2=[]\n fwhmw=[]\n whiskerw=[]\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n for i in range(Nobj):\n print i\n img = hdui.data[i][4:].reshape(160,160)\n imgrbin = rebin(img,(40,40))\n res=wfwhm(imgrbin,sigma)\n e1.append(res[0])\n e2.append(res[1])\n whiskerw.append(res[2]*0.27)\n fwhmw.append(res[3]*0.27)\n e1 = np.array(e1)\n e2 = np.array(e2)\n fwhmw = np.array(fwhmw)\n whiskerw = np.array(whiskerw)\n e1mean = e1.mean()\n e1std = e1.std()\n e2mean = e2.mean()\n e2std = e2.std()\n whiskerwmean = whiskerw.mean()\n whiskerwstd = whiskerw.std()\n fwhmwmean = fwhmw.mean()\n fwhmwstd = fwhmw.std()\n r50mean = np.mean(fwhmw/2.)\n r50std = np.std(fwhmw/2.)\n pl.figure(figsize=(15,10))\n pl.subplot(2,3,1)\n pl.hist(e1,bins=20,normed=True)\n pl.xlabel('e1')\n pl.title('mean: '+str(round(e1mean,6))+' std: '+str(round(e1std,5)))\n pl.subplot(2,3,2)\n pl.hist(e2,bins=20,normed=True)\n pl.xlabel('e2')\n pl.title('mean: '+str(round(e2mean,6))+' std: '+str(round(e2std,5)))\n pl.subplot(2,3,3)\n pl.hist(whiskerw,bins=20,normed=True)\n pl.xlabel('whisker')\n pl.title('mean: '+str(round(whiskerwmean,5))+' std: '+str(round(whiskerwstd,5)))\n pl.subplot(2,3,4)\n pl.hist(fwhmw,bins=20,normed=True)\n pl.xlabel('fwhm')\n pl.title('mean: '+str(round(fwhmwmean,5))+' std: '+str(round(fwhmwstd,5)))\n pl.subplot(2,3,5)\n pl.hist(fwhmw/2.,bins=20,normed=True)\n pl.xlabel('r50')\n pl.title('mean: '+str(round(r50mean,5))+' std: '+str(round(r50std,5)))\n pl.figtext(0.7,0.4,'band: '+band)\n pl.figtext(0.7,0.37,'zenith angle: '+zenith +' deg')\n pl.figtext(0.3,0.95,'Perfect focus/alignment, 0.7 arcsec fwhm circular seeing',fontsize=18,color='red')\n pl.savefig(filename[0:-6]+'png')\n np.savetxt(filename[0:-6]+'txt',[e1mean,e1std,e2mean,e2std,whiskerwmean,whiskerwstd,fwhmwmean,fwhmwstd,r50mean,r50std],fmt='%10.5f')\n pl.close()\n return '---done !-----'", "def numpy_gw_hist(data, bins, scale):\n data = np.atleast_1d(data)\n bins = np.atleast_1d(bins)\n nbins, ndata = bins.size, data.size\n\n scale = np.zeros(ndata) + scale\n\n logsm_bin_matrix = np.repeat(\n bins, ndata).reshape((nbins, ndata)).astype('f4')\n data_matrix = np.tile(data, 
nbins).reshape((nbins, ndata)).astype('f4')\n smoothing_kernel_matrix = np.tile(\n scale, nbins).reshape((nbins, ndata)).astype('f4')\n\n cdf_matrix = norm.cdf(\n logsm_bin_matrix, loc=data_matrix, scale=smoothing_kernel_matrix)\n\n prob_bin_member = np.diff(cdf_matrix, axis=0) # Shape (nbins-1, ndata)\n\n total_num_bin_members = np.sum(\n prob_bin_member, axis=1) # Shape (nbins-1, )\n\n return total_num_bin_members", "def _plot_psth_flat(self, sigma=5, figsize = (15, 8)):\n\t\n\t\tgaus_filt = sp.ndimage.gaussian_filter1d\n\t\tall_resp = gaus_filt(self.conditions_hist_mean.flatten(), sigma)\n\t\t\n\t\tfig = plt.figure(figsize=figsize)\n\t\tax = fig.add_subplot(1, 1, 1)\n\t\t\n\t\tax.plot(all_resp, linestyle='-', color='0.28')\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\tcon_mark = np.arange(0, (self.bins.size -1) * n_con, self.bins.size -1)\n\t\t\t\t\n\t\tax.xaxis.set_ticks(con_mark)\n\n\t\ttry:\n\t\t\tax.xaxis.set_ticklabels(self.cond_label)\n\t\texcept:\n\t\t\tax.xaxis.set_ticklabels(np.unique(self.marker_codes))\n\t\t\n\t\tfreq_label = np.round(ax.get_yticks() * (1/self.bin_width),\n\t\t\t\t\t\t\t decimals = 1)\n\t\tax.set_yticklabels(freq_label)\n\t\tax.set_ylabel('Frequency')\n\t\t\n\t\tfor label in ax.xaxis.get_majorticklabels():\n\t\t\tlabel.set_horizontalalignment('left')\n\t\t\t\n\t\tax.set_xlim(0, (self.bins.size -1) * n_con)\n\t\t\n\t\t# bug with macosx backend\n# plt.tight_layout()\n\t\tplt.subplots_adjust(hspace=0.45)", "def calcHistogram(Tech_res):\n A_Hist = np.histogram(Tech_res.A_mean, Tech_res.hist_bins)\n top, bin_list, x_steps = list(A_Hist[0]), list(A_Hist[1]), []\n for n in range(np.shape(bin_list)[0]-1):\n x_steps.append((bin_list[n+1] + bin_list[n]) / 2)\n Tech_res.update_tech_meas(Hist_tops=top, Hist_steps=x_steps)\n return", "def histogram(histo,nbr_launch,file):\n with open(\"Results/Histogram_{}_{}.txt\".format(nbr_launch,file.strip(\".yaml\")),'w') as f:\n f.write(\"mgm results :\"+\"\\n\")\n for val,occur in histo[\"mgm\"].items():\n f.write(\"value \"+str(val)+\" : \"+str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")\n f.write(\"\\n\")\n f.write(\"mcs_mgm results :\" + \"\\n\")\n for val, occur in histo[\"mcs_mgm\"].items():\n f.write(\"value \" + str(val) + \" : \" + str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")\n f.write(\"\\n\")\n f.write(\"gca_mgm results :\" + \"\\n\")\n for val, occur in histo[\"gca_mgm\"].items():\n f.write(\"value \" + str(val) + \" : \" + str(occur[0])+\" \"+\"Initial costs : \"+str(occur[1]).strip(\"[\"+\"]\")+\"\\n\")", "def plot_hist(datasets, bins, labels, alphas):\n assert len(labels) == len(datasets)\n assert len(alphas) == len(datasets)\n plt.figure(figsize=[9,6])\n for idx, data in enumerate(datasets):\n plt.hist(data, bins=bins[idx], density=True, label=labels[idx], alpha=alphas[idx])\n plt.xlabel(\"PHQ score\")\n plt.ylabel(\"Probability\")\n plt.legend()\n plt.savefig(\"saved_plots/hist_\"+\"_\".join(labels)+\".png\")\n plt.show()", "def do_mi_histogram(ACT, d, mirange=None, overflow=False, summi=False, final=False, log=None):\n directory = d.dirname\n prefix = d.name\n infile = d.path(d.adj_supp)\n if final:\n outfile = prefix + \".final.hist.csv\"\n d.finalhist = outfile\n elif summi:\n outfile = prefix + \".summi.hist.csv\"\n d.summihist = outfile\n else:\n outfile = prefix + \".mi.hist.csv\"\n d.mihist = outfile\n\n cmdline = \"apple.py histogram -n 1000 -o {}\".format(outfile)\n if mirange:\n cmdline += \" -r {} {}\".format(mirange[0], 
mirange[1])\n if overflow:\n cmdline += \" -v\"\n if final:\n cmdline += \" {}/{}.summi.adj\".format(directory, prefix)\n elif summi:\n cmdline += \" -s\"\n cmdline += \" {}/{}.mi.adj\".format(directory, prefix)\n else:\n cmdline += \" \" + infile\n if log:\n log.log(\"Executing: {}\", cmdline)\n qsubfile = d.name + \".hist.qsub\"\n with open(qsubfile, \"w\") as out:\n out.write(\"\"\"#!/bin/bash\n#SBATCH --mem-per-cpu=40G\n#SBATCH --time=60:00:00\n\nmodule load dibig_tools\n{}\n\"\"\".format(cmdline))\n\n if True: #ACT.missingOrStale(outfile, infile): # *** Is this correct?\n jid = ACT.submit(qsubfile, done=\"hist.@.done\")\n return jid\n else:\n return False", "def np_histogram(data, title, bins=\"auto\"):\n figure = plt.figure()\n canvas = figure.canvas\n plt.hist(data, bins=bins)\n plt.title(title)\n\n canvas.draw()\n w, h = canvas.get_width_height()\n np_hist = np.fromstring(canvas.get_renderer().tostring_rgb(), dtype=np.uint8).reshape(h, w, 3)\n plt.close(figure)\n util.np_info(np_hist)\n return np_hist", "def drawHist(data, xLabel, unit, binSize, title):\n mean = np.mean(data)\n median = np.median(data)\n mode = stats.mode(data)[0].astype(float)\n \n q1, q3 = np.percentile(data, [25, 75])\n iqr = q3 - q1\n sigma = np.std(data)\n \n \n bins = np.arange(min(data), max(data) + 1, binSize)\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(12,7))\n plt.hist(data, bins=bins, histtype='bar') \n plt.title(title)\n plt.xlabel(xLabel + \" \" + unit)\n plt.ylabel('count')\n ymax = ax.get_ylim()[1]\n ax.vlines(mean, 0, ymax, color='red', label='mean')\n ax.vlines(mean-sigma, 0, ymax, color='red', linestyle='--', \n label='mean +/- std')\n ax.vlines(mean+sigma, 0, ymax, color='red', linestyle='--')\n plt.legend()\n plt.show()\n \n print(\"Einheit: \", unit)\n print(\"Minimum: \", round(data.min(),3))\n print(\"Maximum: \", round(data.max(),3))\n print(\"Mittelwert: \", round(mean,3))\n print(\"Median: \", round(median,3))\n print(\"Modus: \", round(mode[0],3))\n print(\"Standardabweichung: \", round(sigma, 3))\n print(\"1. Quartil: \", round(q1,3))\n print(\"3. 
Quartil: \", round(q3,3))\n print(\"Quartilsdifferenz: \", round(iqr,3))", "def residual_hist(hdata, hmodel, scaling, crange=(-10.0, 10.0)):\n nbins = hdata[0].GetNbinsX()\n scDat = []\n scMod = []\n scRes = []\n for j, (i,c) in enumerate(ic):\n dat = TH2D(\n 'dataHist{0}{1}'.format(c, i), 'dataHist{0}{1}'.format(c, i),\n nbins, crange[0]*scaling, crange[1]*scaling,\n nbins, crange[0]*scaling, crange[1]*scaling\n )\n mod = TH2D(\n 'modelHist{0}{1}'.format(c, i), 'modelHist{0}{1}'.format(c, i),\n nbins, crange[0]*scaling, crange[1]*scaling,\n nbins, crange[0]*scaling, crange[1]*scaling\n )\n res = TH2D(\n 'residualHist{0}{1}'.format(c, i), 'residualHist{0}{1}'.format(c, i),\n nbins, crange[0]*scaling, crange[1]*scaling,\n nbins, crange[0]*scaling, crange[1]*scaling\n )\n for xbin in range(nbins):\n for ybin in range(nbins):\n m = hmodel[j].GetBinContent(xbin, ybin)\n mod.SetBinContent(xbin, ybin, m)\n d = hdata[j].GetBinContent(xbin, ybin)\n if d <= 0.0:\n continue\n dat.SetBinContent(xbin, ybin, d)\n if m < d:\n e = hdata[j].GetBinErrorLow(xbin, ybin)\n else:\n e = hdata[j].GetBinErrorUp(xbin, ybin)\n res.SetBinContent(xbin, ybin, (d-m)/e)\n scDat.append(dat)\n scMod.append(mod)\n scRes.append(res)\n return scDat, scMod, scRes", "def envSysSignedHist(*hists): ### keep track of the signs somehow for systematics in cards\n central = hists[0]\n variations = hists[1:]\n if not variations:\n raise Exception(\"No Variations Given! %s, %s\"%(a, variations) )\n systs = [ ]\n sign = 1\n for var in variations:\n syst_hist = SignedSysHistFunc(central, var)\n syst_hist.Scale(sign)\n #syst_hist.SetBit( syst_hist.kIsAverage ) ## with this when hists are added they are averaged\n systs.append( syst_hist )\n sign *= -1\n #print systs\n #for sh in systs[1:] :\n # systsum.Add(sh)\n abssysts = [ th2Func(h, lambda x: abs(x) ) for h in systs ]\n\n nx = central.GetNbinsX()\n ny = central.GetNbinsY()\n envhist = central.Clone()\n for x in xrange(nx):\n for y in xrange(ny):\n systvals = [ systhist.GetBinContent(x+1, y+1 ) for systhist in abssysts ]\n v = max(systvals)\n envhist.SetBinContent(x+1,y+1, v)\n return envhist, systs", "def simetrize_3dhistogram(histogram):\n N =len(histogram)\n n_histogram = np.zeros((N,N,N))\n for i in range(N):\n for j in range(i,N):\n for k in range(j, N):\n S = histogram[i][j][k] + histogram[k][i][j] + histogram[j][k][i] + histogram[i][k][j] + histogram[j][i][k] + histogram[k][j][i]\n n_histogram[i][j][k] = S\n n_histogram[k][i][j] = S\n n_histogram[j][k][i] = S\n n_histogram[i][k][j] = S\n n_histogram[j][i][k] = S\n n_histogram[k][j][i] = S\n #a[i][j][k], a[k][i][j], a[j][k][i], a[i][k][j], a[j][i][k], a[k][j][i]\n return n_histogram", "def generate_formula_histogram(self):\n\n histogram = dict()\n for element in self.atomelements:\n if element in histogram.keys():\n histogram[element] += 1\n else:\n histogram[element] = 1\n return histogram", "def hist(a,bins):\n n=searchsorted(sort(a),bins)\n n=concatenate([n,[len(a)]])\n n=array(list(map(float,n)))\n# n=array(n)\n return n[1:]-n[:-1]", "def pcf2_iso_histo(data_location='../../fake_DATA/DATOS/data_500.dat',rand_location='../../fake_DATA/DATOS/rand0_500.dat', d_max=180.0, bins_number=30):\n \n data = np.loadtxt(fname=data_location, delimiter=\" \", usecols=(0,1,2))\n rand0 = np.loadtxt(fname=rand_location, delimiter=\" \", usecols=(0,1,2))\n \n if not data.shape == rand0.shape:\n raise Exception(\"The data file and rand file do not have the same size\")\n #351 s\n\n #Pure histograms\n start = time.perf_counter()\n print('start 
DDD distances')\n triangle_points = np.array(list(combinations(data,3)))\n r_12 = triangle_points[:,0,:]-triangle_points[:,1,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = triangle_points[:,1,:]-triangle_points[:,2,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = triangle_points[:,2,:]-triangle_points[:,0,:]\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n\n DDD, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n \n end = time.perf_counter()\n print(f'Finished creating the DDD histo in {end-start} s')\n\n start = time.perf_counter()\n print('start RRR distances')\n triangle_points = np.array(list(combinations(rand0,3)))\n r_12 = triangle_points[:,0,:]-triangle_points[:,1,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = triangle_points[:,1,:]-triangle_points[:,2,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = triangle_points[:,2,:]-triangle_points[:,0,:]\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n\n RRR, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n \n end = time.perf_counter()\n\n\n print(f'Finished creating the RRR histo in {end-start} s')\n\n #Mixed histogram\n start = time.perf_counter()\n print(\"Started gathering the data points pairs\")\n DD_side_points = np.array(list(combinations(data,2)))\n print(\"Finished data points pairs\")\n\n print(\"Started gathering the rand0 points pairs\")\n RR_side_points = np.array(list(combinations(rand0,2)))\n print(\"Finished rand0 points pairs\")\n\n print(\"Started loop for DDR and RRD histograms\")\n \n DDR = np.zeros((bins_number,bins_number,bins_number))\n RRD = np.zeros((bins_number,bins_number,bins_number))\n\n for data_point, rand_point in zip(data, rand0):\n ##DDR\n r_12 = rand_point-DD_side_points[:,0,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = DD_side_points[:,0,:]-DD_side_points[:,1,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = DD_side_points[:,1,:]-rand_point\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n H_DDR, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n DDR += H_DDR\n\n #RRD\n r_12 = data_point-RR_side_points[:,0,:]\n r_12=r_12**2\n r_12 = r_12[:,0]+r_12[:,1]+r_12[:,2]\n r_12 = np.sqrt(r_12)\n\n r_23 = RR_side_points[:,0,:]-RR_side_points[:,1,:]\n r_23 = r_23**2\n r_23 = r_23[:,0]+r_23[:,1]+r_23[:,2]\n r_23 = np.sqrt(r_23)\n\n r_31 = RR_side_points[:,1,:]-data_point\n r_31 = r_31**2\n r_31 = r_31[:,0]+r_31[:,1]+r_31[:,2]\n r_31 = np.sqrt(r_31)\n H_RRD, edges = np.histogramdd(np.column_stack((r_12, r_23, r_31)), bins=(bins_number,bins_number,bins_number), range=[[0,d_max],[0,d_max],[0,d_max]])\n RRD += H_RRD\n \n DDR = DDR/3\n RRD = RRD/3\n end = time.perf_counter()\n print(f'Finished the mixed histograms DDR an RRD in {end-start} s')\n\n return RRR, DDD, DDR, RRD, edges", "def histeq( im, nbr_bins = 256):\n\t# get image histogram \n\timhist, bins = histogram( im.flatten(), nbr_bins, normed = True) \n\tcdf = imhist.cumsum() \n\t# cumulative distribution function cdf = 255 * cdf / 
cdf[-1] \n\t# normalize \n\t# use linear interpolation of cdf to find new pixel values \n\tim2 = interp( im.flatten(), bins[:-1], cdf) \n\treturn im2.reshape( im.shape), cdf", "def img_histogram(img):\n\n plt.figure()\n\n if len(img.shape) > 2:\n\n plt.subplot(3,1,1)\n plt.hist(img[:,:,0].ravel(),bins=range(257),color='b')\n plt.title('Image Histogram')\n plt.legend('Blue')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,2)\n plt.hist(img[:,:,1].ravel(),bins=range(257),color='g')\n plt.legend('Green')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.subplot(3,1,3)\n plt.hist(img[:,:,2].ravel(),bins=range(257),color='r')\n plt.legend('Red')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()\n\n else:\n\n plt.hist(img[:,:].ravel(),bins=range(257))\n plt.title('Image Histogram - Grayscale')\n plt.xlabel('Pixel Values')\n plt.ylabel('Frequency')\n\n plt.ion()\n plt.show()", "def fillHistograms(self, params, hists, mode = INTENS):\n\t\tif mode.IS_THEO and not self.hasTheo:\n\t\t\tprint \"No theory loaded, cannot fill histogram\"\n\t\tif not len(hists) == self.nSect:\n\t\t\traise IndexError(\"Histogram number mismatch\")\n\t\tcorrAmp = self.getCorrectedAmplitudes(params)\n\t\tfor s in range(self.nSect):\n\t\t\tcount = 0\n\t\t\tstart = self.borders[s ]\n\t\t\tstop = self.borders[s+1]\n\t\t\tfor i in range(start, stop):\n\t\t\t\tampl = corrAmp[2*i] + 1.j * corrAmp[2*i+1]\n\t\t\t\tnorm = self.norms[i]\n\t\t\t\tcoma = np.zeros((2,2))\n\t\t\t\tjac = np.zeros((2))\n\t\t\t\tcoma[0,0] = self.coma[2*i ,2*i ]\n\t\t\t\tcoma[0,1] = self.coma[2*i ,2*i+1]\n\t\t\t\tcoma[1,0] = self.coma[2*i+1,2*i ]\n\t\t\t\tcoma[1,1] = self.coma[2*i+1,2*i+1]\n\t\t\t\tif mode == INTENS:\n\t\t\t\t\tval = abs(ampl)**2\n\t\t\t\t\tjac[0] = 2*ampl.real\n\t\t\t\t\tjac[1] = 2*ampl.imag\n\t\t\t\telif mode == INTENSNORM:\n\t\t\t\t\tval = abs(ampl)**2/norm\n\t\t\t\t\tjac[0] = 2*ampl.real/norm\n\t\t\t\t\tjac[1] = 2*ampl.imag/norm\n\t\t\t\telif mode == REAL:\n\t\t\t\t\tval = ampl.real\n\t\t\t\t\tjac[0] = 1.\n\t\t\t\telif mode == IMAG:\n\t\t\t\t\tval = ampl.imag\n\t\t\t\t\tjac[1] = 1.\n\t\t\t\telif mode == REIMCORRELATION:\n\t\t\t\t\tval = coma[0,1]\n\t\t\t\telif mode == PHASE:\n\t\t\t\t\tval = phase(ampl)\n\t\t\t\t\tif ampl.real == 0.:\n\t\t\t\t\t\tif ampl.imag > 0.:\n\t\t\t\t\t\t\tjac[0] = -1./ampl.imag\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tjac[0] = 1./ampl.imag\n\t\t\t\t\telse:\n\t\t\t\t\t\tcommon = 1. 
+ ampl.imag**2/ampl.real**2\n\t\t\t\t\t\tjac[0] = -ampl.imag/ampl.real**2/common\n\t\t\t\t\t\tjac[1] = 1./ampl.real/common\n\t\t\t\telif mode == INTENSTHEO:\n\t\t\t\t\tval = abs(self.theo[i])**2\n\t\t\t\telif mode == REALTHEO:\n\t\t\t\t\tval = self.theo[i].real\n\t\t\t\telif mode == IMAGTHEO:\n\t\t\t\t\tval = self.theo[i].imag\n\t\t\t\telif mode == PHASETHEO:\n\t\t\t\t\tval = phase(self.theo[i])\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Unknown mode '\" + mode + \"'\")\n\t\t\t\terr = np.dot(jac, np.dot(coma,jac))**.5\n\t\t\t\thists[s].SetBinContent(self.bin3pi+1, count + 1, val)\n\t\t\t\thists[s].SetBinError(self.bin3pi+1, count + 1, err)\n\t\t\t\tcount += 1", "def histogram_equalization_helper(im):\n\n im *= (255 / im.max())\n c_m = im.min()\n hist_orig, bins = np.histogram(im, bins=256, range=[0, 256])\n cumulative_hist = np.cumsum(hist_orig)\n cumulative_hist = (((cumulative_hist - c_m) * 255) /(im.size)).astype(int)\n im_eq = cumulative_hist[im.astype(int)]\n hist_eq, bins_eq = np.histogram(im_eq, bins=256, range=[0, 256])\n im_eq = im_eq/ 255\n\n # plt.plot((bins[:-1] + bins[1:]) / 2, hist_orig)\n # plt.hist(im.flatten(), bins=128)\n # plt.show()\n #\n # plt.plot((bins_eq[:-1] + bins_eq[1:]) / 2, hist_eq)\n # plt.hist(im.flatten(), bins=128)\n #\n # plt.show()\n return im_eq, hist_orig, hist_eq", "def meanSysSignedHist(*hists): ### keep track of the signs somehow for systematics in cards\n central = hists[0]\n variations = hists[1:]\n if not variations:\n raise Exception(\"No Variations Given! %s, %s\"%(a, variations) )\n systs = [ ]\n sign = 1\n for var in variations:\n syst_hist = SignedSysHistFunc(central, var)\n syst_hist.Scale(sign)\n #syst_hist.SetBit( syst_hist.kIsAverage ) ## with this when hists are added they are averaged\n systs.append( syst_hist )\n sign *= -1\n #print systs\n #for sh in systs[1:] :\n # systsum.Add(sh)\n abssysts = [ th2Func(h, lambda x: abs(x) ) for h in systs ]\n #signsysts = [ th2Func(h, lambda x: abs(x)/x) for h in systs]\n\n abssystmean = abssysts[0].Clone()\n abssystmean.SetBit(abssystmean.kIsAverage)\n signedsum = systs[0].Clone()\n for abssyst in abssysts[1:]:\n abssyst.SetBit(abssyst.kIsAverage)\n abssystmean.Add( abssyst )\n for systh in systs[1:]:\n signedsum.Add(systh)\n signs = th2Func( signedsum, lambda x: abs(x) )\n signs.Divide( signedsum ) \n\n systmean = abssystmean.Clone()\n systmean.Multiply( signs )\n \n print 'made this change' \n #systmean = th2Func( systmean, lambda x: x if float(x) >0 or float(x)<0 else (0.0000001 if x==0 else 0 ) ) # Set to small value if 0, set to 0 if nan\n systmean = th2Func( systmean, lambda x: x if float(x) >0 or float(x)<0 else (0.0000001 if x==0 else 0 ) ) # Set to small value if 0, set to 0 if nan\n \n return systmean, systs", "def getHistogram(self, var, idx = None, translation = None, other = None,\\\n verbose = 1, ab = [], bins = 100, minmax = None):\n \n if idx is None: idx = np.arange(self.atoms.shape[0])\n if translation is None: translation = [0]\n if isinstance(translation, (int, np.integer)): translation = [translation]\n\n data, lbl, leg = self.getData(var = var, idx = idx, translation = translation,\\\n verbose = verbose, ab = ab, other = other)\n x = []\n y = []\n for i, item in enumerate(data):\n cnt, bin = np.histogram(item, bins = bins, range = minmax)\n x.append((bin[:-1] + bin[1:]) / 2)\n y.append(cnt)\n\n return x, y, lbl, leg", "def hist_average_quality(self, fontsize=16, bins=None):\n\n hq_qv = [pylab.mean([ord(X)-33 for X in read['quality'].decode()]) \n for read in 
self.hq_sequence]\n lq_qv = [pylab.mean([ord(X) -33 for X in read['quality'].decode()]) \n for read in self.lq_sequence]\n\n if bins is None:\n bins = range(0,94)\n Y1, X = np.histogram(hq_qv, bins=bins)\n Y2, X = np.histogram(lq_qv, bins=bins)\n pylab.bar(X[1:], Y1, width=1, label=\"HQ\")\n pylab.bar(X[1:], Y2, bottom=Y1, width=1, label=\"LQ\")\n pylab.xlim([0.5, 93.5])\n\n pylab.xlabel(\"Isoform average QV\")\n pylab.ylabel(\"# Isoform\")\n pylab.legend(fontsize=fontsize)\n\n ax = pylab.twinx()\n N = np.sum(Y1+Y2)\n ax.plot(X, [N] + list(N-np.cumsum(Y1+Y2)), \"k\")" ]
[ "0.63964415", "0.62685835", "0.62414837", "0.62069935", "0.60966927", "0.60651386", "0.60223716", "0.5942936", "0.5920461", "0.5877657", "0.5857634", "0.5831009", "0.5829086", "0.58266604", "0.5823835", "0.5822443", "0.5815776", "0.58128756", "0.57792675", "0.57546276", "0.5750782", "0.57480633", "0.5736632", "0.5733259", "0.5724578", "0.57160056", "0.5709715", "0.5693564", "0.56912637", "0.5687391", "0.5675257", "0.5670932", "0.566606", "0.5664174", "0.56464", "0.5632326", "0.56191474", "0.5605996", "0.5592198", "0.55881923", "0.55717736", "0.5564286", "0.55570537", "0.55352265", "0.55318373", "0.5526988", "0.55180925", "0.55169463", "0.5514021", "0.5510804", "0.55078644", "0.5507285", "0.5500804", "0.55002874", "0.5497458", "0.5496922", "0.5496516", "0.5494018", "0.5491867", "0.5491864", "0.5489018", "0.5481925", "0.54797125", "0.547766", "0.54692733", "0.5463416", "0.54593617", "0.54593617", "0.54593617", "0.5457472", "0.54483974", "0.5442555", "0.54403347", "0.5439712", "0.54388887", "0.54383194", "0.5437739", "0.5437733", "0.5435645", "0.54307795", "0.54243255", "0.54146844", "0.54138404", "0.5412733", "0.5411603", "0.5408544", "0.5405765", "0.5402308", "0.5401888", "0.54017997", "0.53990984", "0.5397976", "0.53928226", "0.5389284", "0.5387697", "0.5386045", "0.5382877", "0.5381946", "0.5374044", "0.5372854", "0.5371493" ]
0.0
-1
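The record ending with the two standalone values above (a document score of 0.0 and a rank of -1) follows the same per-record layout as the records below: a query string, a document string, a metadata dict, a list of negative documents, a parallel list of scores for those negatives, and finally the document's own score and rank. Below is a minimal sketch of how such a record could be inspected once parsed into a Python dict; every dict key used is an assumption chosen for illustration (only "query", "document" and "negatives" are actually named in these rows, inside the metadata dicts), so treat the keys as hypothetical rather than the dataset's real schema.

# Minimal sketch, not part of the dataset: inspecting one parsed record.
# All dict keys are assumed/hypothetical names; adjust to the real schema.
def summarize_record(record):
    print("query:   ", record["query"][:60])
    print("document:", record["document"][:60])
    # Each negative document is paired with the score at the same position
    # in the parallel score list (scores appear in the rows as strings).
    for negative, score in zip(record["negatives"], record["negative_scores"]):
        print("  score=%.4f  negative=%r" % (float(score), negative[:40]))
    # The two trailing values describe the true document itself; a rank of -1
    # (as in the record above) presumably marks a document that was not
    # ranked among the retrieved candidates.
    print("document_score:", record["document_score"])
    print("document_rank: ", record["document_rank"])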
setUp is called before each test is run, tearDown is called after
def setUp(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self):\r\n pass", "def setUp(self):\n setUp()", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self) :\n pass", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):\n \n pass", "def setUp(self):\n\n BaseTest.setUp(self)", "def runTest(self):\r\n self.setUp()", "def setUp(self):\n pass #because we dont have anything to setup.", "def tearDown(self):\n pass", "def setUp(self):\r\n pass # nothing required by all\r", "def setUpTestCase(self):\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass" ]
[ "0.8581003", "0.8570658", "0.8570658", "0.8570658", "0.8570658", "0.8570658", "0.8570658", "0.8570658", "0.8570658", "0.8570658", "0.8552113", "0.8523558", "0.8523558", "0.851478", "0.85131484", "0.85049874", "0.85049874", "0.8482348", "0.84664434", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84575176", "0.84517854", "0.8380619", "0.8367129", "0.83546513", "0.8339879", "0.8324766", "0.8314957", "0.8309711", "0.8309711", "0.8309711", "0.8309711", "0.8309711", "0.83078814", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712", "0.8286712" ]
0.85146415
37
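The metadata dict carried by each record (see the "objective" entry in the record above and again in the record that follows) declares a single triplet objective over ("query", "document", "negatives"). Below is a minimal sketch, under the same assumption that a record is available as a Python dict with those three keys, of how one record expands into (anchor, positive, negative) training triplets; the helper name and the idea of feeding the result to a triplet or contrastive loss are illustrative and not something the rows themselves specify.

# Minimal sketch, not part of the dataset: expanding one record into
# (anchor, positive, negative) triplets, following the
# "triplet": [["query", "document", "negatives"]] entry in the metadata.
# The record keys mirror that entry; the function name is illustrative.
def record_to_triplets(record):
    query = record["query"]        # anchor
    document = record["document"]  # positive
    return [(query, document, negative) for negative in record["negatives"]]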
Test if setting multiple features works.
def test_setter_extended(): group = Group({"a": Numerical(), "b": Numerical(), "c": Group({"d": Numerical(), }), }) group.set_a(10) group.set_b(20) group.set_c_d(30) group.push() group.set_a(11) group.set_b(21) group.set_c(31) group.push() group.set("a", 12) group.set("b", 22) group.set("c", "d", 32) group.push() group.set("a", 13) group.set("b", 23) group.set("c", 33) group.push() array = group.array() assert array.shape == (4, 3) for i, row in enumerate(array): assert tuple(row) == (10 + i, 20 + i, 30 + i)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test__validate_features__0():\n for input_value, expected_output in (\n (None, None),\n ([], None),\n ([GuildFeature.animated_banner], (GuildFeature.animated_banner, )),\n ([GuildFeature.animated_banner.value], (GuildFeature.animated_banner, )),\n (\n [GuildFeature.animated_banner, GuildFeature.animated_icon],\n (GuildFeature.animated_banner, GuildFeature.animated_icon,),\n ),\n ):\n output = validate_features(input_value)\n vampytest.assert_eq(output, expected_output)", "def __check_features(f_list, stopwords):\n ok = True\n for f in f_list:\n if not(__check_feature(f,stopwords)):\n return False\n return True", "def test_setTunaFeatures(self):\n tuna = Tuna()\n array = [\"1\", \"2\", \"3\", \"4\"]\n tuna.setTunaFeatures(array)\n self.assertEqual(tuna.getTunaFeatures(), array)", "def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
\"\n \"All values for these features must be 0.\"\n ],\n )", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def check_supported_features(self):", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def test_confirm_features_in_class_variable(self):\n # Given I have Romaine's core\n from tests.common import romaine\n core = romaine.Core()\n\n # When I locate features in /tmp/romaine_tests/features\n # And I locate features in tests/features\n core.locate_features('/tmp/romaine_tests/features')\n core.locate_features('tests/features')\n\n # Then the core's feature_paths_list variable contains:\n # | path |\n # | /tmp/romaine_tests/features/feature1 |\n # | /tmp/romaine_tests/features/feature2 |\n # | /tmp/romaine_tests/features/subdir/feature3 |\n # | tests/features/feature1 |\n # | tests/features/feature2 |\n # | tests/features/subdir/feature3 |\n self.assertEqual(\n sorted(core.feature_file_paths),\n [\n '/tmp/romaine_tests/features/feature1',\n '/tmp/romaine_tests/features/feature2',\n '/tmp/romaine_tests/features/subdir/feature3',\n 'tests/features/feature1',\n 'tests/features/feature2',\n 'tests/features/subdir/feature3',\n ]\n )", "def test_feature(feature, value, good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def supports_feature_set(flags: Set[str], feature_set: str) -> bool:\n\n if feature_set not in REQUIRED_FEATURES:\n raise Exception('Unknown feature set \"{}\"'.format(feature_set))\n\n return not False in (supports_feature(flags, feat) for feat in REQUIRED_FEATURES[feature_set])", "def matches(self, feature):\n pass", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_overflow\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_datetime\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_errors\", \"feature\"), flags=TE)", "def test_feature_in_collection(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n feature = fc1.features[0]\n assert fc1.feature_in_collection(feature)\n\n feature = fc2.features[0]\n assert not fc1.feature_in_collection(feature)", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def testfeatures(self):\n self.set_wdiff()\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()", "def 
findFeatures(self):\n\t\tpass", "def __contains__(self, feature):\n return feature in self.features", "def test_read_feature_collection(self):\n fc = self.read_feature()\n assert len(fc.features) == 1\n feature = fc.features[0]\n self.check_feature(feature)", "def test__validate_features__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_features(input_value)", "def __call__(self, f1, f2):\n # return len(f1.set & f2.set)\n return len(set(f1.features) & set(f2.features))", "def test_registry():\n assert(CQT.get_id() in msaf.base.features_registry.keys())\n assert(PCP.get_id() in msaf.base.features_registry.keys())\n assert(Tonnetz.get_id() in msaf.base.features_registry.keys())\n assert(MFCC.get_id() in msaf.base.features_registry.keys())\n assert(Tempogram.get_id() in msaf.base.features_registry.keys())", "def is_valid_input(features) -> bool:\n for index, feature in enumerate(features):\n log.info(f\"Validating feature {index}: {feature}\")\n\n try:\n float(feature)\n except ValueError as value_error:\n log.error(value_error)\n return False\n\n log.info(\"Input successfully validated\")\n return True", "def test_can_enable_features_per_user(page):\n username = login_as_superuser(page)\n\n features.toggle_feature_for_user(page=page,\n feature_name='team-management-redo',\n username=username,\n enable=False)\n\n page.goto(\"/form/view\")\n nav = page.locator('.usa-nav__primary li').all()\n assert len(nav) == 2\n\n features.toggle_feature_for_user(page=page,\n feature_name='team-management-redo',\n username=username,\n enable=True)\n\n page.goto(\"/form/view\")\n nav = page.locator('.usa-nav__primary li').all()\n assert len(nav) == 3\n assert '🆕 Team Management' in nav[2].text_content().strip()\n\n features.toggle_feature_for_user(page=page,\n feature_name='team-management-redo',\n username=username,\n enable=False)\n\n page.goto(\"/form/view\")\n nav = page.locator('.usa-nav__primary li').all()\n assert len(nav) == 2", "def supports_feature(flags: Set[str], feature: str) -> bool:\n\n if not isinstance(feature, str):\n raise Exception('Given feature is not of type str')\n if feature not in FLAG_NAMES:\n raise Exception('Unknown feature flag \"{}\"'.format(feature))\n\n # Check if there are multiple flags that signal this feature\n flag = FLAG_NAMES[feature]\n if isinstance(flag, str):\n required_flags = [flag]\n elif isinstance(flag, list):\n required_flags = flag\n\n return True in (flag in flags for flag in required_flags)", "def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = 
[]\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def test_findFeatures(self):\n features = self.builder._findChanges(\n self.project, self.builder._FEATURE)\n self.assertEquals(\n features,\n [(5, \"We now support the web.\"),\n (12, \"The widget is more robust.\"),\n (15,\n \"A very long feature which takes many words to describe with \"\n \"any accuracy was introduced so that the line wrapping behavior \"\n \"of the news generating code could be verified.\"),\n (16, \"A simpler feature described on multiple lines was added.\")])", "def exposes_features(self):\n return self._features_op is not None", "def test_multiple_calls_no_duplicates(self):\n # Given I have Romaine's core\n from tests.common import romaine\n core = romaine.Core()\n\n # When I locate features in /tmp/romaine_tests/features\n core.locate_features('/tmp/romaine_tests/features')\n # And I locate features in /tmp/romaine_tests/features\n core.locate_features('/tmp/romaine_tests/features')\n\n # Then the core's feature_paths_list variable contains no duplicates\n 
feature_file_paths = list(core.feature_file_paths)\n for item in feature_file_paths:\n self.assertEqual(\n feature_file_paths.count(item),\n 1,\n )", "def features_selection(x_train, y_train,x_val,x_test,model,feature_list):\n n_features = x_train.shape[1]\n print(\"n_features original: \",n_features)\n if model == 'LR':\n estimator = LogisticRegression(random_state = 442, penalty = 'elasticnet', solver= 'saga',l1_ratio=0.5)\n if model == 'SVM':\n estimator = svm.LinearSVC(class_weight = 'balanced', random_state = 442)\n if model == 'SGD':\n estimator = SGDClassifier(class_weight = 'balanced', random_state = 442)\n if model == 'ADA':\n estimator = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, class_weight = 'balanced'),random_state = 442)\n if model == 'RF':\n estimator = RandomForestClassifier(random_state=442, class_weight = 'balanced')\n if model == 'GBT':\n estimator = GradientBoostingClassifier(random_state = 442)\n if model == 'XGBT':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = XGBClassifier(seed = 442,eval_metric = 'auc', scale_pos_weight = ratio)\n if model == 'LightGB':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = lgb.LGBMClassifier(seed = 442, scale_pos_weight = ratio)\n\n print(\"Searching RFE\")\n classifier = RFE(estimator=estimator, step=1)\n model = Pipeline([('classifier', classifier)])\n parameters = {'classifier__n_features_to_select': [int(n_features*0.25),int(n_features*0.5),int(n_features*0.75),n_features]}\n grid = GridSearchCV(model, parameters, cv=3)\n grid.fit(x_train, y_train)\n num_features = grid.best_params_\n num_features = re.sub(r'[^\\d]','',str(num_features))\n print(\"Optimal number of features\",num_features)\n\n print(\"SelectKBest\")\n selector = SelectKBest(f_classif, k=int(num_features)) #we pass the \"optimal number of features\" discovered in the previous pass\n selector.fit(x_train, y_train)\n x_train = selector.transform(x_train).astype('float32')\n x_val = selector.transform(x_val).astype('float32')\n x_test = selector.transform(x_test).astype('float32')\n feature_list = [feature_list[i] for i in selector.get_support(indices=True)]\n return x_train, x_val, x_test,feature_list, num_features", "def test_can_return_all_current_features_only(self):\n returned_features = return_current_features()\n self.assertTrue(len(returned_features) > 0)\n for feature in returned_features:\n self.assertTrue(feature.is_feature)\n feature_admin_object = SuggestionAdminPage.objects.get(suggestion=feature)\n self.assertTrue(feature_admin_object.in_current_voting_cycle)\n\n all_current_features_admin = SuggestionAdminPage.objects.filter(suggestion__is_feature=True,\n in_current_voting_cycle=True)\n self.assertEqual(len(all_current_features_admin), len(returned_features))", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_available_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.available_features()\n 
mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa2\"]", "def test_visible_white_and_blacklisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "def test(self, test):\r\n self.ml_data.set_target(test[0])\r\n self.ml_data.set_features(test[1])\r\n if self.ml_data.target_type.all() == np.float64 or self.ml_data.target_type.all() == np.int64:\r\n self.model_qua.open()\r\n else:\r\n self.model_quali.open()", "def test_support_SAFELIST(self):\n self.assertEqual(self._parseFeature(\"SAFELIST\"), True)", "def test_add_to_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(3 in Feature(\"testing\").whitelist)", "def set_features(self, features):\n self.features_ = list(features)", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def test_vector_feature_count(self):\n\n # Read and verify test data\n for vectorname in ['test_buildings.shp',\n 'tsunami_building_exposure.shp',\n 'Padang_WGS84.shp',\n 'OSM_building_polygons_20110905.shp',\n 'OSM_subset.shp']:\n\n filename = '%s/%s' % (TESTDATA, vectorname)\n layer = read_layer(filename)\n coords = layer.get_geometry()\n attributes = layer.get_data()\n\n # Check basic data integrity\n N = len(layer)\n assert len(coords) == N\n assert len(attributes) == N\n assert FEATURE_COUNTS[vectorname] == N", "def test_getTunaFeatures(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n array = [\"1\", \"2\", \"3\", \"4\"]\n self.assertEqual(tuna.getTunaFeatures(), array)", "def test_verify_set_multi(self):\n self._verify([self.applied_commands['setm']])", "def test_feature_values(iris, name, x_feature, y_feature, x_vals, y_vals):\n iris.x_feature = x_feature\n iris.y_feature = y_feature\n assert iris.title == \"{} x {}\".format(x_feature, y_feature)\n data = iris.sources[name].data\n np.testing.assert_array_almost_equal(data[\"x\"][:2], x_vals)\n np.testing.assert_array_almost_equal(data[\"y\"][:2], y_vals)", "def test_create_features(self):\n\n ## Features in list form, default argument\n self.assertItemsEqual(self.model.features, [\"X1\"])\n self.assertItemsEqual(self.model.unpacked_features, [\"X1[0]\", \"X1[1]\"])\n\n ## Separate features, default argument\n sf = self.sf.unpack(\"X1\")\n m = tc.dbscan.create(\n sf,\n distance=\"euclidean\",\n radius=self.radius,\n min_core_neighbors=self.min_core_neighbors,\n verbose=False,\n )\n self.assertItemsEqual(m.features, [\"X1.0\", \"X1.1\"])\n\n ## Separate features, specified explicitly\n m = tc.dbscan.create(\n sf,\n features=[\"X1.0\"],\n distance=\"euclidean\",\n radius=self.radius,\n min_core_neighbors=self.min_core_neighbors,\n verbose=False,\n )\n self.assertItemsEqual(m.features, [\"X1.0\"])\n\n ## Features can be specified by the composite distance argument.\n test_dist = [[[\"X1.0\"], \"euclidean\", 1], [[\"X1.1\"], \"manhattan\", 1]]\n\n m = tc.dbscan.create(\n sf,\n distance=test_dist,\n radius=self.radius,\n min_core_neighbors=self.min_core_neighbors,\n verbose=False,\n )\n self.assertItemsEqual(m.features, [\"X1.0\", \"X1.1\"])\n\n ## Features parameter should be overridden by the composite distance\n # argument.\n m = tc.dbscan.create(\n sf,\n features=[\"X1.0\"],\n distance=test_dist,\n radius=self.radius,\n min_core_neighbors=self.min_core_neighbors,\n verbose=False,\n )\n self.assertItemsEqual(m.features, [\"X1.0\", \"X1.1\"])", "def 
test_multimodel_feature_extraction():\n # set up parameters\n testcol = testcol_multi\n exp_id = 'validation1'\n\n params = {}\n\n model1_params = {'func': model.mnist_tfutils}\n model2_params = {'func': model.mnist_tfutils}\n model_params = [model1_params, model2_params]\n num_models = len(model_params)\n\n params['model_params'] = model_params\n\n params['load_params'] = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0'}\n\n params['save_params'] = {'exp_id': exp_id,\n 'save_intermediate_freq': 1,\n 'save_to_gfs': ['features', 'more_features']}\n\n targdict1 = {'func': get_extraction_target,\n 'to_extract': {'features': 'model_0/validation/valid1/hidden1/output:0',\n 'more_features': 'model_0/validation/valid1/hidden2/output:0'}}\n\n targdict2 = {'func': get_extraction_target,\n 'to_extract': {'features': 'model_1/validation/valid1/hidden1/output:0',\n 'more_features': 'model_1/validation/valid1/hidden2/output:0'}}\n\n targdict1.update(base.DEFAULT_LOSS_PARAMS)\n targdict2.update(base.DEFAULT_LOSS_PARAMS)\n\n validation_params1 = {'valid1': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'targets': targdict1,\n 'num_steps': 10,\n 'online_agg_func': utils.reduce_mean_dict}}\n\n validation_params2 = {'valid1': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'targets': targdict2,\n 'num_steps': 10,\n 'online_agg_func': utils.reduce_mean_dict}}\n\n params['validation_params'] = [validation_params1, validation_params2]\n params['skip_check'] = True\n\n conn = pm.MongoClient(host=testhost,\n port=testport)\n for i in range(num_models):\n valid_exp_id = 'validation0_model_{}'.format(i)\n conn[testdbname][testcol + '.files'].delete_many({'exp_id': valid_exp_id})\n\n # actually run the feature extraction\n base.test_from_params(**params)\n\n # check that things are as expected.\n coll = conn[testdbname][testcol + '.files']\n\n for i in range(num_models):\n exp_id = 'validation1_model_{}'.format(i)\n assert coll.find({'exp_id': exp_id}).count() == 11\n\n # ... load the containing the final \"aggregate\" result after all features have been extracted\n q = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': True}}\n assert coll.find(q).count() == 1\n r = coll.find(q)[0]\n # ... check that the record is well-formed\n asserts_for_record(r, params, train=False)\n\n # ... check that the correct \"intermediate results\" (the actual features extracted) records exist\n # and are correctly referenced.\n q1 = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': False}}\n ids = coll.find(q1).distinct('_id')\n assert r['validation_results']['valid1']['intermediate_steps'] == ids\n\n # ... 
actually load feature batch 3\n idval = r['validation_results']['valid1']['intermediate_steps'][3]\n fn = coll.find({'item_for': idval})[0]['filename']\n fs = gridfs.GridFS(coll.database, testcol)\n fh = fs.get_last_version(fn)\n saved_data = cPickle.loads(fh.read())\n fh.close()\n first_results = saved_data['validation_results']['valid1']\n assert 'features' in first_results and 'more_features' in first_results\n features = saved_data['validation_results']['valid1']['features']\n more_features = saved_data['validation_results']['valid1']['more_features']\n assert features.shape == (100, 128)\n assert features.dtype == np.float32\n assert more_features.shape == (100, 32)\n assert more_features.dtype == np.float32", "def check_featuring(self):\n existing_featuring = pd.read_csv(self.path_checkpoint)\n array_to_check = [float(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n bool_answer = (existing_featuring == array_to_check).all(1).any()\n # self.file_checkpoint_data.close()\n return bool_answer", "def test_copy_features(self):\n fc = self.read_feature()\n other = FeatureCollection(features=fc.features,\n otherProperties=fc.otherProperties)\n assert len(other.features) == 1\n feature = other.features[0]\n\n self.check_feature(feature)", "def __contains__(self, feature):\n return feature == 'cvarsort' or feature in self.features", "async def async_set_features(self, features):\n self._features = features", "def test_Utilities__test_1():\n assert test(True, 1, False, 2) == 1\n assert test(False, 1, True, 2) == 2\n assert test(False, 1, False, 2, True, 3) == 3", "def test_test(task_dataset, features):\n features = torch.cat(features)\n feat = features[0]\n expected = features.eq(feat).sum().item() / N_SAMPLES\n\n class FakeModule(nn.Module):\n \"\"\"Always returns the same prediction.\"\"\"\n\n def forward(self, reps):\n \"\"\"Just returns the tag.\"\"\"\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits\n\n actual = learning.test(FakeModule(),\n task_dataset,\n device=torch.device('cpu'))\n assert actual == expected", "def set_features(self, features: list):\n self._features = features", "def test_load_then_featurize_data_multiple_columns():\n feat = ImageFeaturizer(auto_sample=True)\n feat.load_data(**LOAD_DATA_ARGS_MULT)\n feat.featurize_preloaded_data(save_features=True)\n check_array = np.load(CHECK_ARRAY_MULT.format('squeezenet'))\n\n try:\n compare_featurizer_class(feat, (227, 227), check_array, featurized=True,\n check_csv=CHECK_CSV_MULT.format('squeezenet'), **COMPARE_ARGS_MULT)\n\n finally:\n # Remove path to the generated csv at the end of the test\n remove_generated_paths()\n del feat", "def test_add_feature():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:test\",\n \"/NoRestart\",\n ]\n )", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def _testFeatureDefault(self, name, features=None):\n default = irc.ServerSupportedFeatures()._features[name]\n\n if features is None:\n features = [(\"DEFINITELY_NOT\", \"a_feature\")]\n\n supported = 
self._parse(features)\n self.assertTrue(supported.hasFeature(name))\n self.assertEqual(supported.getFeature(name), default)", "def test_reset_settings(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(4)\n self.feature_test.reset_settings()\n\n generated = Feature(\"testing\")\n self.assertEqual(generated.percentage, 0)\n self.assertFalse(3 in generated.whitelist)\n self.assertFalse(4 in generated.blacklist)", "def has_feature(self, feature):\n features = self.features\n if features is None:\n return False\n \n return feature in features", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def match_features(phone_feats, other_feats):\n for feat in other_feats.keys():\n if phone_feats[feat] != other_feats[feat] and other_feats[feat] != UNDEF:\n return False\n return True", "def evaluate_features(trainFeatures, testFeatures):\n classifier = NaiveBayesClassifier.train(trainFeatures)\n\n #initiates referenceSets and testSets\n referenceSets = collections.defaultdict(set)\n testSets = collections.defaultdict(set)\n\n for i, (features, label) in enumerate(testFeatures):\n referenceSets[label].add(i)\n predicted = classifier.classify(features)\n testSets[predicted].add(i)\n\n print 'train on %d instances, test on %d instances' % (len(trainFeatures), len(testFeatures))\n print 'accuracy:', nltk.classify.util.accuracy(classifier, testFeatures)\n print 'pos precision:', precision(referenceSets['pos'], testSets['pos'])\n print 'pos recall:', recall(referenceSets['pos'], testSets['pos'])\n print 'neg precision:',precision(referenceSets['neg'], testSets['neg'])\n print 'neg recall:', recall(referenceSets['neg'], testSets['neg'])\n classifier.show_most_informative_features(50)", "def test_many_values(self):\n write this test!", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def features(self, state, action, next_state):\n raise NotImplementedError", "def features(self, features):\n\n self._features = features", "def feature():\n pass", "def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")", "def test_get_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.get_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa1\", \"Capa2\"]", "def uses_feature(self, fcname):\n used = False\n if any([fcname.upper() in y for y in [x.upper() for x in self._featureclasses]]):\n used = True\n return used", "def featureset( key=None, promo=None, **kwargs ):\n\t# Local reference to features\n\tallfeatures = features()\n\t\n\t# Check to see if the specified key indicates a promotion\n\tif promo not in allfeatures: promo = \"default\"\n\tpromofeatures = allfeatures[promo] if promo != \"default\" else None\n\t\n\t# Check request and lookup existing feature set, or create new empty set\n\tkey = key or \"uuid:%s\" % str( uuid.uuid4() )\n\tfs = db.get( 
db.Key.from_path( \"FeatureSet\", key ) ) or FeatureSet( key_name=key )\n\tchanged = not fs.is_saved()\n\t\n\t# Fill in promo on featureset\n\tif not hasattr( fs, \"promo\" ) or fs.promo != promo:\n\t\tchanged = True\n\t\tsetattr( fs, \"promo\", promo )\n\t\n\t# Fill in any keyword arguments\n\tfor k, v in kwargs.iteritems():\n\t\tif hasattr( fs, k ) and getattr( fs, k ) == v: continue\n\t\tchanged = True\n\t\tsetattr( fs, k, v )\n\n\t# Fill in FeatureSet instance from allfeatures\n\tfor feature, groups in allfeatures[\"default\"].iteritems():\n\t\n\t\t# Override groups if in a promo which contains that feature\n\t\tif promofeatures and feature in promofeatures: groups = promofeatures[feature]\n\t\n\t\t# If groups is really just one value, the only choice is to set it\n\t\tif type(groups) != dict:\n\t\t\tif hasattr( fs, feature ) and getattr( fs, feature ) == groups: continue\n\t\t\tchanged = True\n\t\t\tsetattr( fs, feature, groups )\n\t\t\tcontinue\n\t\n\t\t# If the FeatureSet already has this feature, make sure it's a legal value\n\t\tif hasattr( fs, feature ):\n\t\t\toldval = getattr( fs, feature )\n\t\t\tfound = False\n\t\t\tfor value, frequency in groups.iteritems():\n\t\t\t\tif oldval == value: \n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\t\tif found: continue\n\t\t\t\n\t\t# Randomly pick from the feature's groups based on defined frequencies\n\t\tstops = []\n\t\tlast = 0\n\t\tfor value, frequency in groups.iteritems():\n\t\t\tlast += frequency\n\t\t\tstops.append( ( last, value ) )\n\t\tr = random.uniform( 0.0, last )\n\t\tfor i in range( len( stops ) ):\n\t\t\tif r < stops[i][0]:\n\t\t\t\tbreak\n\t\t\n\t\t# Set the feature on the feature set\n\t\tchanged = True\n\t\tsetattr( fs, feature, stops[i][1] )\n\t\t\n\t# Save the FeatureSet\n\tif changed: fs.put()\n\treturn fs", "def iter_specified(self):\n for feat in self.features:\n val = self[feat]\n if val not in ['?', 'u', None]:\n yield (feat, val)", "def test_getOrderedFeatures(self):\n print 'Running %s ...' 
% getName()\n \n s1 = self.sequenceListingFixture.create_sequence_instance(self.sequenceListing) \n \n# test that source feature is at index 0 when feature table has only 1 feature \n source_feature = next((f for f in s1.feature_set.all() if f.featureKey == 'source'), None)\n ordered_features = s1.getOrderedFeatures()\n self.assertTrue(source_feature)\n self.assertEqual(0, ordered_features.index(source_feature))\n \n# add feature\n f1_1 = Feature.objects.create(sequence=s1, \n featureKey='misc_feature', \n location='4')\n \n ordered_features_after_f1_1 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_1.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_1.index(f1_1))\n \n # add feature\n f1_2 = Feature.objects.create(sequence=s1, \n featureKey='misc_feature', \n location='2')\n \n ordered_features_after_f1_2 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_2.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_2.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_2.index(f1_1))\n \n # add feature\n f1_3 = Feature.objects.create(sequence=s1, \n featureKey='variation', \n location='9')\n \n ordered_features_after_f1_3 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_3.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_3.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_3.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_3.index(f1_3))\n \n # add feature\n f1_4 = Feature.objects.create(sequence=s1, \n featureKey='allele', \n location='9')\n \n ordered_features_after_f1_4 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_4.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_4.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_4.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_4.index(f1_4))\n self.assertEqual(4, ordered_features_after_f1_4.index(f1_3))\n \n # add feature\n f1_5 = Feature.objects.create(sequence=s1, \n featureKey='iDNA', \n location='9')\n \n ordered_features_after_f1_5 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_5.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_5.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_5.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_5.index(f1_4))\n self.assertEqual(4, ordered_features_after_f1_5.index(f1_5))\n self.assertEqual(5, ordered_features_after_f1_5.index(f1_3))\n \n # add feature this will be ordered before 'allele', because \n# capital letters are lower than lower case in ASCII\n f1_6 = Feature.objects.create(sequence=s1, \n featureKey='CDS', \n location='9..17')\n \n ordered_features_after_f1_6 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_6.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_6.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_6.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_6.index(f1_6))\n self.assertEqual(4, ordered_features_after_f1_6.index(f1_4))\n self.assertEqual(5, ordered_features_after_f1_6.index(f1_5))\n self.assertEqual(6, ordered_features_after_f1_6.index(f1_3))", "def test_add_to_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(3 in Feature(\"testing\").blacklist)", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert ((X == 0) | (X == 1)).all(), \"x should be equal to 0 or 
1.\"", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.Fu\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.Fu\"] = False\n\n EKFSLAM.EKFSLAM.Fu(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.Fu\"], \"The function uses the solution\"", "def test_general_subset_all():\n pass", "def validate_source(features):\n click.echo(f\"Validating features\", err=True)\n\n for feature in features:\n utils.validate_geojson(feature)\n\n click.echo(\"✔ valid\")", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def list_feature_tests(self):\n\t\treturn self.test_names", "def test_data(self):\n\n # Boolean tests\n is_datas = [True, False]\n for is_data in is_datas:\n self.colorspace.setIsData(is_data)\n self.assertEqual(is_data, self.colorspace.isData())\n\n # Wrong type tests\n wrong_is_datas = [['test'], 'test']\n for wrong_is_data in wrong_is_datas:\n with self.assertRaises(TypeError):\n self.colorspace.setIsData(wrong_is_data)", "def getFeatures(self, state, action, thisAgent):\n util.raiseNotDefined()", "def take_some_features(data,features,given=None):\n if given is None:\n return data,features\n common,ind1,ind2=take_common_features(features,given)\n data=data[:,ind1]\n features=features[ind1]\n return data,features", "def test_contains_true(self):\n self.assertTrue('DEPTH' in self.tester)\n self.assertTrue('depth' in self.tester)", "def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.f\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.f\"] = False\n\n EKFSLAM.EKFSLAM.f(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.f\"], \"The function uses the solution\"", "def select(self, features):\n if 'Weather Type' not in features:\n features.append('Weather Type')\n self.data = self.data[:,[self._getFIdx(f) for f in features]]\n self.featureNames = self.featureNames[[self._getFIdx(f) for f in features]]\n return 0", "def choose_features(nof_features, max_features):\n features_indices = numpy.arange(nof_features)\n #numpy.random.seed()\n #features_chosen = numpy.random.choice(features_indices, size=max_features, replace = True)\n features_chosen = numpy.random.choice(features_indices,\n size=nof_features,\n replace=False)\n\n #print(features_chosen)\n return features_chosen", "def _check_features_df(df, features):\n # check columns\n if not set(features).issubset(df.columns):\n raise ValueError(\"The dataframe does not seem to have the right \"\n \"features. 
{0} instead of {1}\"\n .format(df.columns, features))\n\n return", "def __call__(self, feature):\n return self.is_enabled(feature)", "def test_training(self):\n\t\tpass", "def test_validate_media_player_features():\n config = {}\n attrs = {ATTR_SUPPORTED_FEATURES: 20873}\n entity_state = State(\"media_player.demo\", \"on\", attrs)\n assert validate_media_player_features(entity_state, config) is True\n\n config = {FEATURE_ON_OFF: None}\n assert validate_media_player_features(entity_state, config) is True\n\n entity_state = State(\"media_player.demo\", \"on\")\n assert validate_media_player_features(entity_state, config) is False", "def _checkValues(set_):\n if len(set_)<3: return False\n x = set_[2]\n # TODO: OPT: need optimization\n if (x is None) or len(x) == 0: return False # undefined\n for v in x:\n try:\n if Nlabels <= 2 and N.isscalar(v):\n continue\n if (isinstance(v, dict) or # not dict for pairs\n ((Nlabels>=2) and len(v)!=Nlabels) # 1 per each label for multiclass\n ): return False\n except Exception, e:\n # Something else which is not supported, like\n # in shogun interface we don't yet extract values per each label or\n # in pairs in the case of built-in multiclass\n if __debug__:\n debug('ROC', \"Exception %s while checking \"\n \"either %s are valid labels\" % (str(e), x))\n return False\n return True", "def has_test(args):\n return (args.test_set or args.test_source or args.test_dataset or\n args.test_stdin or args.test_datasets)", "def test_add_one_more_test(self):\n self.assertTrue(True)", "def test_check_estimator_subset_tests():\n tests_to_run = [\n \"test_get_params\",\n \"test_set_params\",\n \"test_clone\",\n \"test_repr\",\n \"test_capability_inverse_tag_is_correct\",\n \"test_remember_data_tag_is_correct\",\n ]\n tests_to_exclude = [\"test_repr\", \"test_remember_data_tag_is_correct\"]\n\n expected_tests = set(tests_to_run).difference(tests_to_exclude)\n\n results = check_estimator(\n ExponentTransformer,\n verbose=False,\n tests_to_run=tests_to_run,\n tests_to_exclude=tests_to_exclude,\n )\n results_tests = {x.split(\"[\")[0] for x in results.keys()}\n\n assert results_tests == expected_tests", "def test_add_feature_with_extras():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"sponge\", \"bob\", \"C:\\\\temp\", True, True)\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:sponge\",\n \"/PackageName:bob\",\n \"/Source:C:\\\\temp\",\n \"/LimitAccess\",\n \"/All\",\n \"/NoRestart\",\n ]\n )", "def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False", "def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False" ]
[ "0.74074817", "0.7168108", "0.6827362", "0.6718771", "0.647861", "0.64621735", "0.6363413", "0.63128954", "0.6267152", "0.6253744", "0.6251584", "0.6227782", "0.6165864", "0.613769", "0.61193776", "0.61076474", "0.6096284", "0.60629445", "0.59956205", "0.5987819", "0.59747297", "0.59509313", "0.5945111", "0.5941956", "0.59272313", "0.5915295", "0.5910277", "0.5900698", "0.5900564", "0.5894117", "0.5884199", "0.5869305", "0.58597136", "0.5853922", "0.58237344", "0.5812065", "0.57941556", "0.5787468", "0.578593", "0.5774323", "0.5768919", "0.57598794", "0.5744328", "0.5731029", "0.572986", "0.5727686", "0.5726737", "0.5718445", "0.57115394", "0.5708498", "0.5707115", "0.5676111", "0.5669246", "0.566356", "0.565522", "0.5652078", "0.5648506", "0.56440586", "0.56354064", "0.5614252", "0.56094223", "0.55967945", "0.5583878", "0.5581145", "0.5579562", "0.5579562", "0.5579562", "0.55731326", "0.5567567", "0.5548805", "0.55443573", "0.55343086", "0.5533087", "0.55249476", "0.55196524", "0.55127704", "0.55094427", "0.55060977", "0.5502676", "0.54939276", "0.5482222", "0.5466095", "0.54615754", "0.54531175", "0.5442737", "0.54372954", "0.5424143", "0.54223603", "0.54209375", "0.5419764", "0.54184157", "0.541604", "0.5415738", "0.54106236", "0.540775", "0.5407316", "0.54040796", "0.5403691", "0.5402845", "0.5401363", "0.5401363" ]
0.0
-1
Test the Numerical feature class.
def test_numerical_feature(): group = Group({"a": Numerical(), "b": Numerical(), "c": Numerical(dimensions=3), "d": Numerical(dimensions="xyz"), }) group.set_a(100) group.set_b(200) group.set_c(0, 10) group.set_c(1, 20) group.set_c(2, 30) group.set_d("x", 1) group.set_d("y", 2) group.set_d("z", 3) group.push() group.set_a(100) group.set_b(200) group.set_c(0, 40) group.set_c(1, 50) group.set_c(2, 60) group.set_d_x(1) group.set_d_y(2) group.set_d_z(3) group.push() array = group.array() assert array.shape == (2, 8) count = Counter() for row in array: for column, value in zip(array.columns, row): count[column[0]] += value assert count["a"] == 200 assert count["b"] == 400 assert count["c"] == 210 assert count["d"] == 12
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_predictor():", "def test_basic(self):\n result = NonLinearWeights(0.85).nonlinear_weights(3)\n self.assertIsInstance(result, np.ndarray)", "def test_analytical_vs_numerical():\n pass", "def test_machine_learning():", "def test_n_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.n_features == atom.n_features", "def test_Gaussian_NB_estimators():", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def test_xray_classifier():\n model = X_ray_Classifier()\n assert type(model) == X_ray_Classifier", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def test_basic(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertIsInstance(result, iris.cube.Cube)", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_feature_format(X):\r\n print(\"test_feature_format()...\", end = \"\")\r\n for row in range(len(X)):\r\n for col in range(len(X[0])):\r\n assert (isinstance(X[row][col], float) == True)\r\n print(\"Passed!\")", "def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)", "def test_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.test.equals(atom.mnb.test)\n assert check_scaling(atom.lr.test)", "def test_support_INVEX(self):\n self.assertEqual(self._parseFeature(\"INVEX\", \"Z\"), \"Z\")\n self.assertEqual(self._parseFeature(\"INVEX\"), \"I\")", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def feature():\n pass", "def test___init__(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n 
self.assertTrue(hasattr(ga, 'register'))\n\n # should have called evalute\n self.assertEqual(ga.generations[-1].new, 0)\n\n # should have registered a default ranking function\n self.assertEqual(np.round(np.sum(ga.rank())), len(f0))", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def evaluate(self, features, labels):\n raise NotImplementedError('Not implemented')", "def test_trainable_property(self):\n scalar_weighted_addition_model = ScalarWeightedAddition(10)\n np.testing.assert_equal(scalar_weighted_addition_model.is_trainable, True)", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def __test_float(self, bk):\n for arg in self.args['float']:\n print(\"\\nTesting:\", arg)\n ds = ArgoDataFetcher(backend=bk).float(arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def testMulticlassDataWithAndWithoutKernels(self):\n feature_column = layers.real_valued_column('feature', dimension=4)\n\n # Metrics for linear classifier (no kernels).\n linear_classifier = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature_column], n_classes=3)\n linear_classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=50)\n linear_metrics = linear_classifier.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=1)\n linear_loss = linear_metrics['loss']\n linear_accuracy = linear_metrics['accuracy']\n\n # Using kernel mappers allows to discover non-linearities in data (via RBF\n # kernel approximation), reduces loss and increases accuracy.\n kernel_mappers = {\n feature_column: [\n RandomFourierFeatureMapper(\n input_dim=4, output_dim=50, stddev=1.0, name='rffm')\n ]\n }\n kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(\n feature_columns=[], n_classes=3, kernel_mappers=kernel_mappers)\n kernel_linear_classifier.fit(\n input_fn=test_data.iris_input_multiclass_fn, steps=50)\n kernel_linear_metrics = kernel_linear_classifier.evaluate(\n input_fn=test_data.iris_input_multiclass_fn, steps=1)\n kernel_linear_loss = kernel_linear_metrics['loss']\n kernel_linear_accuracy = kernel_linear_metrics['accuracy']\n self.assertLess(kernel_linear_loss, linear_loss)\n self.assertGreater(kernel_linear_accuracy, linear_accuracy)", "def test_training(self):\n\t\tpass", "def test_energy():\n # Test something\n \n from nose.tools import assert_equal\n assert_equal(energy([0.0,0.0],1.0), 0)\n assert_equal(energy([4.0,5.0],1.0), 10)", "def test_non_integral_validation(self):", "def test_non_integral_validation(self):", "def test_X_train_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_train.equals(atom.mnb.X_train)\n assert check_scaling(atom.lr.X_train)", "def testRegression(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n regressor = 
tf.contrib.learn.DNNRegressor(feature_columns=cont_features,\n hidden_units=[3, 3])\n\n regressor.fit(input_fn=_iris_input_multiclass_fn, steps=1000)\n regressor.evaluate(input_fn=_iris_input_multiclass_fn, steps=100)", "def test_Bernoulli_NB_estimators():", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def evaluate_features(trainFeatures, testFeatures):\n classifier = NaiveBayesClassifier.train(trainFeatures)\n\n #initiates referenceSets and testSets\n referenceSets = collections.defaultdict(set)\n testSets = collections.defaultdict(set)\n\n for i, (features, label) in enumerate(testFeatures):\n referenceSets[label].add(i)\n predicted = classifier.classify(features)\n testSets[predicted].add(i)\n\n print 'train on %d instances, test on %d instances' % (len(trainFeatures), len(testFeatures))\n print 'accuracy:', nltk.classify.util.accuracy(classifier, testFeatures)\n print 'pos precision:', precision(referenceSets['pos'], testSets['pos'])\n print 'pos recall:', recall(referenceSets['pos'], testSets['pos'])\n print 'neg precision:',precision(referenceSets['neg'], testSets['neg'])\n print 'neg recall:', recall(referenceSets['neg'], testSets['neg'])\n classifier.show_most_informative_features(50)", "def test_generate_nb_testing(self):\n pass", "def test_categorical_feature():\n\n feature = Categorical(\"abc\")\n\n for element in \"abc\":\n feature.set(element)\n 
feature.set(\"ignore this\")\n feature.push()\n\n for element in \"abc\":\n getattr(feature, \"set_\" + element)()\n feature.push()\n\n array = feature.array()\n assert array.shape == (6, 3)\n for i, row in enumerate(array):\n assert sum(row) == 1.0 and row[i % 3] == 1.0", "def __evaluateLocal__(self, featureVals):\n pass", "def test_mean(self):\n pass", "def test_mean(self):\n pass", "def test_nb(x, y, tune):\n # Perform classification without tuning\n nb = GaussianNB()\n pipeline = create_pipeline(nb)\n return accuracy(pipeline, x, y)", "def test__validate_features__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_features(input_value)", "def test_init(self):\r\n c = AlphaDiversityCalc(observed_otus)\r\n self.assertEqual(c.Metric, observed_otus)\r\n self.assertEqual(c.Params, {})", "def test_test(task_dataset, features):\n features = torch.cat(features)\n feat = features[0]\n expected = features.eq(feat).sum().item() / N_SAMPLES\n\n class FakeModule(nn.Module):\n \"\"\"Always returns the same prediction.\"\"\"\n\n def forward(self, reps):\n \"\"\"Just returns the tag.\"\"\"\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits\n\n actual = learning.test(FakeModule(),\n task_dataset,\n device=torch.device('cpu'))\n assert actual == expected", "def test_reflected_numerics():\n circle = Circle(2)\n assert circle * 3 == 3 * circle", "def test_train_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.train.equals(atom.mnb.train)\n assert check_scaling(atom.lr.train)", "def xx(x):\r\n return Feature(x, \"XX\")", "def test_gtf(self):\n #TODO write bed tests", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def feature_simulator(self, function, x):\n if function == 'bitmap_count':\n return utils.bitmap_count(x.member, BITMAP_THRESHOLD)\n if function == 'move_distance':\n return utils.move_distance(x.member)\n if function == 'orientation_calc':\n return utils.orientation_calc(x.member, 0)", "def test(self, w=None, testSet=None):\n \n setAccuracy = False;\n if w is None:\n w = self.w;\n testSet = self.testSet;\n setAccuracy = True;\n \n if self.type == self.ClassifierType.SVM: \n (correct, attempts) = testSVM(w, testSet); \n else:\n raise NotImplementedError(\"Only SVM is currently supported\");\n \n if setAccuracy:\n self.accuracy = correct/attempts;\n \n return (correct, attempts);", "def test_X_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X.equals(atom.mnb.X)\n assert check_scaling(atom.lr.X)", "def accuracy(self):", "def testModel( self, classTest, classPred):", "def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results", 
"def test(self):\r\n error_count = 0\r\n N_TESTING = len(self.TESTING_DATA)\r\n for i in range(N_TESTING):\r\n x_vec = self.TESTING_DATA[i][:-1]\r\n y = self.TESTING_DATA[i][-1]\r\n\r\n result = self.bp.classify(x_vec)\r\n if result != y: error_count += 1\r\n print(error_count, \" errors on the test data, out of \", N_TESTING, \"items.\")", "def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()", "def test_term(self):\n\t\tterm_one = schrodinger.term(0)\n\t\tself.assertEqual(1, term_one(0).numpy())\n\t\tterm_two = schrodinger.term(1)\n\t\tself.assertEqual(0, term_two(0).numpy())", "def testInvalidNumberOfClasses(self):\n\n feature = layers.real_valued_column('feature')\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature], n_classes=1)", "def test_10_test_model(self, example):\n res = example.calc_model()\n print(example.trips_ij)\n total_trips_target = example.persons_gi.sum()\n total_trips_actual = example.trips_ij.sum()\n np.testing.assert_almost_equal(total_trips_target, total_trips_actual)", "def test_any(alg, repNum=1):\n\tperformance = alg.predict_perf(repNum=repNum)\n\tprint performance\n\talg.plot()\n\n\treturn performance", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining = 10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def test_active_inference_SPM_1b(self):", "def testMultiClass_MatrixData(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n classifier = tf.contrib.learn.DNNClassifier(\n n_classes=3,\n feature_columns=cont_features,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=1))\n\n classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)\n self.assertTrue('centered_bias_weight' in classifier.get_variable_names())\n scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.8)\n self.assertLess(scores['loss'], 0.3)", "def num_examples(self):\r\n raise NotImplementedError", "def test_basic(self):\n result = NonLinearWeights(0.85)\n self.assertAlmostEqual(result.cval, 0.85)", "def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining=10):\n self.alpha = float(alpha)\n self.epsilon = float(epsilon)\n self.discount = float(gamma)\n self.numTraining = int(numTraining)", "def fit_test(self):", "def test_score(self):\n reg = ElasticRegistration()\n reg.fit(self.unimodal_samples)\n score = reg.score(self.unimodal_samples)\n np.testing.assert_almost_equal(score, 0.9994225)", "def test(self):\n raise NotImplementedError", "def test_get_hyperflex_feature_limit_internal_by_moid(self):\n pass", "def testing_featurizer_build():\n f = ImageFeaturizer()\n compare_featurizer_class(f, (0, 0), np.zeros((1)), 0, '', False, '', {}, 1)", "def testActivation(self):\n result = Sigmoid.activ(self, 12)\n self.assertEqual(0.9999938558253978, result)", "def __init__(self,\n num_factors=40,\n regularization=0.01,\n alpha=1.0,\n iterations=15,\n use_native=True,\n num_threads=0,\n dtype=np.float64):\n self.num_factors = num_factors\n self.regularization = regularization\n self.alpha = alpha\n self.iterations = iterations\n self.use_native = use_native\n self.num_threads = num_threads\n self.dtype = dtype", "def test_feature(feature, value, 
good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def test__learn_one_symbol():\n if platform.machine() == 'x86_64':\n classifier = classifier_module.Classifier(None)\n tolerance = classifier._learn_one_symbol('test')\n\n file_with_model = open(TEST_LOCATION + 'test_nn_model.dat', 'rb')\n nbrs_from_file = pickle.load(file_with_model)\n\n assert 'ball_tree' == nbrs_from_file.algorithm\n assert 30 == nbrs_from_file.leaf_size\n assert 'minkowski' == nbrs_from_file.metric\n assert nbrs_from_file.metric_params is None\n assert 2 == nbrs_from_file.n_neighbors\n assert 2 == nbrs_from_file.p\n assert 1.0 == nbrs_from_file.radius\n assert tolerance < 398.85960989443032 + epsilon\n assert tolerance > 398.85960989443032 - epsilon", "def __init__(self, eta, mu, n_feature):\r\n self.eta = 0.09\r\n self.weight = [0.0] * n_feature\r\n self.temp = [0.0] * n_feature\r\n self.mu = 0.0\r\n self.size= n_feature", "def test_other_functions(self):\r\n\r\n # Test sqrt\r\n self.assert_function_values(\r\n 'sqrt',\r\n [0, 1, 2, 1024], # -1\r\n [0, 1, 1.414, 32] # 1j\r\n )\r\n # sqrt(-1) is NAN not j (!!).\r\n\r\n # Test logs\r\n self.assert_function_values(\r\n 'log10',\r\n [0.1, 1, 3.162, 1000000, '1+j'],\r\n [-1, 0, 0.5, 6, 0.151 + 0.341j]\r\n )\r\n self.assert_function_values(\r\n 'log2',\r\n [0.5, 1, 1.414, 1024, '1+j'],\r\n [-1, 0, 0.5, 10, 0.5 + 1.133j]\r\n )\r\n self.assert_function_values(\r\n 'ln',\r\n [0.368, 1, 1.649, 2.718, 42, '1+j'],\r\n [-1, 0, 0.5, 1, 3.738, 0.347 + 0.785j]\r\n )\r\n\r\n # Test abs\r\n self.assert_function_values('abs', [-1, 0, 1, 'j'], [1, 0, 1, 1])\r\n\r\n # Test factorial\r\n fact_inputs = [0, 1, 3, 7]\r\n fact_values = [1, 1, 6, 5040]\r\n self.assert_function_values('fact', fact_inputs, fact_values)\r\n self.assert_function_values('factorial', fact_inputs, fact_values)\r\n\r\n self.assertRaises(ValueError, calc.evaluator, {}, {}, \"fact(-1)\")\r\n self.assertRaises(ValueError, calc.evaluator, {}, {}, \"fact(0.5)\")\r\n self.assertRaises(ValueError, calc.evaluator, {}, {}, \"factorial(-1)\")\r\n self.assertRaises(ValueError, calc.evaluator, {}, {}, \"factorial(0.5)\")", "def test_initialized() -> None:\n MapieClassifier()", "def test_get_hyperflex_feature_limit_external_by_moid(self):\n pass", "def test_num_isom(self):\n self.assertEqual(self.Nisom, 2)", "def test_ann_features():\n CQT(file_struct, FeatureTypes.ann_beatsync, sr=11025).features", "def svm():", "def test_check_estimator(estimator):\n check_estimator(estimator)", "def test_generate_nb(self):\n pass", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 
9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def test_feature_values(iris, name, x_feature, y_feature, x_vals, y_vals):\n iris.x_feature = x_feature\n iris.y_feature = y_feature\n assert iris.title == \"{} x {}\".format(x_feature, y_feature)\n data = iris.sources[name].data\n np.testing.assert_array_almost_equal(data[\"x\"][:2], x_vals)\n np.testing.assert_array_almost_equal(data[\"y\"][:2], y_vals)", "def test_perceptron(self) -> None:\n self.network = self.nn_class(self.n_features, False)\n accuracy = self.get_accuracy()\n self.assertTrue(\n accuracy > self.threshold,\n \"This implementation is most likely wrong since \"\n f\"the accuracy ({accuracy}) is less than {self.threshold}.\",\n )", "def test_scalene():\n assert 'scalene' == classify_triangle(2,3,4)", "def test_getTunaFeatures(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n array = [\"1\", \"2\", \"3\", \"4\"]\n self.assertEqual(tuna.getTunaFeatures(), array)", "def test_menhinick(self):\n self.assertEqual(menhinick(self.TestData), 9/sqrt(22))", "def numeric(*args):", "def test_train(C, gamma):\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n assert isinstance(clf, svm.SVC)\n assert isinstance(score, float)", "def test_train(self):\n print \"x=\",self.trainer.train()", "def __init__(self, feature, assertion=NoAssertion):\n self.feature = feature\n self.assertion = assertion" ]
[ "0.65769154", "0.6479049", "0.64082533", "0.63766325", "0.62874675", "0.6269006", "0.62610215", "0.62139887", "0.617032", "0.61396384", "0.608057", "0.6065621", "0.6053544", "0.60263926", "0.59525865", "0.5915357", "0.590433", "0.58927613", "0.5877672", "0.58714354", "0.58414525", "0.5841115", "0.5831041", "0.58176476", "0.57856107", "0.5782199", "0.57720935", "0.57640374", "0.574072", "0.5727299", "0.5721821", "0.5721821", "0.57008487", "0.56968683", "0.5696014", "0.5692528", "0.5691691", "0.56905687", "0.5689851", "0.56844515", "0.56802773", "0.5669263", "0.5669263", "0.56688964", "0.5664773", "0.56643087", "0.5662754", "0.56568325", "0.5653753", "0.5646292", "0.5639732", "0.5635628", "0.5629887", "0.5629217", "0.5617634", "0.5617556", "0.56170386", "0.56164044", "0.56055623", "0.5603586", "0.5600252", "0.55991703", "0.55984706", "0.5594611", "0.5594138", "0.55871874", "0.55827266", "0.5579053", "0.55770236", "0.5576505", "0.55744374", "0.5563347", "0.55524695", "0.55523306", "0.5523377", "0.5517802", "0.55162233", "0.55133533", "0.5508017", "0.5500313", "0.5487525", "0.54799515", "0.5466957", "0.54652756", "0.54503036", "0.54487103", "0.5447945", "0.54474735", "0.54464453", "0.5439222", "0.54370725", "0.54370296", "0.54329795", "0.54290915", "0.54275775", "0.54207647", "0.5420518", "0.5418879", "0.5408269", "0.5400761" ]
0.5612527
58
Test the Categorical feature class.
def test_categorical_feature(): feature = Categorical("abc") for element in "abc": feature.set(element) feature.set("ignore this") feature.push() for element in "abc": getattr(feature, "set_" + element)() feature.push() array = feature.array() assert array.shape == (6, 3) for i, row in enumerate(array): assert sum(row) == 1.0 and row[i % 3] == 1.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def test_categorical(self):\n with Model() as model:\n Categorical('x', np.array([0.25, 0.75]))\n steps = assign_step_methods(model, [])\n assert isinstance(steps, BinaryGibbsMetropolis)\n with Model() as model:\n Categorical('y', np.array([0.25, 0.70, 0.05]))\n steps = assign_step_methods(model, [])\n assert isinstance(steps, CategoricalGibbsMetropolis)", "def predict_category(self):\n pass", "def test_category(self):\n\n # Test empty categories\n self.assertFalse(self.colorspace.hasCategory('ocio'))\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n with self.assertRaises(IndexError):\n self.colorspace.getCategories()[0]\n\n # Test with defined TEST_CATEGORIES.\n for i, y in enumerate(TEST_CATEGORIES):\n self.assertEqual(len(self.colorspace.getCategories()), i)\n self.colorspace.addCategory(y)\n self.assertTrue(self.colorspace.hasCategory(y))\n\n # Test the output list is equal to TEST_CATEGORIES.\n self.assertListEqual(\n list(self.colorspace.getCategories()), TEST_CATEGORIES)\n\n # Test the length of list is equal to the length of TEST_CATEGORIES.\n self.assertEqual(len(self.colorspace.getCategories()),\n len(TEST_CATEGORIES))\n\n iterator = self.colorspace.getCategories()\n for a in TEST_CATEGORIES:\n self.assertEqual(a, next(iterator))\n\n # Test the length of categories is zero after clearCategories()\n self.colorspace.clearCategories()\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n\n # Testing individually adding and removing a category.\n self.colorspace.addCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 1)\n self.colorspace.removeCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 0)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def data_categorical(df, cat_features = [], cont_features = []):\n subset_cat = []\n subset_dict={}\n # Add all the object type features to config.cat_features \n for col in df.columns:\n if df[col].dtype == 'object' and col not in cont_features:\n subset_cat.append(col)\n if col not in cat_features :\n cat_features.append(col)\n if cat_features !=[]:\n print('Categorical features : ', ' '.join(cat_features))\n printmd('**Number of unique values for every feature:**')\n print(pd.DataFrame(df[cat_features].nunique(), columns = ['Unique values']).sort_values(by = 'Unique values', ascending=False))\n printmd(\"**5 uniques samples of every Categorical Features :**\")\n for col in cat_features :\n subset_dict[col]= df[col].unique()[:5]\n print(pd.DataFrame.from_dict(subset_dict, orient='index').transpose())\n return (cat_features)", "def test_compare_categories_categorical_variables(self):\r\n for method in self.cat_methods:\r\n compare_categories(self.dm1_fp, self.map1_fp, method,\r\n self.cat_categories, self.num_perms, self.test_dir)\r\n results_fp = 
join(self.test_dir, '%s_results.txt' % method)\r\n self.files_to_remove.append(results_fp)\r\n results_f = open(results_fp, 'U')\r\n results = results_f.readlines()\r\n results_f.close()\r\n\r\n # Make sure the files aren't empty.\r\n self.assertTrue(len(results) > 0)", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def classify(self, example):\n raise NotImplementedError()", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )", "def test_classify_cuisine(self):\n pass", "def test_extract_categories():\n pass", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def test_categorical_log_frequency():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very 
skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def test_get_categories(self):\n pass", "def test_assign_categorical(curve):\n assert curve.dtypes[0] == 'float'\n curve.dtypes = 'category'\n assert curve.dtypes[0] == 'category'", "def get_categorical_features(self, x: pd.DataFrame) -> pd.DataFrame:\n return x[self.categorical_features]", "def test_text_classifier_curate(self):\n pass", "def test_text_classifier_test(self):\n pass", "def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")", "def test_with_tuple(self, seed):\n categories = (\"asdfa\", 2)\n dim = Categorical(\"yolo\", categories)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert samples[0] == \"asdfa\"\n assert dim._probs == (0.5, 0.5)\n\n assert categories == dim.categories\n\n assert 2 in dim\n assert 3 not in dim\n\n assert (\n str(dim) == \"Categorical(name=yolo, prior={asdfa: 0.50, 2: 0.50}, \"\n \"shape=(), default value=None)\"\n )\n\n assert dim.name == \"yolo\"\n assert dim.type == \"categorical\"\n assert dim.shape == ()", "def _is_categorical(df, field):\n return df[field].dtype.name == 'category'", "def test_categorical_column_validates_categories(self):\n\n categories = 1\n\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", 1]\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", \"Blue\"]\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)", "def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)", "def classify(self, features):\n\n # TODO: finish this.\n features = np.array(features)\n return self.classifier.classify(features)", "def get_categorical_features(self):\n return self.categorical_features", "def classify(self, data):\n abstract", "def is_categorical(self):\n return all([isinstance(dim, Categorical) for dim in self.dimensions])", "def test_isNumericCategory(self):\r\n obs = self.overview_map.isNumericCategory('Treatment')\r\n self.assertEqual(obs, False)\r\n\r\n obs = self.overview_map.isNumericCategory('DOB')\r\n self.assertEqual(obs, True)", "def test_create_category(self):\n pass", "def test_with_dict(self, seed):\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)))\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert samples[0] == 2\n assert dim._probs == probs\n\n assert categories == dim.categories\n\n assert 2 in dim\n assert 0 not in dim\n\n assert dim.name == \"yolo\"\n assert dim.type 
== \"categorical\"\n assert dim.shape == ()", "def test_skl_hist_gradient_boosting_with_categorical():\n # We don't yet support HistGradientBoostingClassifier with categorical splits\n # So make sure that an exception is thrown properly\n rng = np.random.RandomState(0)\n n_samples = 1000\n f1 = rng.rand(n_samples)\n f2 = rng.randint(4, size=n_samples)\n X = np.c_[f1, f2]\n y = np.zeros(shape=n_samples)\n y[X[:, 1] % 2 == 0] = 1\n clf = HistGradientBoostingClassifier(max_iter=20, categorical_features=[1])\n clf.fit(X, y)\n np.testing.assert_array_equal(clf.is_categorical_, [False, True])\n\n with pytest.raises(\n NotImplementedError, match=r\"Categorical splits are not yet supported.*\"\n ):\n treelite.sklearn.import_model(clf)", "def is_partly_categorical(self):\n return any([isinstance(dim, Categorical) for dim in self.dimensions])", "def test_isNumericCategory(self):\n obs = self.overview_map.isNumericCategory('Treatment')\n self.assertEqual(obs, False)\n\n obs = self.overview_map.isNumericCategory('DOB')\n self.assertEqual(obs, True)", "def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))", "def test_contains_from_categorical(self, tdim2):\n assert (0, 0, 0, 1) in tdim2\n assert (0, 2, 0, 1) in tdim2\n assert (0, 2, 0) not in tdim2", "def test_categorical_dimension_api(self):\r\n warm_colors = CategoricalDimension(\r\n name='colors',\r\n values=[\r\n 'red',\r\n 'yellow',\r\n 'orange',\r\n ]\r\n )\r\n\r\n cool_colors = CategoricalDimension(\r\n name='colors',\r\n values=[\r\n 'green',\r\n 'blue',\r\n 'indigo',\r\n 'violet',\r\n ]\r\n )\r\n\r\n rainbow_colors = CategoricalDimension(\r\n name='colors',\r\n values=[\r\n 'infra-red',\r\n 'red',\r\n 'orange',\r\n 'yellow',\r\n 'green',\r\n 'blue',\r\n 'indigo',\r\n 'violet',\r\n 'ultra-violet',\r\n ]\r\n )\r\n\r\n self.assertTrue(warm_colors in rainbow_colors)\r\n self.assertTrue(cool_colors in rainbow_colors)\r\n self.assertTrue(warm_colors.union(cool_colors) in rainbow_colors)\r\n self.assertFalse(rainbow_colors in warm_colors.union(cool_colors))\r\n self.assertTrue('infra-red' in rainbow_colors)\r\n self.assertTrue('infra-red' in rainbow_colors - warm_colors - cool_colors)\r\n self.assertTrue('red' not in rainbow_colors - warm_colors)\r\n self.assertTrue(warm_colors.intersects(rainbow_colors))\r\n self.assertFalse(warm_colors.intersects(cool_colors))\r\n self.assertTrue(rainbow_colors.intersection(warm_colors) == warm_colors)\r\n self.assertTrue(len(rainbow_colors - warm_colors - cool_colors) == 2)\r\n self.assertTrue(all(color in rainbow_colors for color in warm_colors.linspace()))", "def _classifier(self, test_set):\r\n return self._mahalanobis_classifier(test_set.features, self.targets)", "def test_that_objects_types_are_ok(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories)\n\n assert \"2\" not in dim\n assert 2 in dim\n assert \"asdfa\" in dim\n\n dim = Categorical(\"yolo\", categories, shape=(2,))\n\n assert [\"2\", \"asdfa\"] not in dim\n assert [2, \"asdfa\"] in dim", "def classify(trait_arg, alpha):\r\n x = df['essay'][1:]\r\n x = x.str.lower()\r\n y = df[trait_arg][1:]\r\n\r\n print(\"Predicting \", trait_arg, \" with alpha = \", alpha)\r\n print(\"Test set, Train Set ratio: 1:3\")\r\n\r\n # Test train split in 25 : 75 ratio\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, 
random_state=11)\r\n\r\n # TF-IDF vectorizer\r\n vectorizer = TfidfVectorizer()\r\n xx_train = vectorizer.fit_transform(x_train)\r\n xx_test = vectorizer.transform(x_test)\r\n\r\n # Multinomial Naive Bayes Classifier\r\n classifier = MultinomialNB(alpha=alpha)\r\n classifier.fit(xx_train, y_train)\r\n\r\n predictions = classifier.predict(xx_test)\r\n print(\"Confusion Matrix:\")\r\n print(classification_report(y_test, predictions))\r\n score = accuracy_score(y_test, predictions)\r\n print(\"Accuracy:\", score)", "def classification(original_training_data):\n\n ''' Storing the dataframe as numpy array '''\n original_training_data_values = original_training_data.values\n\n ''' Storing the values of target attribute for finding out the counts of each recipetype'''\n target_column = original_training_data_values[:, -1]\n\n ''' Recipe_type stores the unique values of target attribute in the form of a list [Muffin Cupcake] \n cupcake_muffin_count stores the count of muffin and cupcakes in the form of a list [451 451]'''\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n\n ''' cupcake_muffin_count.argmax() returns the index of the highest value. In this case, it will return the index of \n muffin or cupcake count. '''\n majority_class = recipe_type[cupcake_muffin_count.argmax()]\n\n return majority_class", "def classify(self, mutation) -> Set['Category']:\n pass", "def test_analysis_result_cat(self):\n self.assertIsNotNone(analysis.analysis_interaction(\"Datasets/Crime1.csv\", \"Category\"))", "def test_class_counts(self):\n oz = ClassificationScoreVisualizer(GaussianNB())\n oz.fit(self.multiclass.X.train, self.multiclass.y.train)\n\n unique, counts = np.unique(self.multiclass.y.train, return_counts=True)\n npt.assert_array_equal(oz.classes_, unique)\n npt.assert_array_equal(oz.class_counts_, counts)", "def test_CatNB():\n X = np.random.randint(5, size=(100, 100))\n y = np.random.randint(2, size=100)\n\n atom = ATOMClassifier(X, y, random_state=1)\n atom.run(models=\"CatNB\", n_calls=2, n_initial_points=1)\n assert not atom.errors\n assert hasattr(atom, \"CatNB\")", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical", "def find_categorical(self, df):\n# print(type(df),df.ndim)\n categorical = [key for key in df.keys() if df.dtypes[key] == np.dtype('O')]\n numeric = [key for key in df.keys() if df.dtypes[key] != np.dtype('O')]\n # correct naive expectations\n actual_categoric = ['MSSubClass']\n numeric = list(set(numeric) - set(actual_categoric))\n categorical = list(set(categorical).union(set(actual_categoric)))\n return categorical", "def test_contains_wrong_shape(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo\", categories, shape=2)\n\n assert 3 not in dim\n assert (\"asdfa\", 2) in dim", "def test_get_cat_score_w_negation(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = ['large ears', 'increased pigmentation']\n\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n 
classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7201759238096741", "def svm_classify(train_image_feats, train_labels, test_image_feats, kernel_type):\r\n\r\n categories = np.unique(train_labels)\r\n # [Desc] make 15 different SVM solver (one(each category) vs. the other(14 other category))\r\n svc_list = []\r\n num_categories = len(categories)\r\n for cat_i in tqdm(range(num_categories)):\r\n category = categories[cat_i]\r\n if kernel_type == 'RBF':\r\n svc = svm.SVC(kernel='rbf', probability=True)\r\n elif kernel_type == 'linear':\r\n svc = svm.SVC(kernel='linear', probability=True)\r\n new_label_for_svm = np.where(train_labels == category, 1, 0)\r\n\r\n svc.fit(train_image_feats, new_label_for_svm)\r\n svc_list.append(svc)\r\n\r\n # [Desc] get test images' class using trained svm\r\n probability_list = []\r\n for cat_i in range(num_categories):\r\n svc = svc_list[cat_i]\r\n logit = svc.decision_function(test_image_feats)\r\n probability = logit\r\n probability_list.append(probability)\r\n probability_mat = np.array(probability_list)\r\n probability_mat = np.transpose(probability_mat)\r\n # [Desc] get each class to argmax each logit value.\r\n argmax_class = np.argmax(probability_mat, axis=1)\r\n\r\n return categories[argmax_class]", "def test_category_lowercase(self):\n self.assertEqual(self.category.category, \"test\")", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def classify(self, dataset, missing_value_action='auto'):\n\n return super(LogisticClassifier, self).classify(dataset,\n missing_value_action=missing_value_action)", "def test_text_classifier_create(self):\n pass", "def categoricals(self):\n return base_64_to_object(self.categorical_hyperparameters_64)", "def _classifier(self, test_set):\r\n return self._euclidian_classifier(test_set.features, test_set.targets)", "def concept_categorization(self):\n dataset = pd.read_csv(\"data/Categorization data set.csv\", sep=\";\", header=None)\n dataset.columns = ['concept','word']\n\n cti = {}\n for i,c in enumerate(np.unique(dataset.concept.values)):\n cti[c] = i\n y_true = dataset.concept.apply(lambda x: cti[x]).values\n vs = []\n preds = [''] * dataset.shape[0]\n for ind,w in enumerate(dataset.word.values):\n try:\n vs.append(self.embeddings_index[w])\n except:\n preds[ind] = 0 \n km = KMeans(n_clusters=22, random_state=0)\n km.fit(np.array(vs).astype(np.float32))\n for ind,w in enumerate(dataset.word.values):\n if preds[ind] == '':\n preds[ind] = km.predict(np.array([self.embeddings_index[w]]))[0]\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, preds)\n #purity score\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)", "def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels", "def classify(self):\r\n Classify(os.path.join(self.__path,'test.csv'),self.__rang,self.__numeric,self.__statistics,self.__k,self.__classes,self.__abs_n,self)\r\n self.view.Build_Button.configure(state=\"active\")", "def 
test_xray_classifier():\n model = X_ray_Classifier()\n assert type(model) == X_ray_Classifier", "def classify(self, X):\n x = convert_data( X )\n return self.internal.classify(*get_data(x))", "def test_add_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n rv = self.category('Breakfast')\n self.assertIn(b'Category created', rv.data)", "def test_unused_categories_logic(self):\n s = ak.array([str(i) for i in range(10)])\n s12 = s[1:3]\n cat = ak.Categorical(s)\n cat12 = cat[1:3]\n self.assertListEqual(ak.in1d(s, s12).to_list(), ak.in1d(cat, cat12).to_list())\n self.assertSetEqual(set(ak.unique(s12).to_list()), set(ak.unique(cat12).to_list()))\n\n cat_from_codes = ak.Categorical.from_codes(ak.array([1, 2]), s)\n self.assertListEqual(ak.in1d(s, s12).to_list(), ak.in1d(cat, cat_from_codes).to_list())\n self.assertSetEqual(\n set(ak.unique(s12).to_list()),\n set(ak.unique(cat_from_codes).to_list()),\n )", "def test_category_model_entry(self): # PRUEBA DE CARGAR LA INFORMACION EN LOS MODELOS A TESTEAR\n data = self.data1\n self.assertTrue(isinstance(data, Category)) # REALIZA EL TESTEO ", "def test_text_classifier_vaporise(self):\n pass", "def evaluate_model(model, X_test, Y_test, category_names):\n\n Y_pred = pd.DataFrame(model.predict(X_test))\n Y_pred.columns = category_names\n Y_test = pd.DataFrame(Y_test)\n Y_test.columns = category_names\n\n for column in category_names:\n print('** {} **'.format(column).upper())\n print(classification_report(Y_test[column], Y_pred[column]))", "def test(self):\n\t\treturn classification_report(self.test_labels, self.predict(self.test_data), target_names=self.le.classes_)", "def test_analysis_target_cat(self):\n self.assertEqual(analysis.analysis_interaction(\"Datasets/Crime1.csv\", \"test\"), WRONG_COL_EX)", "def classify (self, text_test):\n test_features = self.vectorizer.transform(text_test)\n return self.nbc.predict(test_features)", "def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred", "def classify(self, features):\n\n # TODO: finish this.\n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for i in range(feat_shape[0]):\n vote = np.zeros((self.num_trees))\n for j in range(self.num_trees):\n #print self.trees[j].classify(feat)\n vote[j] = self.trees[j].classify(features[i,self.attr_track[j]].reshape(1,-1))[0]\n counts = np.bincount(vote.astype(int))\n class_labels.append(np.argmax(counts))\n return class_labels", "def test_conditional():\n # verify that conditioning increases the likelihood of getting a sample with the specified\n # categorical value", "def test_text_classifier_train(self):\n pass", "def categorical_accuracy(preds, y):\n max_preds = preds.argmax(dim=1, keepdim=True)\n correct = max_preds.squeeze(1).eq(y)\n\n return correct.sum() / torch.FloatTensor([y.shape[0]])", "def test_categorical_element_indexing():\n cat = pd.Categorical([\"a\", \"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\"])\n 
pdsr = pd.Series(cat)\n sr = cudf.Series(cat)\n assert_eq(pdsr, sr)\n assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False)", "def test_bad_probabilities(self):\n categories = {\"asdfa\": 0.05, 2: 0.2, 3: 0.3, 4: 0.4}\n with pytest.raises(ValueError):\n Categorical(\"yolo\", categories, shape=2)", "def test_photo_classification_view_set_post_tag_category_exists(self):\n # Test data\n user = account_models.User.objects.get(email='mrtest@mypapaya.io')\n photo_models.PhotoClassification.objects.create_or_update(name='night', classification_type='category')\n\n # Simulate auth\n token = test_helpers.get_token_for_user(user)\n\n # Get data from endpoint\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n payload = {\n 'name': 'Night',\n 'classification_type': 'tag'\n }\n\n request = client.post('/api/photo_classifications', data=payload, format='json')\n result = request.data\n\n self.assertEquals(result['name'], 'Night')\n self.assertEquals(result['classification_type'], 'tag')\n\n # Query for entry as well\n classifications = photo_models.PhotoClassification.objects.all()\n\n self.assertEquals(len(classifications), 13)", "def test_mixed():\n # assert the distribution of the samples is close to the distribution of the data\n # using a kstest for continuous + a cstest for categorical.", "def multiclass_toy_data(): \n #dataset = np.zeros((10,5), np.int)\n dataset = np.array([[0,0,0,0,4],\n [0,0,0,0,5],\n [1,3,0,0,0],\n [3,1,0,0,1],\n [0,0,6,2,0],\n [0,0,0,0,0],\n [0,0,1,7,2], \n [0,0,5,1,5],\n [0,0,34,0,0],\n [0,0,3,0,0]])\n Y = np.array([3,3,2,2,1,0,1,1,0,0])\n #for i in range(10):\n #for j in range(5):\n #dataset[i][j] = np.random.randint(0,10) \n dataset = np.column_stack((dataset, Y))\n return (dataset)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=Y_test.keys()))", "def test_intent_classifier_curate(self):\n pass", "def test_basic_labeling(self):\n # data with only 1 feature\n data = array([[-1], [1], [0.5], [0.25], [-0.33], [0]])\n # give 1 if feature value >= 0; otherwise 0\n labels = array([0, 1, 1, 1, 0, 1])\n cdata = LabeledCData(data, labels)\n\n # ensure that labelling is correct\n assert array_equal(cdata.labels, labels)", "def test_categorical_constraint():\n categories = [\"Blue\", \"Red\", \"Yellow\"]\n prop = \"Property Color\"\n c = CategoricalConstraint(name=prop, accepted_categories=categories)\n\n mapped_c = c.to_dict()\n\n assert mapped_c[\"type\"] is \"categorical\"\n assert mapped_c[\"name\"] is prop\n assert mapped_c[\"options\"][\"categories\"] is categories", "def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)", "def test_get_categories(self):\n obs = self.tester._get_categories(self.conn_handler)\n self.assertEqual(obs, self.exp_categories)", "def test_get_classes(self):\n self.view.learning_model = TestSingleLabelClassifierModel()\n self.assertEqual(self.view.get_classes(), TestSingleLabelClassifierModel.classes)", "def isCategorical(data):\n\tre = next((d for d in data if not (type(d) == int or type(d) == str)), None)\n\treturn (re is None)", "def test_get_a_category(self):\n self.test_add_category_success()\n response = self.client.get('/categories/1',\n headers={\"Authorization\": self.token})\n self.assertEqual(response.status_code, 200)\n self.assertIn('asian', response.data.decode())", "def test_create_category(self):\n res = 
self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))", "def test_repr_too_many_cats(self):\n categories = tuple(range(10))\n dim = Categorical(\"yolo\", categories, shape=2)\n\n assert (\n str(dim) == \"Categorical(name=yolo, \"\n \"prior={0: 0.10, 1: 0.10, ..., 8: 0.10, 9: 0.10}, \"\n \"shape=(2,), default value=None)\"\n )", "def classify(self, nn=1):\n\t\t#err=0\n\t\tpossibilities=[]\n\t\tfor i in range(len(self.X_test)):\n\t\t\tfor lines in range(len((self.X_train))):\n\t\t\t\tdist=np.linalg.norm(self.X_test[i]-self.X_train[lines])\n\t\t\t\tpossibilities.append([dist,self.Y_train[lines]])\n\t\t\tpossibilities.sort()\n\t\t\tfinal=[]\n\t\t\tfor c in range(0,15):\n\t\t\t\tfinal.append(possibilities[c][1])\n\t\t\t\tprint possibilities[c][1]\n\t\t\tcount=np.zeros(10)\n\t\t\tfor m in final:\n\t\t\t\tcount[m]+=1\n\t\t\t\n\t\t\tans=np.any(count==count.max())\n\t\t\t\n\t\t\tprint \"actual=\",self.Y_test[i]\n\t\t\tif(ans!=self.Y_test[i]):\n\t\t\t\tglobal err\n\t\t\t\terr=err+1", "def Catboost(df, test_size,col_dummies):\n from sklearn.model_selection import train_test_split\n from catboost import CatBoostRegressor\n # Define input\n X = df.drop(['target'], axis=1)\n # Set validation\n y = df['target']\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n Cb = CatBoostRegressor(iterations=200,\n learning_rate=0.02,\n depth=12,\n eval_metric='RMSE',\n bagging_temperature = 0.2)\n column_index = [X_final.columns.get_loc(c) for c in col_dummies if c in X_final]\n # Fit model\n clf = Cb.fit(X_train, y_train,cat_features=column_index)\n print('Linear Regression RMSE',compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def testSaveAndLoadCategorical(self):\n num_elems = 51 # _getCategorical starts counting at 1, so the size is really off by one\n cat = self._getCategorical(size=num_elems)\n with self.assertRaises(ValueError):\n # Expect error for mode not being append or truncate\n cat.to_hdf(\"foo\", dataset=\"bar\", mode=\"not_allowed\")\n\n with tempfile.TemporaryDirectory(dir=CategoricalTest.cat_test_base_tmp) as tmp_dirname:\n dset_name = \"categorical_array\" # name of categorical array\n\n # Test the save functionality & confirm via h5py\n cat.to_hdf(f\"{tmp_dirname}/cat-save-test\", dataset=dset_name)\n\n import h5py\n\n f = h5py.File(tmp_dirname + \"/cat-save-test_LOCALE0000\", mode=\"r\")\n keys = list(f.keys())\n if io.ARKOUDA_HDF5_FILE_METADATA_GROUP in keys: # Ignore the metadata group if it exists\n keys.remove(io.ARKOUDA_HDF5_FILE_METADATA_GROUP)\n self.assertEqual(len(keys), 1, f\"Expected 1 key, {dset_name}\")\n self.assertListEqual([dset_name], keys)\n d = f[dset_name]\n f.close()\n\n # Now try to read them back with load_all\n x = ak.load_all(path_prefix=f\"{tmp_dirname}/cat-save-test\")\n self.assertTrue(dset_name in x)\n cat_from_hdf = x[dset_name]\n\n expected_categories = [f\"string {i}\" for i in range(1, num_elems)] + [\"N/A\"]\n\n # Note assertCountEqual asserts a and b have the same elements\n # in the same amount regardless of order\n self.assertCountEqual(cat_from_hdf.categories.to_list(), expected_categories)\n\n # Asserting the optional components and sizes are correct\n # for both constructors should be sufficient\n self.assertTrue(cat_from_hdf.segments is not None)\n self.assertTrue(cat_from_hdf.permutation is not None)\n print(f\"==> cat_from_hdf.size:{cat_from_hdf.size}\")\n self.assertEqual(cat_from_hdf.size, 
num_elems - 1)", "def test_index_view_with_categories(self):\n add_cat('test',1,1)\n add_cat('temp',1,1)\n add_cat('tmp',1,1)\n add_cat('tmp test temp',1,1)\n\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"tmp test temp\")\n\n num_cats =len(response.context['categories'])\n self.assertEqual(num_cats , 4)", "def categorical_accuracy(preds, y):\n max_preds = preds.argmax(\n dim=1, keepdim=True\n ) # get the index of the max probability\n correct = max_preds.squeeze(1).eq(y)\n return correct.sum() / torch.FloatTensor([y.shape[0]]).to(device)" ]
[ "0.73366314", "0.7113809", "0.71037066", "0.6780652", "0.66445255", "0.66398925", "0.6559521", "0.6554185", "0.6519982", "0.6498263", "0.6448205", "0.64314634", "0.64148504", "0.64140356", "0.63725084", "0.6347099", "0.6345432", "0.62697804", "0.62644917", "0.62623185", "0.624088", "0.62361515", "0.6230513", "0.62102383", "0.6208405", "0.6201192", "0.62002", "0.61810195", "0.61779237", "0.6156097", "0.6142302", "0.61358976", "0.61279976", "0.61263955", "0.6122116", "0.6108596", "0.61045235", "0.60939944", "0.6087981", "0.60832727", "0.6078611", "0.60750747", "0.6073056", "0.60716665", "0.6061945", "0.6050584", "0.60165006", "0.59997916", "0.5994547", "0.59766054", "0.59760505", "0.59760505", "0.5955447", "0.5949505", "0.5946992", "0.5945492", "0.5895161", "0.5889663", "0.5883858", "0.588171", "0.5877114", "0.58671117", "0.5866378", "0.5862969", "0.5862299", "0.58573717", "0.5856754", "0.58496666", "0.5843616", "0.58374345", "0.58344334", "0.58165914", "0.5812084", "0.5811713", "0.5810924", "0.57895887", "0.5786368", "0.5785986", "0.5785483", "0.5777929", "0.5776728", "0.5776538", "0.5754164", "0.5752995", "0.5752392", "0.5744538", "0.57421064", "0.5738936", "0.57342297", "0.57342297", "0.5726356", "0.572469", "0.5723244", "0.5723157", "0.5718064", "0.57162666", "0.57133", "0.5708394", "0.5707671", "0.5704765" ]
0.7951883
0
Test the Hashed feature class.
def test_hashed_feature():
    def mock(c):
        return ord(c) - ord('a')

    group = Group({"a": Hashed(buckets=3, hash=mock),
                   "b": Hashed(buckets=5, hash=mock), })

    for i in range(10):
        group.set_a("abcde"[i % 3])
        group.set_b("abcde"[i % 5])
        group.push()

    array = group.array()
    assert array.shape == (10, 8)

    for i, row in enumerate(array):
        for column, value in zip(array.columns, row):
            feature, index = column.split("_")
            if feature == "a":
                assert value == float((i % 3) == int(index))
            else:
                assert value == float((i % 5) == int(index))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_hashing():\n X = Vectorizer(strategy=\"Hashing\", n_features=10).fit_transform(X_text)\n assert X.shape == (10, 10)\n assert \"hash1\" in X", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def test_creating_simple_feature():\n # given & when\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n\n # then\n assert feature.id == 1\n assert feature.keyword == \"Feature\"\n assert feature.sentence == \"I am a feature\"\n assert feature.path == \"foo.feature\"\n assert feature.line == 1\n assert feature.tags == []", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def test_basic(self):\n self.assertEqual(hash_str(\"world!\", salt=\"hello, \").hex()[:6], \"68e656\")", "def test_registry():\n assert(CQT.get_id() in msaf.base.features_registry.keys())\n assert(PCP.get_id() in msaf.base.features_registry.keys())\n assert(Tonnetz.get_id() in msaf.base.features_registry.keys())\n assert(MFCC.get_id() in msaf.base.features_registry.keys())\n assert(Tempogram.get_id() in msaf.base.features_registry.keys())", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def feature():\n pass", "def test_is_active_of_homework_positive():\n assert oop_hw.is_active()", "def test_serialization(self):\n for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_serialization(hashtype)", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert 
len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def test_fish():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fish(test_path)\n try:\n assert x_train.shape == (97, 20)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))", "def loadFeatures(self, filename):\n f = open(filename, 'rb')\n loadhash = pickle.load(f)\n b = self.spikes.view(np.uint8)\n hashkey = hashlib.sha1(b).hexdigest()\n\n if loadhash == hashkey:\n print(\"Spikeset hashes match, loading features info.\")\n self.calculateFeatures(pickle.load(f))\n else:\n print(\"Hashes don't match, features are from a different dataset. Be careful.\")\n self.calculateFeatures(pickle.load(f))", "def test_get_specific_algo(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n File(self.file).write(\"\\n\".join(self.data_to_write))\n expected = True\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)\n\n expected = self.expected_hashed[\"sha512\"]\n actual = Hash(self.file, algorithm=\"sha512\", only_hash=True).get()\n self.assertEqual(expected, actual)\n\n File(self.file).delete()\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)", "def test_read_feature_collection(self):\n fc = self.read_feature()\n assert len(fc.features) == 1\n feature = fc.features[0]\n self.check_feature(feature)", "def test_confirm_features_in_class_variable(self):\n # Given I have Romaine's core\n from tests.common import romaine\n core = romaine.Core()\n\n # When I locate features in /tmp/romaine_tests/features\n # And I locate features in tests/features\n core.locate_features('/tmp/romaine_tests/features')\n core.locate_features('tests/features')\n\n # Then the core's feature_paths_list variable contains:\n # | path |\n # | /tmp/romaine_tests/features/feature1 |\n # | /tmp/romaine_tests/features/feature2 |\n # | /tmp/romaine_tests/features/subdir/feature3 |\n # | tests/features/feature1 |\n # | tests/features/feature2 |\n # | tests/features/subdir/feature3 |\n self.assertEqual(\n sorted(core.feature_file_paths),\n [\n '/tmp/romaine_tests/features/feature1',\n '/tmp/romaine_tests/features/feature2',\n '/tmp/romaine_tests/features/subdir/feature3',\n 'tests/features/feature1',\n 'tests/features/feature2',\n 'tests/features/subdir/feature3',\n ]\n )", "def test_gtf(self):\n #TODO write bed tests", "def test_Fuselage_full():\n fus = Fuselage()\n assert('OML' in fus)", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def __hash__(self):\n return hash(self.get_all_features())", "def test_feature(feature, value, 
good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def test(self):\n pass", "def test(self):\n raise NotImplementedError", "def __init__(self, feature, assertion=NoAssertion):\n self.feature = feature\n self.assertion = assertion", "def test_hashmap(self, hash_type=\"robinhood\", hash_map_size=50):\n # Initialize\n print(\"Initializing a hash map ...\")\n hash_map = naive(hash_map_size) if hash_type == \"naive\" else robinhood(hash_map_size)\n\n # Load factor test\n print(\"Load factor test ...\")\n self.assertEqual(0.0, hash_map.load())\n\n # Add key/value pairs\n values = []\n for _ in range(hash_map_size):\n value = self.value_generator()\n hash_map.set(str(_), value)\n values.append(value)\n\n # Load factor test\n self.assertEqual(1.0, hash_map.load())\n\n # Get test\n print(\"Get test ...\")\n for _ in range(hash_map_size):\n self.assertEqual(values[_], hash_map.get(str(_)))\n\n # Delete test\n print(\"Delete test ...\")\n del_index = random.randint(0, hash_map_size)\n self.assertEqual(values[del_index], hash_map.delete(str(del_index)))\n self.assertEqual(None, hash_map.delete(str(del_index)))\n\n # Load factor test\n self.assertEqual((hash_map_size - 1) / hash_map_size, hash_map.load())\n\n # Set test\n print(\"Set test ...\")\n self.assertEqual(True, hash_map.set(str(del_index), values[del_index]))\n\n # Load factor test\n self.assertEqual(1.0, hash_map.load())\n\n # Get statistics (variance)\n print(\"Variance is {}\".format(statistics.variance(hash_map.probe_lengths)))\n\n print(\"Testing finished successfully ...\")", "def performance():\n nfeat = 10000\n print(f\"\\nTesting minhash performance with {nfeat} features:\\n\")\n\n # Reference\n features_32 = np.array(\n [xxh32_intdigest(rand.bytes(13)) for _ in range(nfeat)], dtype=np.uint32\n )\n for func in funcs_f32:\n mh = func(features_32)\n start = time.time()\n mh = func(features_32)\n end = time.time()\n rt = (end - start) * 1000\n print(f\"{func.__name__:<18}: {rt:.2f} ms runtime\")\n\n # New versions\n features_64 = np.array(\n [xxh64_intdigest(rand.bytes(13)) for _ in range(nfeat)], dtype=np.uint64\n )\n\n for func in funcs_xor:\n mh = func(features_64)\n start = time.time()\n mh = func(features_64)\n end = time.time()\n rt = (end - start) * 1000\n print(f\"{func.__name__:<18}: {rt:.2f} ms runtime\")", "def testBeliefs1sk(self):", "def test_training(self):\n\t\tpass", "def test_delete(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.delete()\n key = self.feature_test._get_redis_key()\n redis_data = cloak.redis.get(key)\n self.assertTrue(redis_data is None)\n\n set_key = Feature._get_redis_set_key()\n self.assertFalse(cloak.redis.sismember(set_key, key))", "def test_hash_type(self):\n expected_hash_method = ldap_sha512_crypt\n for pw in (generate_password(8) for i in range(10)):\n encrypted = hash_password(pw)\n self.assertTrue(expected_hash_method.identify(encrypted),\n \"Expected hashes for method {}, got: {}\"\n .format(expected_hash_method.name, encrypted))", "def test_surface_feature(self):\n\n # Fully valid image\n sf1 = SurfaceFeature(1, 1, 2, 2, 'dummy_wkt_string', 0.5, 'dummy_id')\n 
sf1.determine_quadkey()\n\n self.assertEqual(sf1.quadkey, '3000000')", "def testGetBinaryFeature(self):\n op = ops.get_binary_feature(tf.constant([1, 2], dtype=tf.int64), [0, 1], 3)\n with tf.Session() as sess:\n binary_features = sess.run(op)\n self.assertAllEqual([['aa', 'eaa'], ['bb', 'ebb']], binary_features)", "def testGetFirstFeature(self):\r\n self.prepareTestCanvas()\r\n myLayer = self.bucketFill.getActiveVectorLayer()\r\n myTestBox = QgsRectangle(TEST_BOX[0], TEST_BOX[1],\r\n TEST_BOX[2], TEST_BOX[3])\r\n\r\n myFeatureCount = myLayer.featureCount()\r\n if myFeatureCount > 0:\r\n myFeature = self.bucketFill.getFirstFeature(myLayer, myTestBox)\r\n print myFeature\r\n myMessage = ('Returned object was not a feature.')\r\n assert myFeature.type() == QgsFeature, myMessage\r\n else:\r\n myMessage = ('No features found in layer.')\r\n assert 1 == 0, myMessage", "def test_hashed_feature_random_sign():\n\n group = Group({\"a\": Hashed(buckets=100, random_sign=True), })\n\n for i in range(100):\n for j in range(100):\n group.set(randstr(), weight=123)\n group.push()\n\n array = group.array()\n assert array.shape == (100, 100)\n\n pos, neg = 0, 0\n for row in array:\n for value in row:\n assert value == 0 or abs(value) == 123\n pos += int(value > 0)\n neg += int(value < 0)\n assert pos and neg and abs(pos - neg) < (pos + neg) * 0.1", "def testModel( self, classTest, classPred):", "def testHashability(self) -> None:\n r = data_types.Result('test_1', ('win', 'x86'), (1, 10), 'id')\n test_set = set([r])\n test_set.add(r)\n self.assertEqual(1, len(test_set))\n\n r = data_types.Result('test_2', ('win', 'x86'), (2, 30), 'id')\n test_set.add(r)\n self.assertEqual(2, len(test_set))", "def __init__(self, feature, how_to_behave):\n self.feature = feature\n self.results = feature.results\n self.how_to_behave = how_to_behave", "def test_initialized() -> None:\n MapieClassifier()", "def test_redis_key(self):\n\n generated = self.feature_test._get_redis_key()\n expected = \"feature.1.testing\"\n self.assertEqual(generated, expected)", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def test_let(self):", "def test_example(self):\n self.assertEqual(self.example.get_example(), True)", "def test_it_exists():\n dan = Hobbit(\"Dan\")\n assert type(dan) == Hobbit\n assert dan.name == \"Dan\"\n assert dan.disposition == \"adventurous\"\n assert dan.age == 0", "def testWord(self, word):\n return self.crackHash(self.hashWord(word))", "def test_hue(self):\n thispath = os.path.dirname(__file__)\n impath = os.path.join(\"test\", \"737.jpg\")\n impath2 = os.path.join(\"test\", \"738.jpg\")\n \n img = cv2.imread(os.path.join(thispath, impath))\n img2 = cv2.imread(os.path.join(thispath, impath2))\n colorextr = ColorFeatureExtracter(img)\n colorextr2 = ColorFeatureExtracter(img2)\n 
print(colorextr.CompareFeatures(colorextr2.ComputeFeatures(),colorextr.ComputeFeatures()))\n # ... and then evaluate the output", "def test_add_feature():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:test\",\n \"/NoRestart\",\n ]\n )", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def test_alien_data(self):", "def test_initialization_of_homework_result_homework():\n assert result_1.homework == oop_hw", "def test_installed_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.installed_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa1\"]", "def test(self) -> Any:\n pass", "def test_getTunaFeatures(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n array = [\"1\", \"2\", \"3\", \"4\"]\n self.assertEqual(tuna.getTunaFeatures(), array)", "def test_set_add(self):\n\n self.feature_test.set_percentage(15)\n key = self.feature_test._get_redis_key()\n set_key = Feature._get_redis_set_key()\n self.assertTrue(cloak.redis.sismember(set_key, key))", "def test_if(self):", "def test_ha_vm(self):\n testflow.step(\n \"Add VM %s to affinity group %s\",\n conf.VM_NAME[2], self.affinity_group_name\n )\n assert ll_clusters.update_affinity_group(\n cluster_name=conf.CLUSTER_NAME[0],\n old_name=self.affinity_group_name,\n vms=conf.VM_NAME[:3],\n positive=False\n )\n ha_host = ll_vms.get_vm_host(vm_name=conf.VM_NAME[2])\n host_resource = rhevm_helpers.get_host_resource_by_name(\n host_name=ha_host\n )\n testflow.step(\"Kill HA VM %s\", conf.VM_NAME[2])\n assert ll_hosts.kill_vm_process(\n resource=host_resource, vm_name=conf.VM_NAME[2]\n )\n testflow.step(\"Wait for HA VM %s to be down\", conf.VM_NAME[2])\n assert ll_vms.waitForVMState(vm=conf.VM_NAME[2], state=conf.VM_DOWN)\n testflow.step(\n \"Check that HA VM %s fails to start\", conf.VM_NAME[2]\n )\n assert not ll_vms.waitForVMState(vm=conf.VM_NAME[2], timeout=120)\n testflow.step(\"Stop VM %s\", conf.VM_NAME[1])\n assert ll_vms.stopVm(positive=True, vm=conf.VM_NAME[1])\n testflow.step(\n \"Check that HA VM %s succeeds to start\", conf.VM_NAME[2]\n )\n assert ll_vms.waitForVMState(\n vm=conf.VM_NAME[2], state=conf.VM_POWERING_UP\n )", "def test_predictor():", "def test_compute_glycemic_load(self):\n pass", "def test_add_hash(self):\n hash = '08be2c7239acb9557454088bba877a245c8ef9b0e9eb389c65a98e1c752c5709'\n info = self.api.add_hash(hash, tags=['asd'])\n self.assertEqual(info['value'], hash)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])", "def test_init_hash(self):\n bill = Bill(self.input_hash)\n for key, value in self.input_hash.iteritems():\n self.assertEqual(value, bill.__dict__[key])", "def test(self):", "def test(self):", "def test_setTunaFeatures(self):\n tuna = Tuna()\n array = [\"1\", \"2\", \"3\", \"4\"]\n tuna.setTunaFeatures(array)\n self.assertEqual(tuna.getTunaFeatures(), array)", "def testTrainSplit(self, feature, Class, test_size = 0.2, \n random_state = 0):\n # training and testing sets\n fTrain, fTest, cTrain, cTest = 
train_test_split( feature, Class,\n test_size = test_size, \n random_state = random_state)\n self.fTrain = fTrain\n self.fTest = fTest\n self.cTrain = cTrain\n self.cTest = cTest\n \n return fTrain, fTest, cTrain, cTest", "def test_class_method(self):\n self.assertEqual(self.Test.has_one.im_self.__name__, 'Test')", "def test_01_lighting(self):", "def test_hash_data(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n File(self.file).write(\"\\n\".join(self.data_to_write))\n expected = True\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)\n\n for algo, result in self.expected_hashed.items():\n self.assertEqual(\n result,\n Hash(self.file).hash_data(algo),\n msg=\"%s did not passed the test\" % repr(algo),\n )\n\n File(self.file).delete()\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)", "def test_theft_and_stealing(self):", "def test_pattern_matching(self):\n\n for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_pattern_matching(hashtype)", "def test_auth_instance(self):\n self.assertIsInstance(Freenas(hostname), Freenas)\n\n\n\n\n #self.assertEqual(freenas.Freenas(hostname).request('auth/check_user',\n # method='POST', data={'username': 'api',\n # 'password': 'api'\n # }\n # ), True)", "def feature(self, node=\"clickhouse1\"):\n self.context.node = self.context.cluster.node(node)\n\n for scenario in loads(current_module(), Scenario):\n scenario()", "def test_machine_learning():", "def test001Word2VecHash():\n\n\t#model = Word2VecExecuter.Word2VecGetModel(\"../data_sets/GoogleNews-vectors-negative300.bin\")\n\tmodel = Word2VecExecuter.Word2VecGetModel(\"tools/word2vec/word2vec-read-only/vectors.bin\")\n\tlist_of_words = Features.LoadAllUnitsFromFiles(\"../data_sets/reuters_21578/test_train/\")\n\tdictWord2Vec, fpr = Word2VecExecuter.Word2VecLoadWordsHashTable(model, list_of_words)\n\n\tassert len(dictWord2Vec)==len(fpr)\n\n\trandom_word_index = random.randint(0,len(fpr)-1)\n\tsys.stdout.write(\"\\nrandom tested word index: \" + str(random_word_index) + \" \" + str(len(dictWord2Vec)) + \" \" + str(len(fpr)))\n\tword = dictWord2Vec.keys()[random_word_index]\n\tpointer = dictWord2Vec[word]\n\n\tvec1 = fpr[pointer]\n\tvec2 = Word2VecExecuter.Word2VecGetVector(model,word)\n\tcomp_vec = vec1-vec2\n\n\tassert max(abs(comp_vec))==0.0\n\n\tdictWord2VecH, fprH = Word2VecExecuter.Word2VecLoadWordsHashTable(model, list_of_words, Features.FeatureRepresentation.HASH)\n\n\thashed_word = dictWord2VecH.keys()[random_word_index]\n\tassert word != hashed_word\n\tpointerH = dictWord2VecH[hashed_word]\n\tvec3H = fpr[pointerH]\n\tcomp_vec = vec1-vec3H\n\n\tassert max(abs(comp_vec))==0.0\n\tdel model \n\n\t#check if after deleting the model it still works\n\tword = dictWord2Vec.keys()[random_word_index]\n\tpointer = dictWord2Vec[word]\n\n\tvec1ad = fpr[pointer+3]\n\tassert (len(vec1ad)==len(vec1))", "def test_test(task_dataset, features):\n features = torch.cat(features)\n feat = features[0]\n expected = features.eq(feat).sum().item() / N_SAMPLES\n\n class FakeModule(nn.Module):\n \"\"\"Always returns the same prediction.\"\"\"\n\n def forward(self, reps):\n \"\"\"Just returns the tag.\"\"\"\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits\n\n actual = learning.test(FakeModule(),\n task_dataset,\n device=torch.device('cpu'))\n assert actual == 
expected", "def test_user_hash_with_salt(self):\n self.assertEqual(get_user_hash(\"johndoe\", salt=\"jane\").hex()[:6], \"fb0bf4\")", "def test_authentication():\n\n c = GoodreadsClient()\n assert_equals(c.user['name'], 'Jan Skus')", "def test_fortress_flags(mock_htb_client: HTBClient):\n # Create a fake fortress to test with\n fortress = Fortress({\n \"id\": 1,\n \"name\": \"Jet\",\n \"ip\": \"10.13.37.10\",\n \"image\": \"https://www.hackthebox.eu/storage/companies/3.png\",\n \"number_of_flags\": 11\n }, mock_htb_client, summary=True)\n assert fortress.submit(CORRECT_HASH) is True\n\n with raises(IncorrectFlagException):\n fortress.submit(\"wrong\")", "def test_get_hyperflex_health_by_moid(self):\n pass", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def test_bed(self):\n #TODO write bed tests", "def test_usedforsecurity_flag_behavior(self) -> None:\n for version, expected in {\n self.sys_v3_8: (True, 'md5'),\n self.sys_v3_9: (False, 'md5'),\n self.sys_v4_8: (False, 'md5'),\n }.items():\n assert _attempt_init_of_python_3_9_hash_object(self.fake_md5, version) == expected", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_add_to_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(3 in Feature(\"testing\").whitelist)", "def __init__(self, geneId, gtfFeature):\n\n self.geneId = geneId\n self.features = {}", "def testing_featurizer_build():\n f = ImageFeaturizer()\n compare_featurizer_class(f, (0, 0), np.zeros((1)), 0, '', False, '', {}, 1)", "def test_health_get(self):\n pass", "def test_class_started(self, cls):", "def test_trie_class_init():\n from trie import Trie\n test_case = Trie()\n assert test_case.root.lookup == {}" ]
[ "0.65287024", "0.62354875", "0.6188721", "0.60676974", "0.59955835", "0.5943524", "0.59145314", "0.5907544", "0.59006155", "0.5855149", "0.5854101", "0.5758078", "0.5741417", "0.5683079", "0.5673396", "0.5668186", "0.5665878", "0.56312555", "0.5623874", "0.5621978", "0.55877644", "0.55796444", "0.5576072", "0.5576072", "0.5576072", "0.55751795", "0.55751795", "0.55751795", "0.55751795", "0.55751795", "0.5571806", "0.5570238", "0.554856", "0.5532527", "0.5531553", "0.5513284", "0.5499313", "0.5494458", "0.54943985", "0.5490917", "0.5487463", "0.5486542", "0.5461842", "0.54566556", "0.54537684", "0.54477966", "0.54475737", "0.544237", "0.54415864", "0.54173756", "0.5404041", "0.53899115", "0.53879255", "0.5380441", "0.53749233", "0.5365315", "0.53610986", "0.53559697", "0.53481865", "0.534408", "0.5341741", "0.5335868", "0.532671", "0.53245986", "0.53130966", "0.53055197", "0.5301275", "0.5296979", "0.52942985", "0.5292769", "0.52801204", "0.5279869", "0.5279869", "0.5279501", "0.5277939", "0.5275717", "0.5267472", "0.5267028", "0.52622426", "0.52578527", "0.5255993", "0.52535486", "0.52497584", "0.52495855", "0.5249405", "0.52447927", "0.52413267", "0.5233042", "0.52266765", "0.5223016", "0.52128196", "0.5207286", "0.5204844", "0.5204844", "0.52006346", "0.5199431", "0.5198192", "0.5194812", "0.5194201", "0.519348" ]
0.70252043
0
Test if the default hash function distributes random signs evenly.
def test_hashed_feature_random_sign():
    group = Group({"a": Hashed(buckets=100, random_sign=True), })

    for i in range(100):
        for j in range(100):
            group.set(randstr(), weight=123)
        group.push()

    array = group.array()
    assert array.shape == (100, 100)

    pos, neg = 0, 0
    for row in array:
        for value in row:
            assert value == 0 or abs(value) == 123
            pos += int(value > 0)
            neg += int(value < 0)
    assert pos and neg and abs(pos - neg) < (pos + neg) * 0.1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomHash(self):\n modulus = self.filter_size\n a, b = random.randint(1, modulus - 1), random.randint(1, modulus - 1)\n\n def f(x):\n return hash(x) % (a + b) % modulus\n\n return f", "def deterministicrandom(x):\n\n i = hash_value(x)\n\n r = 1.0 * i / MAX_HASH_VALUE\n return r", "def hash(x):\r\n return (randint(1,5*c)*x + randint(1,5*c))%c", "def default_hash():\n return \"!\"", "def random_h_function(key, N):\n\n if key is None:\n raise TypeError(\"Key must not be None\")\n\n\n hashcode = 0\n for i in range(len(key)):\n hashcode = (71 * hashcode + ord(key[i])) % N\n return hashcode", "def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)", "def hash(x) -> int:\n pass", "def r_soft_hash(x):\n if abs(x) < 1e-9:return 0\n # round it to some number of bits\n b = ns.round(ns.log(abs(x)) / ns.log(2))\n gran = 2**(b-30)\n return ns.round(x / gran) * gran", "def generate_key(random=random.SystemRandom()):\n poly = 0\n while not is_acceptable_multiplier(poly):\n poly = random.getrandbits(61)\n oh = []\n for _ in range(2 * BLOCK_SIZE + TWISTING_COUNT):\n u64 = None\n while u64 is None or u64 in oh:\n u64 = random.getrandbits(64)\n oh.append(u64)\n return UmashKey(poly, oh)", "def HashAlgorithm(self) -> _n_7_t_0:", "def fn(k):\n seen = set()\n for i in range(len(s)-k+1): \n val = (prefix[i+k] - prefix[i]*fac[k]) % MOD \n if val in seen: return True # rolling hash (ver. Monte Carlo)\n seen.add(val)\n return False", "def _hash_function(self, n):\n # Get the mask for this n, or make a new one of 32 random bits.\n mask = self._memomask.get(n)\n if mask is None:\n random.seed(n ^ self.seed_mask)\n mask = self._memomask[n] = int(random.getrandbits(32))\n # Now return a function that uses Jenkins Hash\n #\n def somehash(x):\n return hashlittle(x, mask)\n return somehash", "def fixed(o):\n try:\n hash(o)\n except TypeError:\n return False\n return True", "def test_default_hash_colision(self):\n h1 = default_fnv_1a(\"gMPflVXtwGDXbIhP73TX\", 5)\n h2 = default_fnv_1a(\"LtHf1prlU1bCeYZEdqWf\", 5)\n\n self.assertEqual(h1[0], h2[0]) # these should match\n for i in range(1, 5):\n self.assertNotEqual(h1[i], h2[i])", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def test_hash_type(self):\n expected_hash_method = ldap_sha512_crypt\n for pw in (generate_password(8) for i in range(10)):\n encrypted = hash_password(pw)\n self.assertTrue(expected_hash_method.identify(encrypted),\n \"Expected hashes for method {}, got: {}\"\n .format(expected_hash_method.name, encrypted))", "def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')", "def mkRandHash(N):\n junk = \"\".join(random.choice(string.ascii_letters + string.digits)\n for unused in range(10))\n return lambda key: hash(junk + key) % N", "def hash_gen(n):\n domain = \"abcdefghijklmnopqrstuvwxyz\"\n temp = \"\"\n for i in range(0, n):\n temp += domain[random.randrange(0, 26)]\n return temp", "def test_rendezvous_hash_roughly_fractional_change():\n first_choices = range(10)\n second_choices = range(9)\n\n test_keys = [str(x) for x in range(10000)]\n\n first_results = [\n marathon_tools.rendezvous_hash(first_choices, k) for k in test_keys\n ]\n second_results = [\n marathon_tools.rendezvous_hash(second_choices, k) for k in test_keys\n ]\n\n num_same = len([1 for x, y in zip(first_results, second_results) if x == y])\n assert num_same > 8900\n assert num_same < 
9100", "def h_python(key, N):\n return hash(key) % N", "def test_default_sha256(self):\n this_is_a_test = [\n 10244166640140130606,\n 5650905005272240665,\n 14215057275609328422,\n 5952353080197385534,\n 4990779931033217093,\n ]\n this_is_also = [\n 4140421647067018332,\n 9306548247555387104,\n 5672713771950536751,\n 8501641957786831066,\n 15146689942378126332,\n ]\n hashes = default_sha256(\"this is a test\", 5)\n self.assertEqual(hashes, this_is_a_test)\n hashes = default_sha256(\"this is also a test\", 5)\n self.assertEqual(hashes, this_is_also)", "def test_hash_verification(self):\n pw = generate_password(8)\n for hash_method in (ldap_des_crypt, ldap_sha512_crypt, ldap_md5,\n ldap_salted_sha1):\n encrypted = hash_method.hash(pw)\n self.assertTrue(verify_password(pw, encrypted),\n \"{}: '{}' should verify '{}'\"\n .format(hash_method.name, encrypted, pw))", "def hash_functions(self):\n pass", "def test_never_same():\n g = RG.larger_random()\n hundred_calls = set([next(g) for _ in range(20)])\n assert len(hundred_calls) == 20", "def create_hash(self):\n return os.urandom(32).encode('hex')", "def test_salt_generation(self):\n pw = generate_password(8)\n hashes = tuple(hash_password(pw) for i in range(10))\n self.assertEqual(len(hashes), len(set(hashes)),)", "def computeSecretKey(g, p):\r\n \"\"\"You will need to implement this function.\"\"\"\r\n return random.randint(1, p-1)", "def test_hash(self):\n ffs = get_available_force_fields()\n\n for ff1, ff2 in itertools.combinations(ffs, 2):\n assert hash(ff1) != hash(ff2)", "def test_12_multihash(self):\n self.base_12_multihash(\"sha256\")\n if sha512_supported:\n self.base_12_multihash(\"sha512_256\")", "def strongHashFunction(self):\n\t\treturn self._strongHashFunction", "def badhash(x):\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = ((x >> 16) ^ x) & 0xFFFFFFFF\n return x", "def hash(self, key):\n return self._hash_function(key) % self.size # Modular hashing", "def hash_exp(key, seed=SEED):\n m = hashlib.sha256((\"%s###%s\" % (seed, key)).encode(\"ascii\"))\n uniform = float(int(m.hexdigest(), 16) + 1) / 2**256\n return -1.0 * math.log(uniform, math.e)", "def _random(self, key):\n\n if hasattr(key, \"encode\"):\n key = key.encode('ascii')\n\n value = (zlib.crc32(key, self.seed) & MAX_VALUE)\n\n return value * INV_MAX_VALUE", "def test_automatic_default_to_sha256(self) -> None:\n for version, expected in {\n self.sys_v3_8: (True, 'sha256'),\n self.sys_v3_9: (False, 'sha256'),\n self.sys_v4_8: (False, 'sha256'),\n }.items():\n _set_allowed_viable_default_hashes(self.sha256Default, version)\n set_hash_format(None, self.sha256Default, version)\n assert _get_hash_object(None, self.sha256Default, version) == expected", "def maybe(self):\n return random.getrandbits(1)", "def _hashPassword(password):\n charset = './' + ascii_letters + digits\n return crypt.crypt(password, ''.join(random.sample(charset, 2)))", "def _hash(self, hashKey):\n return hashKey % self.size", "def bit_contribution_test(hash_function):\n\n model = hash_function()\n hash_list = []\n zero_str = '0' * 2048\n for i in range(1, 2049):\n for j in range(0, i):\n flip_str = zero_str[:j] + '1' + zero_str[j+1:i]\n hash_list.append(list(map(int, list(msg_to_bits.pad_msg(flip_str, i)))))\n if i % 200 == 0:\n print(i)\n\n hashed_dict = dict()\n collisions = 0\n i = 0\n for to_hash in hash_list:\n i += 1\n hash_val = model.hash(to_hash, False).tostring()\n if hash_val in hashed_dict:\n collisions += 1\n hashed_dict[hash_val] = 
True\n if i % 10000 == 0:\n print(i)\n\n return collisions", "def _toss_fair_coin() -> bool:\n return random.random() > 0.5", "def _sample_using_md5(\n self,\n column_name: str,\n hash_digits: int = 1,\n hash_value: str = \"f\",\n ):\n return (\n sa.func.right(\n sa.func.md5(sa.cast(sa.column(column_name), sa.Text)), hash_digits\n )\n == hash_value\n )", "def _one_sign(item, seed):\n return 1 if (xxh64(item, seed=seed).intdigest() & 1) else -1", "def test_default_md5(self):\n this_is_a_test = [\n 12174049463882854484,\n 10455450501617390806,\n 3838261292881602234,\n 12102952520950148619,\n 12126605867972429202,\n ]\n this_is_also = [\n 8938037604889355346,\n 9361632593818981393,\n 15781121455678786382,\n 5600686735535066561,\n 1353473153840687523,\n ]\n hashes = default_md5(\"this is a test\", 5)\n self.assertEqual(hashes, this_is_a_test)\n hashes = default_md5(\"this is also a test\", 5)\n self.assertEqual(hashes, this_is_also)", "def _hash_function(self, key):\n h = 0\n a = 31\n table_size = self.size\n for i in range(len(key)):\n h = (h * a + ord(key[i])) % table_size\n return h", "def compute_hash(self, key: int):\n return key % 42", "def perfect_hash(num):\n return ((num+OFFSET)*(SIZE/PERIOD)) % (SIZE+1) + 1", "def __hash__(self) -> int:", "def fnv1(self, key, seed=0):\n # def fnv1(self, key):\n\n # Your code here\n \"\"\"\n Returns: The FNV-1 hash (64-bit) of a given string. \n \"\"\"\n #Constants : Fails the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a Hash Function\n # hash = offset_basis + seed\n # # hash = offset_basis\n # for c in key:\n # hash = hash * FNV_prime\n # hash = hash ^ ord(c)\n # return hash\n\n \"\"\"\n Returns: The FNV-1a (alternate) hash of a given string\n \"\"\"\n # #Constants : Passes the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a alternate Hash Function\n # hash = offset_basis + seed\n # for c in key:\n # hash = hash ^ ord(c)\n # hash = hash * FNV_prime\n # return hash", "def test_basic(self):\n self.assertEqual(hash_str(\"world!\", salt=\"hello, \").hex()[:6], \"68e656\")", "def fnv_1a(key: KeyT, seed: int = 0) -> int:\n max64mod = UINT64_T_MAX + 1\n hval = (14695981039346656037 + (31 * seed)) % max64mod\n fnv_64_prime = 1099511628211\n tmp = list(key) if not isinstance(key, str) else list(map(ord, key))\n for t_str in tmp:\n hval ^= t_str\n hval *= fnv_64_prime\n hval %= max64mod\n return hval", "def test_hash_ints_decorator(self):\n results = [\n 14409285476674975580,\n 6203976290780191624,\n 5074829385518853901,\n 3953072760750514173,\n 11782747630324011555,\n ]\n\n @hash_with_depth_int\n def my_hash(key, depth=1, encoding=\"utf-8\"):\n \"\"\"my hash function\"\"\"\n max64mod = UINT64_T_MAX + 1\n val = int(hashlib.sha512(key.encode(encoding)).hexdigest(), 16)\n return val % max64mod\n\n self.assertEqual(my_hash(\"this is a test\", 5), results)\n res = my_hash(\"this is a test\", 1)\n self.assertEqual(len(res), 1)\n self.assertEqual(res[0], results[0])", "def test_automatic_default_to_md5(self) -> None:\n for version, expected in {\n self.sys_v3_8: (True, 'md5'),\n self.sys_v3_9: (False, 'md5'),\n self.sys_v4_8: (False, 'md5'),\n }.items():\n _set_allowed_viable_default_hashes(self.md5Default, version)\n set_hash_format(None, self.md5Default, version)\n assert _get_hash_object(None, self.md5Default, version) == expected", "def randomization_bin(seed, problem_id):\r\n r_hash = hashlib.sha1()\r\n r_hash.update(str(seed))\r\n r_hash.update(str(problem_id))\r\n # get the 
first few digits of the hash, convert to an int, then mod.\r\n return int(r_hash.hexdigest()[:7], 16) % NUM_RANDOMIZATION_BINS", "def hash_key(self, key):\r\n hashed_key = sum((ord(char) for char in key))\r\n return hashed_key % 20", "def random_invite_hash():\n return ''.join(random.choice(string.ascii_lowercase) for i in range(25))", "def nextRandom(self):\n # Apply SHA-256, interpreting digest output as integer\n # to yield 256-bit integer (a python \"long integer\")\n hash_output = self.basehash.digest()\n self.next()\n return hash_output", "def __hash__(self):\n return hash(tuple(self.sig))", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def __hash__(self):\n return hash(self.hash)", "def create_hash() -> str:\n length = 6\n char = string.ascii_uppercase + string.digits + string.ascii_lowercase\n\n # Generate a new ID, until one is found that is unique\n while True:\n hash = \"\".join(random.choice(char) for _ in range(length))\n\n if not utils.cache_is_hash_taken(hash):\n return hash", "def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h", "def genSeed():\n\tseed_length = int(''.join(random.SystemRandom().choice(string.digits) for _ in range(0, 3)))\n\tseed = os.urandom(seed_length)\n\thashing_algorithm = hashlib.shake_128()\n\thashing_algorithm.update(seed)\n\t# 2200 bytes from SHAKE-128 function is enough data to get 1024 coefficients\n\t# smaller than 5q, from Alkim, Ducas, Pöppelmann, Schwabe section 7:\n\tseed_hash = hashing_algorithm.digest(100)\n\treturn seed, seed_hash", "def get_hash_str():\r\n\tli = \"\"\r\n\tfor i in range(5):\r\n\t\tli += str(int(int((6 * random.random()) + 1)))\r\n\treturn li", "def test_symmetry_negative_int(self):\n for x in range(1000):\n random_int = random.randint(-1 * sys.maxsize - 1, 0)\n encoded_int = base62.from_decimal(random_int)\n self.assertEqual(random_int, base62.to_decimal(encoded_int))", "def test_symmetry_positive_int(self):\n for x in range(1000):\n random_int = random.randint(0, sys.maxsize)\n encoded_int = base62.from_decimal(random_int)\n self.assertEqual(random_int, base62.to_decimal(encoded_int))", "def hash_functions(self):\n def hash_factory(n):\n return lambda x: hash(\"salt\" + str(n) + str(x) + \"salt\")\n return [ hash_factory(_) for _ in range(self.dim) ]", "def _hash_function(self, k):\n return (hash(k) * self._scale + self._shift) % self._prime % len(self._table)", "def unique_hash(only_letters=False):\n\n if only_letters:\n return ''.join((chr(int(x) + 97) if x.isdigit() else x)\n for x in uuid.uuid4().hex)\n return uuid.uuid4().hex", "def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))", "def i_rand_a():\n return i_random() % 95 + 32", "def test_hash_bytes_decorator(self):\n results = [\n 1164302962920061,\n 16735493734761467723,\n 18150279091576190542,\n 9861778148718857663,\n 14008040072978383620,\n ]\n\n @hash_with_depth_bytes\n def my_hash(key, depth=1):\n \"\"\"my hash function\"\"\"\n return hashlib.sha512(key).digest()\n\n self.assertEqual(my_hash(\"this is a test\", 5), results)\n res = my_hash(\"this is a test\", 1)\n self.assertEqual(len(res), 1)\n self.assertEqual(res[0], results[0])", "def __hash__(self):\n return 0", "def __hash__(self):\n 
hash_value = 0\n \n # approximate_online_count\n hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\n\t\treturn 1", "def _hash(self):\r\n MAX = sys.maxint\r\n MASK = 2 * MAX + 1\r\n n = len(self)\r\n h = 1927868237 * (n + 1)\r\n h &= MASK\r\n for x in self:\r\n hx = hash(x)\r\n h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167\r\n h &= MASK\r\n h = h * 69069 + 907133923\r\n h &= MASK\r\n if h > MAX:\r\n h -= MASK + 1\r\n if h == -1:\r\n h = 590923713\r\n return h", "def soft_hash(p):\n return tuple(map(r_soft_hash, p))", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def random_lottery():\n\treturn 0", "def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())", "def hash(self) -> str:\r\n ...", "def __hash__(self):\n base = 1\n h = 0\n for l in self.data:\n for i in l:\n if i:\n h += base\n base *= 2\n return hash(h)", "def get_salt():\n return os.urandom(32)", "def gensalt(log_rounds=12):\n\n return _encode_salt(os.urandom(16), min(max(log_rounds, 4), 31))", "def test_automatic_default_to_sha1(self) -> None:\n for version, expected in {\n self.sys_v3_8: (True, 'sha1'),\n self.sys_v3_9: (False, 'sha1'),\n self.sys_v4_8: (False, 'sha1'),\n }.items():\n _set_allowed_viable_default_hashes(self.sha1Default, version)\n set_hash_format(None, self.sha1Default, version)\n assert _get_hash_object(None, self.sha1Default, version) == expected", "def _basehash(self):\n if self.baseseed is not None:\n hashinput = (str(self.baseseed) + ',').encode()\n self.basehash = hashlib.sha256(hashinput)\n else:\n self.basehash = None", "def generateHash(*length):\n from string import letters, digits\n from random import choice\n pool, size = letters + digits, length or 10\n hash = ''.join([choice(pool) for i in range(size)])\n return hash.lower()", "def __hash__(self):\n token = \"\"\n for gamePiece in self.game_pieces:\n token = token + str(gamePiece.x) + str(gamePiece.y)\n \n hash_ = int(token) % 100000\n return hash_", "def is_mersenne(num: int) -> bool:\n s = 4\n m = (2 ** num) - 1\n for i in range(0, num - 2):\n s = ((s**2) - 2) % m\n return s == 0", "def confused(self, rand):\n return rand > 0", "def test_usedforsecurity_flag_behavior(self) -> None:\n for version, expected in {\n self.sys_v3_8: (True, 'md5'),\n self.sys_v3_9: (False, 'md5'),\n self.sys_v4_8: (False, 'md5'),\n }.items():\n assert _attempt_init_of_python_3_9_hash_object(self.fake_md5, version) == expected", "def _sample_using_random(\n self,\n p: float = 0.1,\n ):\n return sa.func.random() < p", "def test_hash():\n if not 
cloud_aws.boto_is_current:\n skip(\"boto is not installed or is too old\")\n sky = cloud.Sky()\n east_prop = dict(provider=\"aws-ec2\", region=\"us-east-1\")\n east1 = sky.get_provider(east_prop)\n east2 = sky.get_provider(east_prop)\n assert hash(east1) == hash(east2)\n assert hash(east1) != hash(east1.get_provider_key(east_prop))\n assert east1 == east2\n assert not east1 != east2\n\n west = sky.get_provider(dict(provider=\"aws-ec2\", region=\"us-west-1\"))\n assert hash(east1) != hash(west)\n assert not east1 == west\n assert east1 != west", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def generateHashFunctions(n):\n hashFunction = []\n for i in range(n):\n # Chooses random value to use as xor during a single hash function\n random_XORvalue = random.getrandbits(INT_SIZE)\n hashFunction.append((random_XORvalue))\n return hashFunction", "def hash(token, num_buckets):\n return murmurhash3_32(token, positive=True) % num_buckets", "def fnvhash(a): \n h = 2166136261 \n for i in a: \n t = (h * 16777619) & 0xffffffffL \n h = t ^ i \n return h", "def test_hash_url(self):\r\n url = u'http://google.com'\r\n hashed = generate_hash(url)\r\n self.assertEqual('aa2239c17609b2', hashed)", "def _hash_func(self, key: int) -> int:\n return key % self.capacity", "def test_hash_string(self):\n self.assertEqual(hexlify(self._hashdigest(pubkey_sha)), sample_ripe)" ]
[ "0.6889566", "0.6818092", "0.6662236", "0.64020073", "0.6377606", "0.6362132", "0.6194805", "0.61882335", "0.6186182", "0.61861044", "0.61729205", "0.615299", "0.6131213", "0.6115176", "0.6083449", "0.6062994", "0.60509396", "0.6027059", "0.59783775", "0.5965881", "0.59652704", "0.5960333", "0.593349", "0.58822453", "0.5878851", "0.5875208", "0.587366", "0.58725846", "0.5836218", "0.5825128", "0.58168745", "0.581502", "0.58097416", "0.5801333", "0.579637", "0.57949823", "0.5787195", "0.5779801", "0.57731146", "0.5769948", "0.5765483", "0.57475126", "0.57301974", "0.57300615", "0.5711973", "0.5710994", "0.57035613", "0.5692069", "0.5685949", "0.5682496", "0.5680667", "0.56687015", "0.5668452", "0.566653", "0.565931", "0.5656019", "0.56500095", "0.5639643", "0.5635712", "0.5631227", "0.56254053", "0.5624932", "0.5623066", "0.562105", "0.5619479", "0.5617374", "0.56056005", "0.5599941", "0.559259", "0.5588374", "0.5584703", "0.5577224", "0.55751795", "0.5572646", "0.55707973", "0.55703783", "0.55533993", "0.5551671", "0.5545184", "0.55432075", "0.55411124", "0.55395013", "0.553172", "0.55287683", "0.5528598", "0.55273837", "0.5526982", "0.5526518", "0.5525931", "0.5519332", "0.5516277", "0.5510646", "0.550861", "0.55057067", "0.5501904", "0.55010206", "0.5500626", "0.55006075", "0.5495196", "0.5494663" ]
0.624788
6
Test to see if using different classes works.
def test_stress():
    group = Group({
        "a": Numerical(),
        "b": Numerical(),
        "c": Categorical(list(range(5))),
        "d": Hashed(buckets=5),
        "e": Hashed(buckets=5, random_sign=True),
    })

    for i in range(100):
        group.set_a(random())
        group.set_b(random())
        group.set_c(randint(0, 4))
        for i in range(10):
            group.set_d(randstr())
            group.set_e(randstr())
        group.push()

    array = group.array()
    assert array.shape == (100, 17)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_do_check_class_names(self):\n self.assertTrue(self.a.do_check_class_names(self.a))\n self.assertFalse(self.a.do_check_class_names(self.b))", "def test_all_no_class(self):", "def test_all_no_class(self):", "def test_class_method(self):\n self.assertEqual(pyperry.Base.resolve_name.im_self.__name__, 'Base')", "def test_class_method(self):\n self.assertEqual(pyperry.Base.adapter.im_self.__name__, 'Base')", "def test_class_method(self):\n self.assertEqual(pyperry.Base.add_processor.im_self.__name__, 'Base')", "def test_starship(self):\n try:\n self.test = oop1.Starship()\n self.assertIsInstance(self.test, oop1.Starship)\n print(\"\\nPASS : Starship Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_class_method(self):\n self.assertEqual(self.Test.has_one.im_self.__name__, 'Test')", "def test_class(self):\n city1 = City()\n self.assertEqual(city1.__class__.__name__, \"City\")", "def test_motorcycle(self):\n try:\n self.test = oop1.Motorcycle()\n self.assertIsInstance(self.test, oop1.Motorcycle)\n print(\"\\nPASS : Class Exists\\n\")\n except NameError as e:\n print(e)", "def testInstance(self):\n self.assertTrue(isinstance(self, AppswellUnitTest))", "def test_car(self):\n try:\n self.test = oop1.Car()\n self.assertIsInstance(self.test, oop1.Car)\n print(\"\\nPASS : Car Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_blow_up(self):\n self.assertTrue(self.g.__class__)", "def test_explainer_class(self):\n assert self.explainer.__class__.__bases__[0].__name__ == 'ABC'\n assert self.explainer.__class__.__name__ == 'Explainer'", "def test_class_method(self):\n self.assertEqual(self.Test.scoped.im_self.__name__, 'Test')", "def test_airplane(self):\n try:\n self.test = oop1.Airplane()\n self.assertIsInstance(self.test, oop1.Airplane)\n print(\"\\nPASS : Airplane Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_class_method(self):\n self.assertEqual(self.Test.default_scope.im_self.__name__, 'Test')", "def test_training_is_training_instance(self):\n self.assertIsInstance(self.weekly_training, Training)\n self.assertIsInstance(self.one_off_training, Training)", "def test_isclassinstance():\n class MockClass:\n pass\n\n # Since Python3, everything is a class, so this means nothing (?)\n assert isclassinstance(0)\n assert isclassinstance(1.0)\n assert isclassinstance(complex(2.0))\n assert isclassinstance('foo')\n assert isclassinstance([])\n assert isclassinstance(())\n assert isclassinstance(range(6))\n assert isclassinstance(bytes(7))\n assert isclassinstance(bytearray())\n assert isclassinstance(memoryview(b'nine'))\n assert isclassinstance(set())\n assert isclassinstance(frozenset())\n assert isclassinstance({})\n assert isclassinstance(None)\n assert isclassinstance(MockClass())", "def test_class_method(self):\n self.assertEqual(self.Test.unscoped.im_self.__name__, 'Test')", "def test_class_method(self):\n self.assertEqual(self.Test.scope.im_self.__name__, 'Test')", "def test_instantiates_badgr_lite_class(self):\n badgr = self.get_badgr_setup()\n self.assertIsInstance(badgr, BadgrLite)", "def test_ground_vehicle(self):\n try:\n self.test = oop1.GroundVehicle()\n self.assertIsInstance(self.test, oop1.GroundVehicle)\n print(\"\\nPASS : GroundVehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_class_method(self):\n self.assertEqual(self.Test.current_scope.im_self.__name__, 'Test')", "def test_is_a_subclass():\n class MockParentClass:\n pass\n\n class MockChildClass(MockParentClass):\n pass\n\n class 
MockGrandchildClass(MockChildClass):\n pass\n\n class MockNonRelativeClass:\n pass\n\n # Test with instances\n parent = MockParentClass()\n child = MockChildClass()\n grandchild = MockGrandchildClass()\n other = MockNonRelativeClass()\n assert is_a_subclass(1, int)\n assert is_a_subclass(child, MockParentClass)\n assert is_a_subclass(grandchild, MockParentClass)\n assert is_a_subclass(grandchild, MockChildClass)\n assert not is_a_subclass(1, float)\n assert not is_a_subclass(parent, MockNonRelativeClass)\n assert not is_a_subclass(child, MockNonRelativeClass)\n assert not is_a_subclass(grandchild, MockNonRelativeClass)\n assert not is_a_subclass(other, MockParentClass)\n assert not is_a_subclass(other, MockChildClass)\n assert not is_a_subclass(other, MockGrandchildClass)\n assert not is_a_subclass(parent, MockChildClass)\n assert not is_a_subclass(parent, MockGrandchildClass)\n assert not is_a_subclass(child, MockGrandchildClass)\n\n # Test with types\n assert is_a_subclass(int, int)\n assert is_a_subclass(MockChildClass, MockParentClass)\n assert is_a_subclass(MockGrandchildClass, MockParentClass)\n assert is_a_subclass(MockGrandchildClass, MockChildClass)\n assert not is_a_subclass(int, float)\n assert not is_a_subclass(MockParentClass, MockNonRelativeClass)\n assert not is_a_subclass(MockChildClass, MockNonRelativeClass)\n assert not is_a_subclass(MockGrandchildClass, MockNonRelativeClass)\n assert not is_a_subclass(MockNonRelativeClass, MockParentClass)\n assert not is_a_subclass(MockNonRelativeClass, MockChildClass)\n assert not is_a_subclass(MockNonRelativeClass, MockGrandchildClass)\n assert not is_a_subclass(MockParentClass, MockChildClass)\n assert not is_a_subclass(MockParentClass, MockGrandchildClass)\n assert not is_a_subclass(MockChildClass, MockGrandchildClass)", "def test_class_name(self):\n r = Review()\n r_dictionary = r.to_dict()\n self.assertIn('__class__', r_dictionary)", "def check_instance(self):\n self.assertIsInstance(self.amenity_1, amenity)\n self.assertIsInstance(self.amenity_2, amenity)", "def test_flight_vehicle(self):\n try:\n self.test = oop1.FlightVehicle()\n self.assertIsInstance(self.test, oop1.FlightVehicle)\n print(\"\\nPASS : FlightVehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_01_CheckClassTool(self):\n portal = self.portal\n self.assertNotEqual(None,getattr(portal,'portal_classes',None))\n self.commit()\n # check if web UI works\n portal_classes = portal.portal_classes\n portal_classes.manage_viewDocumentList()\n portal_classes.manage_viewPropertySheetList()\n portal_classes.manage_viewConstraintList()\n portal_classes.manage_viewExtensionList()\n portal_classes.manage_viewTestList()", "def test_class_name(self):\n self.assertEqual(self.adapter.class_name, 'LogicAdapter')", "def test_cl_fix():\n assert Cl is BaseCl", "def test_no_unlisted_classes_derived_from_Target(self):\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n self.logger.debug(module)\n # Skip over smirnoff_hack because it is not intended to contain any Target implementations.\n if module in [\"_dcdlib\", \"smirnoff_hack\"]: continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' 
+ module))\n self.logger.debug(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if inspect.isclass(obj) and issubclass(obj, forcebalance.target.Target):\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Hessian',\n 'Thermo',\n 'Hydration',\n 'Moments', \n 'OptGeoTarget',\n 'TorsionProfileTarget']\n self.logger.debug(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n pytest.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def test_class_errored(self, cls, exception):", "def test_vehicle(self):\n try:\n self.test = oop1.Vehicle()\n self.assertIsInstance(self.test, oop1.Vehicle)\n print(\"\\nPASS : Vehicle Class Exists\\n\")\n except NameError as e:\n print(e)", "def test_core_object_model_inheritance():\n assert issubclass(Question, CoreObject)\n assert issubclass(Fact, CoreObject)\n assert issubclass(Topic, CoreObject)\n assert issubclass(Word, CoreObject)\n assert issubclass(Book, CoreObject)", "def test_subclass():\n assert issubclass(BlockOnSpring, PhysicsModule)", "def test_should_implement(self):\n pass", "def test_class_method(self):\n self.assertEqual(self.Test.relation.im_self.__name__, 'Test')", "def test_classes(self):\r\n css_classes = [\r\n ('unsubmitted', 'unanswered'),\r\n ('incomplete', 'incorrect'),\r\n ('queued', 'processing'),\r\n ('correct', 'correct'),\r\n ('test', 'test'),\r\n ]\r\n for status, classname in css_classes:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.classname, classname)", "def test_class_exists(self):\n\n self.assertTrue(hasattr(Account, self.klass_name))", "def class_is(cls: Class) -> bool:\n pass", "def mockup(cls):\n pass", "def class_is_interesting(name: str):\n if name.startswith('org.chromium.'):\n return True\n return False", "def test_init(self):\n res = computer.Computer(1)\n exp = computer.Computer\n self.assertIsInstance(res, exp)", "def test_returns_class(self):\n assert type is simple_class().__class__", "def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' 
+ module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def test_component_class_and_module(self):\r\n\t\tself.assertTrue(self._configuration_[\"AddWordDefinitionTask\"].class_name() == \"AddWordDefinitionTask\" and\r\n\t\t self._configuration_[\"AddWordDefinitionTask\"].module_name() == \"TestPlugins\")", "def check_from_class(self):\n context = TestContext(session_context=ducktape_mock.session_context(),\n cls=DummyTest, function=DummyTest.test_class_description)\n assert context.description == \"class description\"", "def test_class_method(self):\n self.assertEqual(pyperry.Base.add_middleware.im_self.__name__, 'Base')", "def test_contribute_to_class(self):\n self.assertEquals('tests', self.Article.elasticsearch.index_name)\n self.assertEquals('article', self.Article.elasticsearch.type)", "def test_instance(self):\n self.assertIsInstance(self.test1, BaseModel)", "def test_load_class():\n full_classname = 'collections.namedtuple'\n cls_ = load_class(full_classname)\n assert cls_ is collections.namedtuple\n\n with pytest.raises(ValueError):\n full_classname = 'collections.Foobar'\n load_class(full_classname)\n\n with pytest.raises(ImportError):\n full_classname = 'barfoo.Foobar'\n load_class(full_classname)", "def test_same_class_method_name_different_class(self):\n self.apple.add_sweet_letter(self.cherry)\n apple_add_sweet_cherry_key = get_function_cache_key('class_method', 'tests.Fruit.add_sweet_letter',\n (self.apple, self.cherry), {})\n self.assertExpectedKeyInCache(apple_add_sweet_cherry_key)\n\n self.celery.add_sweet_letter(self.cherry)\n celery_add_sweet_cherry_key = get_function_cache_key('class_method', 'tests.Vegetable.add_sweet_letter',\n (self.celery, self.cherry), {})\n self.assertExpectedKeyInCache(celery_add_sweet_cherry_key)\n\n self.assertNotEqual(apple_add_sweet_cherry_key, celery_add_sweet_cherry_key)", "def test_is_a():\n class MockParentClass:\n pass\n\n # Test primitives\n objects = [\n 0,\n 1.0,\n complex(2.0),\n 'foo',\n [],\n (),\n range(6),\n bytes(7),\n bytearray(),\n memoryview(b'nine'),\n set(),\n frozenset(),\n {},\n MockParentClass(),\n ]\n types = [\n int,\n float,\n complex,\n str,\n list,\n tuple,\n range,\n bytes,\n bytearray,\n memoryview,\n set,\n frozenset,\n dict,\n MockParentClass,\n ]\n\n # Test primitives (+ one class) against each other\n assert len(objects) == len(types)\n for i in range(0, len(objects)):\n assert is_a(objects[i], types[i])\n for j in range(0, len(objects)):\n if j == i:\n continue\n assert not is_a(objects[i], types[j])\n\n class MockChildClass(MockParentClass):\n pass\n\n class MockNonRelativeClass:\n pass\n\n # Test with class inheritance\n child = MockChildClass()\n other = MockNonRelativeClass()\n assert is_a(child, MockChildClass)\n assert is_a(child, MockParentClass)\n assert not is_a(other, MockParentClass)\n assert not is_a(child, MockNonRelativeClass)", "def test_4_4_1_1(self):\n pass", "def test_service_support(self):\n 
self.assertTrue(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "def test_check_instance_explainer_functionality():\n type_error = 'The suppress_warning parameter should be a boolean.'\n inheritance_warning = (\n 'Every explainer object should inherit from fatf.utils.transparency.'\n 'explainers.Explainer abstract class.')\n\n class ClassPlain(object):\n pass\n\n class_plain = ClassPlain()\n\n class ClassInit(fute.Explainer):\n def __init__(self):\n pass\n\n class_init = ClassInit()\n\n class ClassExplainer1(object):\n def explain_instance(self):\n pass # pragma: no cover\n\n class_explainer_1 = ClassExplainer1()\n\n class ClassExplainer2(fute.Explainer):\n def explain_instance(self, x, y):\n pass # pragma: no cover\n\n class_explainer_2 = ClassExplainer2()\n\n class ClassExplainer3(object):\n def explain_instance(self, x):\n pass # pragma: no cover\n\n class_explainer_3 = ClassExplainer3()\n\n class ClassExplainer4(fute.Explainer):\n def explain_instance(self, x, y=3):\n pass # pragma: no cover\n\n class_explainer_4 = ClassExplainer4()\n\n class ClassExplainer5(object):\n def explain_instance(self, x, y=3, z=3):\n pass # pragma: no cover\n\n class_explainer_5 = ClassExplainer5()\n\n with pytest.raises(TypeError) as exinf:\n fute.check_instance_explainer_functionality(class_plain, 'False')\n assert str(exinf.value) == type_error\n with pytest.raises(TypeError) as exinf:\n fute.check_instance_explainer_functionality(ClassPlain, 'True')\n assert str(exinf.value) == type_error\n\n msg = \"The *{}* (explainer) class is missing 'explain_instance' method.\"\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_plain,\n False) is False\n assert len(warning) == 2\n assert msg.format('ClassPlain') == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(ClassPlain) is False\n assert len(warning) == 2\n assert msg.format('ClassPlain') == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_plain,\n True) is False\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n msg = (\"The 'explain_instance' method of the *{}* (explainer) class has \"\n 'incorrect number ({}) of the required parameters. It needs to '\n 'have exactly 1 required parameter(s). 
Try using optional '\n 'parameters if you require more functionality.')\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(class_init,\n False) is False\n assert len(warning) == 1\n assert msg.format('ClassInit', 0) == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(ClassInit) is False\n assert len(warning) == 1\n assert msg.format('ClassInit', 0) == str(warning[0].message)\n\n assert fute.check_instance_explainer_functionality(class_init,\n True) is False\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_1, False) is False\n assert len(warning) == 2\n assert msg.format('ClassExplainer1', 0) == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer1) is False\n assert len(warning) == 2\n assert msg.format('ClassExplainer1', 0) == str(warning[0].message)\n assert inheritance_warning == str(warning[1].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_1, True) is False\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_2, False) is False\n assert len(warning) == 1\n assert msg.format('ClassExplainer2', 2) == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_2) is False\n assert len(warning) == 1\n assert msg.format('ClassExplainer2', 2) == str(warning[0].message)\n\n assert fute.check_instance_explainer_functionality(class_explainer_2,\n True) is False\n\n #\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_3, False) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer3, True) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n #\n\n assert fute.check_instance_explainer_functionality(class_explainer_4,\n False) is True\n assert fute.check_instance_explainer_functionality(ClassExplainer4,\n True) is True\n\n #\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n class_explainer_5, False) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)\n\n with pytest.warns(UserWarning) as warning:\n assert fute.check_instance_explainer_functionality(\n ClassExplainer5, True) is True\n assert len(warning) == 1\n assert inheritance_warning == str(warning[0].message)", "def setup_class(klass):", "def setup_class(klass):", "def test_inheritance(self):\n self.assertTrue(issubclass(Rectangle, Base))", "def test_not_equal_different_class(self):\n test1 = self.Test({ 'id': 1, 'name': 'Poop Head' })\n test2 = self.Test2({ 'id': 1, 'name': 'Poop Head' })\n self.assertNotEqual(test1, test2)", "def testHasOutputClass(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n self.assertTrue(manager.OutputManager.HasOutputClass('test_output'))\n 
self.assertFalse(manager.OutputManager.HasOutputClass('bogus'))\n self.assertFalse(manager.OutputManager.HasOutputClass(1))\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def test_8_instantiation(self):\n b = User()\n self.assertEqual(str(type(b)), \"<class 'models.user.User'>\")\n self.assertIsInstance(b, User)\n self.assertTrue(issubclass(type(b), BaseModel))", "def test_class_eq_method(self, test_instances):\n a, b, _ = test_instances\n\n assert a == b", "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "def test_instance(self):\n self.assertEqual(True, type(self.Test.defined_associations['something']) is pyperry.association.BelongsTo)", "def test_that_class_teacher_attribute_homework_done_is_the_same_with_instance_of_this_class():\n temp_1 = opp_teacher.homework_done\n temp_2 = Teacher.homework_done\n assert temp_1 == temp_2", "def testGetOutputClass(self):\n manager.OutputManager.RegisterOutput(TestOutput)\n\n output_class = manager.OutputManager.GetOutputClass('test_output')\n self.assertEqual(output_class, TestOutput)\n\n with self.assertRaises(ValueError):\n _ = manager.OutputManager.GetOutputClass(1)\n\n with self.assertRaises(KeyError):\n _ = manager.OutputManager.GetOutputClass('bogus')\n\n manager.OutputManager.DeregisterOutput(TestOutput)", "def test_can_instantiate(self):\n\n exc_thrown = False\n\n try:\n self.klass(*self.instantiate_args)\n except Exception:\n exc_thrown = True\n\n self.assertFalse(exc_thrown)", "def test_differentClassesInequality(self):\n self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))", "def test_already_registered_002(self):\n\n class MyChecker(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_long_code():\n \"\"\"Do nothing.\"\"\"\n return \"something\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return []\n\n class MyContext(object):\n \"\"\"Do nothing.\"\"\"\n\n @staticmethod\n def get_order():\n \"\"\"Do nothing.\"\"\"\n return 0\n\n @staticmethod\n def run(_, __):\n \"\"\"Do nothing.\"\"\"\n return\n\n registry.register_checker(MyChecker)\n\n with self.assertRaises(EnvironmentError):\n registry.register_checker(MyChecker)\n\n registry.register_context(MyContext)\n\n with self.assertRaises(EnvironmentError):\n registry.register_context(MyContext)", "def test_class_started(self, cls):", "def test_not_inheritance(self):\n self.assertNotIsInstance(Base, Square)\n self.assertNotIsInstance(Rectangle, Square)", "def test_instance(self):\n self.assertIsInstance(self.user_1, User)", "def test_instance(self):\n b = Review()\n self.assertIsInstance(b, Review)\n self.assertTrue(issubclass(type(b), BaseModel))", "def test_entities__Entity__getClass__1(entity):\n assert Dummy == entity.getClass()", "def test(self):\n raise NotImplementedError", "def test_class_ended(self, cls):", "def test_subsystems(self):\n pass", "def test_is_instance(self):\n self.assertTrue(isinstance(self.profile, Profile))", "def test_core_object_types_global():\n for core_object_type in CORE_OBJECT_TYPES:\n core_object = get_object_from_string(core_object_type)\n assert core_object.__name__.lower() == core_object_type", "def test_equality(self):\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n equal_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node, equal_node)", "def test_instantiate_valid_target(self):\n # create test configs\n test_configs = [\n 
{\"_target_\": \"collections.deque\"},\n {\"_target_\": \"collections.UserString\", \"seq\": \"test string\"}\n ]\n\n # create truth objects\n truth_objs = [deque(), UserString(\"test string\")]\n\n # check that instantiate returns truth object for each config\n for truth_obj, test_config in zip(truth_objs, test_configs):\n self.assertEqual(truth_obj, instantiate(test_config))", "def test_required_methods(self):\n\n required_methods = ('__init__', 'load')\n\n for method in required_methods:\n self.assertIn(method, dir(DatasetLoader_Jakob2019))", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test___get_all_classes():\n config = {\"plugins\": [\"tests.mock_plugin\"]}\n classes = r._get_all_classes(config, r.DataSource)\n assert \"food\" in classes\n classes = r._get_all_classes(config, r.DataSink)\n assert \"food\" in classes", "def safe_isinstance(obj, class_path_str):\n # this function is copy-paste from the code of the SHAP Python library\n # Copyright (c) 2018 Scott Lundberg\n if isinstance(class_path_str, str):\n class_path_strs = [class_path_str]\n elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):\n class_path_strs = class_path_str\n else:\n class_path_strs = ['']\n\n # try each module path in order\n for class_path_str in class_path_strs:\n if \".\" not in class_path_str:\n raise ValueError(\"class_path_str must be a string or list of strings specifying a full \\\n module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'\")\n\n # Splits on last occurence of \".\"\n module_name, class_name = class_path_str.rsplit(\".\", 1)\n\n # here we don't check further if the model is not imported, since we shouldn't have\n # an object of that types passed to us if the model the type is from has never been\n # imported. 
(and we don't want to import lots of new modules for no reason)\n if module_name not in sys.modules:\n continue\n\n module = sys.modules[module_name]\n\n #Get class\n _class = getattr(module, class_name, None)\n\n if _class is None:\n continue\n\n if isinstance(obj, _class):\n return True\n\n return False", "def test_differentClassesEquality(self):\n self.assertFalse(Record(1, 2) == DifferentRecord(1, 2))", "def test_issubclass(self):\n self.assertTrue(issubclass(self.rev.__class__, BaseModel), True)", "def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)", "def test_instantiate_library_run1(self):\n\t\tobj_ut = sentiment.LibraryRun(self.text1, self.lib)\n\t\tself.assertIsInstance(obj_ut, sentiment.LibraryRun)", "def test_inherent_instance_method() -> None:\n assert lmp.tknzr._bpe.BPETknzr.dec == BaseTknzr.dec\n assert lmp.tknzr._bpe.BPETknzr.enc == BaseTknzr.enc\n assert lmp.tknzr._bpe.BPETknzr.norm == BaseTknzr.norm\n assert lmp.tknzr._bpe.BPETknzr.pad_to_max == BaseTknzr.pad_to_max\n assert lmp.tknzr._bpe.BPETknzr.vocab_size == BaseTknzr.vocab_size", "def test_type(self):\n self.assertEqual(type(self.base1), BaseModel)\n self.assertEqual(type(self.base2), BaseModel)", "def test_entities__Entity__getClass__2():\n e = Entity(None, IDummy, None)\n with pytest.raises(ValueError):\n e.getClass()", "def test_get_classes(self):\n self.view.learning_model = TestSingleLabelClassifierModel()\n self.assertEqual(self.view.get_classes(), TestSingleLabelClassifierModel.classes)", "def test_type(self):\n assert issubclass(Error, Exception)\n assert Error.__name__ == \"Error\"", "def test_class_method(self):\n self.assertEqual(self.Test.belongs_to.im_self.__name__, 'Test')", "def test_derived_app_inheritance(s):\n assert hasattr(s['derived-app'], 'simple_none_return')\n assert hasattr(s['derived-app'], 'derived_method')\n assert 'derived-collection' in s['derived-app']\n\n assert not hasattr(s['simple-app'], 'derived_method')\n assert 'derived-collection' not in s['simple-app']", "def test_cl_fix():\n assert issubclass(Cl, ClFixHybridPressureCoord)" ]
[ "0.6858586", "0.67248607", "0.67248607", "0.6477522", "0.64745814", "0.6398199", "0.6391246", "0.63339883", "0.633331", "0.63200086", "0.6300495", "0.6298434", "0.62441707", "0.6239218", "0.6229985", "0.62199354", "0.62179506", "0.62132084", "0.61932254", "0.6172138", "0.61662114", "0.61605424", "0.6139733", "0.6136834", "0.61148375", "0.60704875", "0.6063382", "0.60442424", "0.604112", "0.60368985", "0.602979", "0.601972", "0.6018769", "0.6008488", "0.5990348", "0.5988652", "0.59668857", "0.5966159", "0.59634864", "0.5962535", "0.5962169", "0.592663", "0.59169763", "0.59144497", "0.59117216", "0.5908613", "0.5907328", "0.5879924", "0.5864304", "0.58635426", "0.5861087", "0.58577037", "0.58499485", "0.5849095", "0.5845595", "0.5834933", "0.5830795", "0.5819824", "0.5819824", "0.5815154", "0.58148855", "0.58123285", "0.5812132", "0.5809681", "0.58095837", "0.580215", "0.5799223", "0.57952166", "0.5790946", "0.578366", "0.57816494", "0.57658714", "0.57585764", "0.5751874", "0.5744401", "0.5740702", "0.57352346", "0.5735157", "0.5730678", "0.5728492", "0.5727065", "0.57211924", "0.57183325", "0.571372", "0.57109994", "0.57109994", "0.57109994", "0.5694021", "0.5691254", "0.56911457", "0.5690234", "0.5689797", "0.5688591", "0.5687205", "0.5686881", "0.56866467", "0.5684072", "0.56810033", "0.5674943", "0.566946", "0.56692433" ]
0.0
-1
Test if custom features work.
def test_custom_features(): group = Group({ "a": CustomSized(), "b": CustomNamed(), "c": CustomDynamic(), "d": CustomSlotList(), "e": CustomSlotDict(), }) for _ in range(10): for x in range(4): group.set_a(x) for x in "abcd": group.set_b(x) group.set_c("blub") group.set_d() group.set_e() group.push() array = group.array() assert array.shape == (10, 15)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_supported_features(self):", "def exposes_features(self):\n return self._features_op is not None", "def __call__(self, feature):\n return self.is_enabled(feature)", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_overflow\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_datetime\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_errors\", \"feature\"), flags=TE)", "def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)", "def feature():\n pass", "def __contains__(self, feature):\n return feature in self.features", "def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False", "def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False", "def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])", "def __check_features(f_list, stopwords):\n ok = True\n for f in f_list:\n if not(__check_feature(f,stopwords)):\n return False\n return True", "def findFeatures(self):\n\t\tpass", "def __contains__(self, feature):\n return feature == 'cvarsort' or feature in self.features", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def test_read_feature_collection(self):\n fc = self.read_feature()\n assert len(fc.features) == 1\n feature = fc.features[0]\n self.check_feature(feature)", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n 
test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def has_feature(self, feature):\n features = self.features\n if features is None:\n return False\n \n return feature in features", "def matches(self, feature):\n pass", "def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')", "def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results", "def _include_feature(self, name):\n return (self._feature_names is None or name in self._feature_names or\n name.startswith(self._neighbor_config.prefix))", "def uses_feature(self, fcname):\n used = False\n if any([fcname.upper() in y for y in [x.upper() for x in self._featureclasses]]):\n used = True\n return used", "def uses_unsupported_feature_or_framework(notebook, skip_args):\n functionalities_to_check = {\n \"docker\": [\"docker\\s+\", \"docker-compose\\s+\"],\n \"local_mode\": ['instance_type\\s*=\\s*\"local\"'],\n \"fsx_efs\": [\"\\s+(efs|EFS)\\s+\", \"^(EFS|efs)\\s+\"]\n }\n\n for identifier in functionalities_to_check:\n if skip_args.get(identifier, True) and contains_code(notebook, functionalities_to_check.get(identifier)):\n return True\n\n return False", "def special_features(self):\r\n return self._special_features", "def is_test(self):\r\n return self.has_label('tests')", "def _implements_test_batch_hooks(self):\n return not is_default(self.on_test_batch_begin) or not is_default(\n self.on_test_batch_end\n )", "def f_supports(self, data):\n return True", "def test_add_feature():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"test\")\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:test\",\n \"/NoRestart\",\n ]\n )", "def important_features_(self):\n return self.scores_ > self.score_cutoff_", "def test_feature_in_collection(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n feature = fc1.features[0]\n assert fc1.feature_in_collection(feature)\n\n feature = fc2.features[0]\n assert not fc1.feature_in_collection(feature)", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]", "def has_custom_gate(self) -> bool:\n for qubit_inst in self._map.values():\n for entry in qubit_inst.values():\n if entry.user_provided:\n return True\n return False", "def testfeatures(self):\n self.set_wdiff()\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()", "def _general_testing(self, 
context, kind, *args, **kwargs):\r\n if kind == \"fake_next_op\":\r\n self._register_fake_next_op(context.channel, *args, **kwargs)\r\n self._reply(context, proto_success({}, None), None)\r\n return True\r\n self._reply(context, proto_failure({\"Unsupported testing function '{}'\".format(kind)}), None)\r\n return False", "def test_add_feature_with_extras():\n mock = MagicMock()\n with patch.dict(dism.__salt__, {\"cmd.run_all\": mock}):\n dism.add_feature(\"sponge\", \"bob\", \"C:\\\\temp\", True, True)\n mock.assert_called_once_with(\n [\n dism.bin_dism,\n \"/Quiet\",\n \"/Online\",\n \"/Enable-Feature\",\n \"/FeatureName:sponge\",\n \"/PackageName:bob\",\n \"/Source:C:\\\\temp\",\n \"/LimitAccess\",\n \"/All\",\n \"/NoRestart\",\n ]\n )", "def boolean_func(experiment):", "def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)", "def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)", "def test_feature(feature, value, good_features):\r\n\tbase_write(good_features,\"bin/stanford-ner-2015-04-20/base.prop\")\r\n\tbase_prop = open(\"bin/stanford-ner-2015-04-20/base.prop\", \"a\")\r\n\tbase_prop.write(feature.strip() + \"=\" + str(value) + \"\\n\")\r\n\tbase_prop.close()\r\n\r\n\t#Test read base.prop - To display in console\r\n\tread = open(\"bin/stanford-ner-2015-04-20/base.prop\").read()\r\n\tlogging.warning(read)\r\n\r\n\tos.system(\"bash src/other/features/features_selection.sh\")", "def test__validate_features__0():\n for input_value, expected_output in (\n (None, None),\n ([], None),\n ([GuildFeature.animated_banner], (GuildFeature.animated_banner, )),\n ([GuildFeature.animated_banner.value], (GuildFeature.animated_banner, )),\n (\n [GuildFeature.animated_banner, GuildFeature.animated_icon],\n (GuildFeature.animated_banner, GuildFeature.animated_icon,),\n ),\n ):\n output = validate_features(input_value)\n vampytest.assert_eq(output, expected_output)", "def specialFeatures(self):\r\n return self._specialFeatures", "def detect(self, features):\n pass # TODO", "def test_findFeatures(self):\n features = self.builder._findChanges(\n self.project, self.builder._FEATURE)\n self.assertEquals(\n features,\n [(5, \"We now support the web.\"),\n (12, \"The widget is more robust.\"),\n (15,\n \"A very long feature which takes many words to describe with \"\n \"any accuracy was introduced so that the line wrapping behavior \"\n \"of the news generating code could be verified.\"),\n (16, \"A simpler feature described on multiple lines was added.\")])", "def supports(self, x):\n return True", "def _testFeatureDefault(self, name, features=None):\n default = irc.ServerSupportedFeatures()._features[name]\n\n if features is None:\n features = [(\"DEFINITELY_NOT\", \"a_feature\")]\n\n supported = self._parse(features)\n self.assertTrue(supported.hasFeature(name))\n 
self.assertEqual(supported.getFeature(name), default)", "def applyFeatureTest(tgen):\n\n if not _shared.withTests:\n # Ignore all build tasks for tests in this case\n for task in tgen.tasks:\n task.runnable_status = lambda: Task.SKIP_ME", "def test_add_to_whitelist(self):\n\n self.feature_test.add_to_whitelist(3)\n self.assertTrue(3 in Feature(\"testing\").whitelist)", "def test__extract_features(self):\n text_sample = \"I really really love this movie\"\n feature_sample = ['really','love','good']\n feature_score_type = \"presence\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':1,'love':1,'good':0})\n feature_score_type = \"term_frequency\"\n model_sample = Model(feature_sample,feature_score_type)\n result_features = model_sample.extract_features(text_sample)\n assert_equal(result_features,{'really':2,'love':1,'good':0})", "def parse_features(self, skip=...):\n ...", "def parse_features(self, skip=...):\n ...", "def contains_feat(title):\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))", "def test_available_features():\n features = (\n \"Feature Name : Capa1\\r\\n State : Enabled\\r\\n\"\n \"Feature Name : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.available_features()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Features\"]\n )\n assert out == [\"Capa2\"]", "def testable(self):\n\t\treturn True", "def test_feature_is_filtered(self):\n\n # Duplicate 1st row in var and assigned to 2nd\n self.validator.adata.var[\"feature_is_filtered\"][0] = True\n for i in range(self.validator.adata.X.shape[0]):\n self.validator.adata.X[i, 0] = 0\n self.validator.adata.X[0, 0] = 1\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Some features are 'True' in 'feature_is_filtered' of dataframe 'var', \"\n \"but there are 1 non-zero values in the corresponding columns of the matrix 'X'. 
\"\n \"All values for these features must be 0.\"\n ],\n )", "def feat():\n pass", "def return_flag_on_feature(self, feature, pos_tag_list):\n for i in pos_tag_list:\n if i[1] == feature:\n return True\n return False", "def supported_features(self):\n return MEURAL_SUPPORT", "def test_visible_white_and_blacklisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(self.feature_test.is_visible(3))", "def test_if(self):", "def spec(self) -> bool:\n\t\treturn True", "def __check_feature(f,stopwords):\n if f == \"\" or f == None:\n return None\n if f == \"RT\":\n return False\n if f == \"via\":\n return False\n if len(re.findall(r\"(\\w)\", f)) < 1:\n return False\n if f == \"&amp\":\n return False\n if f in stopwords:\n return False\n if len(f) < 2:\n return False\n else:\n return True", "def test_can_enable_features_per_user(page):\n username = login_as_superuser(page)\n\n features.toggle_feature_for_user(page=page,\n feature_name='team-management-redo',\n username=username,\n enable=False)\n\n page.goto(\"/form/view\")\n nav = page.locator('.usa-nav__primary li').all()\n assert len(nav) == 2\n\n features.toggle_feature_for_user(page=page,\n feature_name='team-management-redo',\n username=username,\n enable=True)\n\n page.goto(\"/form/view\")\n nav = page.locator('.usa-nav__primary li').all()\n assert len(nav) == 3\n assert '🆕 Team Management' in nav[2].text_content().strip()\n\n features.toggle_feature_for_user(page=page,\n feature_name='team-management-redo',\n username=username,\n enable=False)\n\n page.goto(\"/form/view\")\n nav = page.locator('.usa-nav__primary li').all()\n assert len(nav) == 2", "def test_support_SAFELIST(self):\n self.assertEqual(self._parseFeature(\"SAFELIST\"), True)", "def runTest(self):\n return True", "def supports(self, message):\r\n if message.method == '__testing__':\r\n return True\r\n return self._interface.supports(message)", "def test_registry():\n assert(CQT.get_id() in msaf.base.features_registry.keys())\n assert(PCP.get_id() in msaf.base.features_registry.keys())\n assert(Tonnetz.get_id() in msaf.base.features_registry.keys())\n assert(MFCC.get_id() in msaf.base.features_registry.keys())\n assert(Tempogram.get_id() in msaf.base.features_registry.keys())", "def getFeatures(self, state, action):\n util.raiseNotDefined()", "def assert_has_feature(self, feature_name):\n if not self.features.get(\"has_{}\".format(feature_name), False):\n self.raise_config_error(\"Platform {} does not support to configure {feature_name}. 
\"\n \"Please make sure the platform \"\n \"you configured for {feature_name} actually supports that type \"\n \"of devices.\".format(self.__class__, feature_name=feature_name), 99)", "def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")", "def test_copy_features(self):\n fc = self.read_feature()\n other = FeatureCollection(features=fc.features,\n otherProperties=fc.otherProperties)\n assert len(other.features) == 1\n feature = other.features[0]\n\n self.check_feature(feature)", "def supports_index_feature(attr_name):\n return supports_indexes and hasattr(_test_index, attr_name)", "def should_fake_it(self):\n try:\n environment.get(\"FakeIt\")\n return True\n except KeyError:\n return False", "def _missing_feature_to_skipped_tests(self):\n # FIXME: This list matches WebKitWin and should be moved onto the Win port.\n return {\n \"Accelerated Compositing\": [\"compositing\"],\n \"3D Rendering\": [\"animations/3d\", \"transforms/3d\"],\n }", "def test(self):\n raise NotImplementedError", "def features(self, state, action, next_state):\n raise NotImplementedError", "def testable(self):\n return False", "def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)", "def has_test_docs(self):\n pass", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def supported_features(self):\n return SUPPORT_FLAGS", "def check_featuring(self):\n existing_featuring = pd.read_csv(self.path_checkpoint)\n array_to_check = [float(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n bool_answer = (existing_featuring == array_to_check).all(1).any()\n # self.file_checkpoint_data.close()\n return bool_answer", "def test_can_return_all_current_features_only(self):\n returned_features = return_current_features()\n self.assertTrue(len(returned_features) > 0)\n for feature in returned_features:\n self.assertTrue(feature.is_feature)\n feature_admin_object = SuggestionAdminPage.objects.get(suggestion=feature)\n self.assertTrue(feature_admin_object.in_current_voting_cycle)\n\n all_current_features_admin = SuggestionAdminPage.objects.filter(suggestion__is_feature=True,\n in_current_voting_cycle=True)\n self.assertEqual(len(all_current_features_admin), len(returned_features))", "def _detect(self):\n return True", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def test_validate_media_player_features():\n config = {}\n attrs = {ATTR_SUPPORTED_FEATURES: 20873}\n entity_state = State(\"media_player.demo\", \"on\", attrs)\n assert validate_media_player_features(entity_state, config) is True\n\n config = {FEATURE_ON_OFF: 
None}\n assert validate_media_player_features(entity_state, config) is True\n\n entity_state = State(\"media_player.demo\", \"on\")\n assert validate_media_player_features(entity_state, config) is False", "def test_func(self):\n self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def filter_feature(feature, typ, value):\n return value is None or feature.__getattribute__(typ) == value", "def has_action2(self, feature):\n return feature in self._action2", "def check_feature(feature, expected_name='Adriatic Sea',\n expected_type='Polygon'):\n assert feature['properties']['name'] == expected_name\n assert feature['properties']['component'] == 'ocean'\n assert feature['geometry']['type'] == expected_type", "def test_add_to_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(3 in Feature(\"testing\").blacklist)", "def hasTest(self):\n\n self.has_test = False\n cmake_file = os.path.join(self.path, 'CMakeLists.txt')\n with open(cmake_file, 'r') as file:\n lines = file.read().split()\n\n for line in lines:\n line = line.strip()\n if line.startswith('#'): \n line = ''\n test_keywords = ['catkin_add_gtest', 'add_rostest_gtest', 'catkin_add_nosetests', 'add_rostest']\n if any([keyword in line for keyword in test_keywords]):\n self.has_test = True\n break \n\n return self.has_test", "def test_gtf(self):\n #TODO write bed tests", "def _init_feature_processer(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.FEATURE_ENGINEERING_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._feature_processor = data_processor.DataProcessor(conf=conf,log_path = self.xeasy_log_path)\n if self._feature_processor.init() == runstatus.RunStatus.SUCC:\n return True\n else:\n return False\n except Exception as err:\n self.managerlogger.logger.error(\"init model error: %s\" % err)\n self.errorlogger.logger.error(\"init model error:\\n %s\" % traceback.format_exc())\n return False", "def list_feature_tests(self):\n\t\treturn self.test_names", "def hasFeature(self, Union, QDesignerFormWindowInterface_Feature=None, QDesignerFormWindowInterface_FeatureFlag=None): # real signature unknown; restored from __doc__\n return False" ]
[ "0.71229833", "0.6740728", "0.6653905", "0.6572284", "0.65463126", "0.6525058", "0.651423", "0.6466732", "0.6466732", "0.64624584", "0.6455726", "0.63872254", "0.6296405", "0.62341386", "0.6185639", "0.61484396", "0.6127456", "0.61109376", "0.60871255", "0.60606647", "0.6051658", "0.60405594", "0.6036131", "0.6014857", "0.5997329", "0.5995445", "0.5993773", "0.5956921", "0.59530103", "0.5951228", "0.5941957", "0.59332937", "0.59225285", "0.59213126", "0.59135234", "0.58685744", "0.58541405", "0.585181", "0.58407414", "0.5835358", "0.5832119", "0.58275735", "0.5807905", "0.5806153", "0.5804737", "0.5802067", "0.5793419", "0.57924736", "0.5776932", "0.5760241", "0.5760241", "0.57586807", "0.5745923", "0.5737192", "0.5726285", "0.57233524", "0.5717839", "0.5706902", "0.5701152", "0.56993353", "0.56947196", "0.5673054", "0.5666256", "0.56536376", "0.56527174", "0.56470937", "0.56446624", "0.56021553", "0.5597422", "0.55944455", "0.55851257", "0.5575888", "0.55750954", "0.55738664", "0.557335", "0.55689216", "0.55688435", "0.555821", "0.55467224", "0.55464333", "0.55464333", "0.55464333", "0.55464333", "0.5534855", "0.55334324", "0.5528606", "0.55280685", "0.55280685", "0.55280685", "0.55280685", "0.55211943", "0.5502708", "0.5497798", "0.54726195", "0.54703885", "0.54679763", "0.546101", "0.5447979", "0.5443352", "0.5439647", "0.5439267" ]
0.0
-1
Test if using undefined keys in features with predefined size or field names causes an exception.
def test_field_name_errors(): group = Group({"test": CustomSized(), }) group.set_test(5) assert_raises(KeyError, group.push) group = Group({"test": CustomNamed(), }) group.set_test("e") assert_raises(KeyError, group.push)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_for_unsupported_schema_fields(schema):\n if schema.sparse_feature:\n logging.warning('The input schema has sparse features which'\n ' are currently not supported.')", "def is_zero_dict( dict ):\n has_any_features = False\n for key in dict:\n has_any_features = has_any_features or dict[key]\n\n return not has_any_features", "def _validate_cols(cols):\n\n\tif cols is not None and len(cols) < 2:\n\t\traise ValueError('too few features')", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def _check_feature_by_keys(service_data=None, service_keys=None, ns_data=None, ns_keys=None):\n\n if service_data and not isinstance(service_data, Exception) and service_keys:\n if _is_keyval_greater_than_value(service_data, service_keys):\n return True\n\n if ns_data and ns_keys:\n for ns, nsval in ns_data.iteritems():\n if not nsval or isinstance(nsval, Exception):\n continue\n if _is_keyval_greater_than_value(nsval, ns_keys):\n return True\n\n return False", "def test_fieldname_exc(self):\n ds = self.f.create_dataset('foo', (100,), 'f')\n self.assertRaises(ValueError, ds.__getitem__, (0, 'a'))", "def validate_features(self):\n # assert len(self.features) > 0\n if self.source != INPUT_CONTEXT:\n for feature in self.features:\n if feature.key == [DUMMY_KEY]:\n raise RuntimeError(f\"For anchors of non-INPUT_CONTEXT source, key of feature {feature.name} \"\n f\"should be explicitly specified and not left blank.\")", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def check_gs_argument(ground_state):\n required_fields = [\"bc\", \"cf\", \"eci\", \"atoms\"]\n keys = ground_state.keys()\n for key in keys:\n if key not in required_fields:\n raise ValueError(\n \"The GS argument has to contain {} keys. Given {}\".format(\n required_fields, keys))", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_invalid_columns():\n train = ((\"Lorem ipsum dolor sit amet\", 3),\n (\"Sed ut perspiciatis unde\", 5.5))\n with pytest.raises(ValueError):\n TabularDataset(train, named_columns=['some_random_col'])", "def _check_features_df(df, features):\n # check columns\n if not set(features).issubset(df.columns):\n raise ValueError(\"The dataframe does not seem to have the right \"\n \"features. 
{0} instead of {1}\"\n .format(df.columns, features))\n\n return", "def haskey(featureVals, fkey):\n try:\n featureVals[fkey]\n except KeyError:\n return False\n\n #warn(HASKEYMSG % (fkey))\n return True", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_allow_extra_keys(self):\n from natcap.invest import validation\n\n args = {'a': 'a', 'b': 'b'}\n spec = {\n 'a': {\n 'type': 'freestyle_string',\n 'name': 'a',\n 'about': 'a freestyle string',\n 'required': True\n }\n }\n message = 'DEBUG:natcap.invest.validation:Provided key b does not exist in MODEL_SPEC'\n\n with self.assertLogs('natcap.invest.validation', level='DEBUG') as cm:\n validation.validate(args, spec)\n self.assertTrue(message in cm.output)", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def test_check_map_var_len_not_specified(self):\r\n\r\n self.assertRaises(ValueError, check_map,\r\n self.valid_mapping_data_var_len_bcs)", "def _assert_not_family_array(self, name):\n if name in self.family_keys():\n raise KeyError(\"Array \" + name + \" is a family-level property\")", "def test_too_few_props(self):\n with pytest.raises(ValueError):\n State(substance=\"water\", T=Q_(300, \"K\"))", "def invalid(values):\n # for box in values.keys():\n # if len(values[box]) == 0:\n # return True\n # return False\n return len([box for box in values.keys() if len(values[box]) == 0]) != 0", "def test_missing_column(self):\n df = pd.DataFrame({\"notlat\": [1, 2, 3], \"lon\": [11, 12, 13]})\n with self.assertRaises(Exception) as ctx:\n st.map(df)\n\n self.assertTrue(\"Map data must contain a column named\" in str(ctx.exception))", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True", "def test__validate_features__1():\n for input_value in (\n 12.6,\n ):\n with vampytest.assert_raises(TypeError):\n validate_features(input_value)", "def check_n_features(n_features, cols):\n\n if n_features is None:\n raise Exception(\"n_features must be specified.\")\n\n if n_features <= 0:\n raise Exception(\"n_features cannot be 0 or a negative number.\")\n\n if n_features > 1 and not isinstance(n_features, int):\n raise Exception(\"n_features must be integer if > 1.\")\n\n if n_features > cols:\n raise Exception(\"n_features must be less than columns in X.\")\n\n return", "def _check_labels_features_exist(\n labels_example: List[\"Message\"], attribute: Text\n ) -> bool:\n\n for label_example in labels_example:\n if (\n label_example.get(SPARSE_FEATURE_NAMES[attribute]) is None\n and label_example.get(DENSE_FEATURE_NAMES[attribute]) is None\n ):\n return False\n return True", "def _check_input_size(n_components, n_features):\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got 
%d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)", "def check_fields_in_dict(dictionary, fields, dictionary_name):\n for field in fields:\n if field not in dictionary:\n raise KafkaIotException(\"%s field(s) required but not found in %s: %s\"\n % (\", \".join(fields), dictionary_name, str(dictionary)))\n return True", "def test_dict_size_one_all_str(self):\n argument = {'nothing': 0}\n with self.assertRaises(IndexError):\n file_io.top_ten(argument)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def _check_for_unsupported_stats_fields(\n stats,\n stats_type):\n for feature in stats.features:\n if feature.HasField('struct_stats'):\n logging.warning('Feature \"%s\" in the %s has a struct_stats field which '\n 'is currently not supported.', feature.name, stats_type)", "def test_raise_error_unknown_field():\n\n options = {'fields': ['kHello']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match('Field ([a-zA-Z].*) not found in file list.')", "def check_all_same_schema(feat_dict_list, keys, name):\n if len(feat_dict_list) == 0:\n return\n for fdict in feat_dict_list:\n for k in keys:\n t1 = feat_dict_list[0][k]\n t2 = fdict[k]\n if F.dtype(t1) != F.dtype(t2) or F.shape(t1)[1:] != F.shape(t2)[1:]:\n raise DGLError('Expect all features {}[\"{}\"] to have the same data type'\n ' and feature size, but got\\n\\t{} {}\\nand\\n\\t{} {}.'.format(\n name, k, F.dtype(t1), F.shape(t1)[1:],\n F.dtype(t2), F.shape(t2)[1:]))", "def test_base_schema_ignores_unknown_fields():\n assert BaseSchema().load({\"unknown\": \"field\"}) == {}", "def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')", "def testInvalidNumberOfClasses(self):\n\n feature = layers.real_valued_column('feature')\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier(\n feature_columns=[feature], n_classes=1)", "def _validate_length_features_and_labels(\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ):\n\n # Getting the length of label names, feature_names and feature_stats\n len_of_label_names = (\n 0\n if not model_endpoint.spec.label_names\n else len(model_endpoint.spec.label_names)\n )\n len_of_feature_names = len(model_endpoint.spec.feature_names)\n len_of_feature_stats = len(model_endpoint.status.feature_stats)\n\n if len_of_feature_stats != len_of_feature_names + len_of_label_names:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"The length of model endpoint feature_stats is not equal to the \"\n f\"length of model endpoint feature names and labels \"\n f\"feature_stats({len_of_feature_stats}), \"\n f\"feature_names({len_of_feature_names}),\"\n f\"label_names({len_of_label_names}\"\n )", "def check_fields(taxa: Dict[str, AphiaInfo]) -> None:\n for key, taxon in taxa.items():\n if taxon.get(\"scientificName\") is None:\n taxon.set_missing(\"scientificName\")\n if taxon.get(\"scientificNameID\") is None:\n taxon.set_missing(\"scientificNameID\")", "def test_femattribute_length_different_error(self):\n with self.assertRaises(ValueError):\n FEMAttribute('', [1, 2, 3], [10., 20., 30., 40.])", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']", "def test_is_valid_annotation_key_invalid_input():\n # test length violations\n 
assert not is_valid_annotation_key(key=None) # Too short\n assert not is_valid_annotation_key(key=\"\") # Too short\n assert not is_valid_annotation_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_annotation_key(key=\"/n\") # prefix too short\n assert not is_valid_annotation_key(key=\"p/\") # name too short\n assert not is_valid_annotation_key(key=\"a\" * 254) # name too long\n assert not is_valid_annotation_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_annotation_key(key=\"-a\")\n assert not is_valid_annotation_key(key=\".b\")\n assert not is_valid_annotation_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_annotation_key(key=\"a-\")\n assert not is_valid_annotation_key(key=\"b.\")\n assert not is_valid_annotation_key(key=\"c \")\n assert not is_valid_annotation_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_annotation_key(key=\"a$$a\")\n assert not is_valid_annotation_key(key=\"b b\")", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_missing_column(self, example_dataset):\n\n example_dataset = example_dataset.drop([\"Sex\"], axis=1)\n transformer = PreprocessFeatures()\n\n with pytest.raises(ValueError):\n transformer.fit_transform(example_dataset)", "def test_no_such_key():\n test = [{'key': 'val1'}, ['missing']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'missing' in str(t_result.failure())", "def test_dict_size_one_all_number(self):\n argument = {0: 0}\n with self.assertRaises(IndexError):\n file_io.top_ten(argument)", "def test_missing_info(self):\r\n self.validate(None, None,\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))\r\n self.validate((3, 2, None, None), None,\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))\r\n self.validate((None, 2, None, None), (None, 2, 5, 5),\r\n N_image_shape=(3, 2, 8, 8),\r\n N_filter_shape=(4, 2, 5, 5))", "def test_getitem_missing(self, env: yaenv.Env):\n with pytest.raises(yaenv.EnvError) as err:\n _ = env['MISSING']\n assert 'Missing' in str(err.value)", "def _validate_or_make_feature(dic_fc, num_img):\n # bol_found_numf = False\n num_il = -999 # number in list initialise\n if isinstance(dic_fc, dict):\n if isinstance(num_img, int):\n # print(f\" = dic_fc: {dic_fc}\")\n if \"type\" in dic_fc.keys() and dic_fc[\"type\"] == \"FeatureCollection\":\n if \"features\" in dic_fc.keys() and isinstance(dic_fc[\"features\"], list):\n num_feat = -1\n for fet in dic_fc[\"features\"]:\n num_feat += 1\n if isinstance(fet, dict):\n if fet[\"type\"] == \"Feature\":\n if \"properties\" in fet.keys():\n if dic_fc[\"features\"][num_feat][\"properties\"][\"numi\"] == num_img:\n num_il = num_feat\n break\n else:\n print(f\"ERR: in _val_or_make...() dic_fc feature has no properties key\")\n return dic_fc\n else:\n print(f\"ERR: in _val_or_make...() dic_fc has element of not-Feature type: {fet['type']}\")\n return dic_fc\n else:\n print(f\"ERR: in _val_or_make...() dic_fc feature is not dict: {str(type(fet))}\")\n return dic_fc\n else:\n print(f\"ERR: in _val_or_make...() dic_fc has no key: features of type list\")\n return dic_fc\n else:\n print(f\"ERR: in _val_or_make...() dic_fc has no key: type == FeatureCollection\")\n return dic_fc\n else:\n print(f\"ERR: in _val_or_make...() received non-int as num_f\")\n return dic_fc\n else:\n 
print(f\"ERR: in _val_or_make...() received non-dict as dict_fc\")\n return dic_fc\n if num_il < 0:\n new_feature = {\"type\": \"Feature\", \"geometry\": {\"type\": \"Point\", \"coordinates\": [0, 0]}, \"properties\": {\"numi\": 0}}\n new_feature[\"properties\"][\"numi\"] = num_img\n dic_fc[\"features\"].append(new_feature)\n num_il = len(dic_fc[\"features\"]) - 1\n return dic_fc, num_il", "def test_columns_empty_list_error(self):\n\n with pytest.raises(ValueError):\n\n BaseTransformer(columns=[])", "def test_missing_axis_field():\n svl_string = \"\"\"\n BAR bigfoot X LABEL \"Classification\" Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", ())", "def test_errors(self):\n self.assertRaises(TypeError, columnize, 5, 'reject input - not array')\n return", "def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")", "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def test_with_missing_required_fields(data_store_path, missing_fields):\n data_store = YAMLDataStore(file_path=str(data_store_path))\n user = {\"name\": \"Eric Idle\", \"phone\": \"123-456-7890\", \"address\": \"here\"}\n for missing_field in missing_fields:\n del user[missing_field]\n\n with pytest.raises(InvalidUserError) as error:\n data_store.create(user)\n\n error_msg = str(error.value)\n for missing_field in missing_fields:\n assert missing_field in error_msg", "def test_error():\n file = gff.GFFFile()\n with pytest.raises(ValueError):\n # 'seqid' beginning with '>' is not legal\n file.append(\">xyz\", \"ab\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n with pytest.raises(ValueError):\n # String fields must not be empty\n file.append(\"\", \"ab\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n with pytest.raises(ValueError):\n # String fields must not be empty\n file.append(\"xyz\", \"\", \"cd\", 1, 2, None, None, None, {\"Id\":\"foo\"})\n with pytest.raises(ValueError):\n # String fields must not be empty\n file.append(\"xyz\", \"ab\", \"\", 1, 2, None, None, None, {\"Id\":\"foo\"})", "def is_valid(self):\n\n if not self.name:\n raise ValueError(\"No name found in feature set.\")\n\n if len(self.entities) == 0:\n raise ValueError(\"No entities found in feature set {self.name}\")", "def check_valid(feature: th.Tensor,\n num_frames: Optional[th.Tensor]) -> Tuple[th.Tensor]:\n num_nans = 
th.sum(th.isnan(feature))\n shape = feature.shape\n if num_nans:\n raise ValueError(f\"Detect {num_nans} NANs in feature matrices, \" +\n f\"shape = {shape}...\")\n if num_frames is not None:\n max_frames = num_frames.max().item()\n if feature.shape[-2] < max_frames:\n raise RuntimeError(f\"feats shape: {shape[-2]} x {shape[-1]}, \" +\n f\"num_frames = {num_frames.tolist()}\")\n if feature.shape[-2] > max_frames:\n feature = feature[..., :max_frames, :]\n return feature, num_frames", "def testNoFeatureColumnsOrKernelMappers(self):\n with self.assertRaises(ValueError):\n _ = kernel_estimators.KernelLinearClassifier()", "def test_set_missing_keys_1(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"host_genus\"])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"\")", "def check_keys(self):", "def test_default_ff(self):\n for res, atoms in self.ff.items():\n if res != \"KEY\":\n for atom, params in atoms.items():\n self.assertEqual(len(params), 8)\n self.assertTrue(type(atom) is str)\n self.assertTrue(type(params[0]) is str)\n self.assertTrue(all([(type(x) is int) or (type(x) is float)\n for x in params[1:]]))", "def test_columns_not_in_X_error(self):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=[\"a\", \"z\"])\n\n with pytest.raises(ValueError):\n\n x.columns_check(X=df)", "def _check_schema(\n self,\n document: Dict,\n is_missing_vector_field=True,\n is_missing_id_field=True,\n is_nested=False\n ):\n VECTOR_FIELD_NAME = \"_vector_\"\n IS_VECTOR_FIELD_MISSING = True\n IS_ID_FIELD_MISSING = True\n for field, value in document.items():\n if field == '_id':\n IS_ID_FIELD_MISSING = False\n if isinstance(value, dict):\n IS_ID_FIELD_MISSING, IS_VECTOR_FIELD_MISSING = self._check_schema(\n document[field],\n is_missing_vector_field=IS_VECTOR_FIELD_MISSING,\n is_missing_id_field=IS_ID_FIELD_MISSING,\n is_nested=True\n )\n if \"_vectors_\" in field:\n warnings.warn(\n \"Rename \" + field + \"to \" + field.replace('_vectors_', '_vector_')\n , MissingFieldWarning)\n\n for field in document.keys():\n if VECTOR_FIELD_NAME in field:\n IS_VECTOR_FIELD_MISSING = False\n\n if not is_nested:\n if IS_VECTOR_FIELD_MISSING:\n warnings.warn(\n \"Potential issue. Cannot find a vector field. Check that the vector field contains _vector_.\",\n MissingFieldWarning\n )\n if IS_ID_FIELD_MISSING:\n warnings.warn(\n \"Missing ID field. 
Please include an _id field to make inserting easier.\",\n MissingFieldWarning\n )\n return IS_ID_FIELD_MISSING, IS_VECTOR_FIELD_MISSING", "def test_PerfectModel_verify_metric_keyerrors(\n perfectModelEnsemble_initialized_control, metric\n):\n with pytest.raises(KeyError) as excinfo:\n perfectModelEnsemble_initialized_control.verify(\n comparison=\"e2c\",\n metric=metric,\n dim=[],\n )\n assert \"Specify metric from\" in str(excinfo.value)", "def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))", "def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))", "def check_fields(entry, fields):\n if entry is None:\n raise exceptions.BadInputError(\"empty entry\")\n for field in fields:\n if field not in entry:\n raise exceptions.BadInputError(f\"field {field} required and not found\")\n if entry[field] is None:\n # empty fields will be loaded in as None by pyyaml\n raise exceptions.BadInputError(f\"field {field} required not to be empty\")", "def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def test_feature_id_non_existent_ensembl(self):\n\n for component_name in [\"var\", \"raw.var\"]:\n with self.subTest(component_name=component_name):\n # Resetting validator\n self.validator.adata = examples.adata.copy()\n self.validator.errors = []\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n\n new_index = list(component.index)\n new_index[0] = \"ENSG000\"\n component.set_index(pd.Index(new_index), inplace=True)\n component[\"feature_biotype\"][0] = \"gene\"\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: 'ENSG000' is not a valid feature ID in '{component_name}'.\"\n ],\n )", "def has_no_conds(key):\n if isinstance(key, ColumnProxy):\n return False\n return ((key is Ellipsis) or (key is None) or (key == EMPTY_SLICE) or\n (isinstance(key, Sized) and len(key) == 0))", "def check_column_values(self, values):\n none_keys = sorted(list(self._necessary_input_columns.intersection(set([elem for elem in self._columns if values[self.column_id[elem]] in [None, 'None']]))))\n if len(none_keys) > 0:\n raise Exception('missing_keys in ForcingOnMesh_DBManager add function parameter file_info:\\n%s\\n'%('\\n'.join([' - %s'%elem for elem in none_keys])))", "def _entry_optional_features_are_valid(entry: _LexiconEntry) -> None:\n tag = _tag_of(entry)\n optional = tags.OPTIONAL_FEATURES[tag]\n\n if not optional:\n return\n\n features = _features_of(entry)\n category_value = _category_value_pairs(features)\n\n if not all(c in optional and v in optional[c] for c, v in category_value):\n raise InvalidLexiconEntryError(\"Entry has invalid optional features.\")", "def _entry_has_required_features(entry: _LexiconEntry) -> None:\n features = _features_of(entry)\n tag = _tag_of(entry)\n required = tags.REQUIRED_FEATURES[tag]\n\n if features == \"~\" and required:\n raise InvalidLexiconEntryError(\"Entry is missing required features.\")", "def 
test_load_data_multiple_columns_no_csv():\n f = ImageFeaturizer()\n with pytest.raises(ValueError):\n f.load_data(**LOAD_DATA_ARGS_MULT_ERROR)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']", "def test_get_non_existent_dimension(self):\n\n v = Vector({ })\n self.assertEqual(0, v.dimensions['x'])", "def validate_instruction_keys(instruction: TransactionInstruction, expected: int) -> None:\n if len(instruction.keys) < expected:\n raise ValueError(f\"invalid instruction: found {len(instruction.keys)} keys, expected at least {expected}\")", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))", "def test_metadata_no_unknown_top_keys(self):\n top_keys = [\"name\", \"designer\", \"license\", \"visibility\", \"category\",\n \"size\", \"dateAdded\", \"fonts\", \"subsets\"]\n for x in self.metadata.keys():\n self.assertIn(x, top_keys, msg=\"%s found unknown top key\" % x)", "def hasProperField(dHeader,s):\n\tif not dHeader.get(s):\n\t\tshowUsage(dHeader)\n\t\traise '\\nneed proper %s input\\n' % s\n\treturn 1", "def check_field_name(field_name):\n\n error_checking.assert_is_string(field_name)\n if field_name in ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES:\n return\n\n error_string = (\n '\\nField \"{0:s}\" is not valid predictor or target variable. 
Valid '\n 'options listed below:\\n{1:s}'\n ).format(field_name, str(ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES))\n\n raise ValueError(error_string)", "def test_unknown_fields_are_not_allowed() -> None:\n with pytest.raises(pydantic.ValidationError):\n r4.Meta(unknown_field=True)", "def _kwargs_check(feature_extraction, kwargs):\n # When using policy_kwargs parameter on model creation,\n # all keywords arguments must be consumed by the policy constructor except\n # the ones for the cnn_extractor network (cf nature_cnn()), where the keywords arguments\n # are not passed explicitly (using **kwargs to forward the arguments)\n # that's why there should be not kwargs left when using the mlp_extractor\n # (in that case the keywords arguments are passed explicitly)\n if feature_extraction == 'mlp' and len(kwargs) > 0:\n raise ValueError(\"Unknown keywords for policy: {}\".format(kwargs))", "def test_process_args_should_reject_missing_units(self, arg_dict):\n with pytest.raises(KeyError):\n change_resolution.process_args(arg_dict)", "def test_extract_invalid_column(self):\n self.dicom.extract_keywords([\"invalid\"])\n\n # ensure column was added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"Invalid column not added\")\n\n # compare expected and actual result\n invalid_column = self.dicom.metadata.take(self.count, columns=[u'invalid'])\n expected_result = [[None] for x in range(0, self.count)]\n self.assertEqual(invalid_column, expected_result)", "def test_get_field_state_comparisons_no_comp_states(self):\r\n self.assertRaises(ValueError, get_field_state_comparisons,\r\n self.dist_matrix_header, self.dist_matrix,\r\n self.mapping_header, self.mapping, self.field,\r\n [])", "def test_PerfectModel_verify_comparison_keyerrors(\n perfectModelEnsemble_initialized_control, comparison\n):\n with pytest.raises(KeyError) as excinfo:\n perfectModelEnsemble_initialized_control.verify(\n comparison=comparison,\n metric=\"mse\",\n dim=[],\n )\n assert \"Specify comparison from\" in str(excinfo.value)", "def test_dict_kwarg_integrity(fast_reader, guess):\n expstyle = fast_reader.get(\"exponent_style\", \"E\")\n fields = [\"10.1D+199\", \"3.14d+313\", \"2048d+306\", \"0.6D-325\", \"-2.d345\"]\n\n ascii.read(StringIO(\" \".join(fields)), guess=guess, fast_reader=fast_reader)\n assert fast_reader.get(\"exponent_style\", None) == expstyle", "def test_extract_multiple_invalid_columns(self):\n keywords = [\"invalid\", \"another_invalid_col\"]\n self.dicom.extract_keywords(keywords)\n\n # test that columns were added\n columns = self.dicom.metadata.column_names\n if u'invalid' not in columns:\n raise Exception(\"invalid column not added to columns\")\n if u'another_invalid_col' not in columns:\n raise Exception(\"another_invalid_col not added to columns\")\n\n # compare actual with expected result\n invalid_columns = self.dicom.metadata.take(self.count, columns=keywords)\n expected_result = [[None, None] for x in range(0, self.count)]\n self.assertEqual(invalid_columns, expected_result)", "def test_bad_dimensions(self, prop: str):\n kwargs = {prop: Q_(1.0, \"dimensionless\")}\n if prop == \"v\":\n kwargs[\"T\"] = Q_(300.0, \"K\")\n else:\n kwargs[\"v\"] = Q_(1.0, \"m**3/kg\")\n with pytest.raises(StateError):\n State(substance=\"water\", **kwargs)", "def checkIndex(key):\n if not isinstance(key, (int, float)): raise TypeError\n if key<0: raise IndexError", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n 
self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()" ]
[ "0.69130564", "0.634529", "0.6202998", "0.6143627", "0.6138493", "0.6115828", "0.6083164", "0.594095", "0.594095", "0.5913152", "0.590867", "0.58996445", "0.5898748", "0.589319", "0.5874625", "0.5874482", "0.5858971", "0.58570707", "0.58447146", "0.58440685", "0.5839924", "0.5823964", "0.5816455", "0.57970273", "0.579245", "0.57915145", "0.57727253", "0.5764092", "0.57576805", "0.57357657", "0.57340705", "0.57222426", "0.5693365", "0.5693328", "0.56586766", "0.5650429", "0.5647483", "0.5640896", "0.56297255", "0.56271785", "0.5619019", "0.561861", "0.561861", "0.56131876", "0.55983114", "0.55935436", "0.55848145", "0.5584671", "0.55717635", "0.5568681", "0.5531042", "0.55268615", "0.552542", "0.55247605", "0.552242", "0.55210024", "0.5517768", "0.5504339", "0.55013716", "0.5493338", "0.54930586", "0.54928493", "0.54896986", "0.5482295", "0.5482217", "0.54750854", "0.546872", "0.5466045", "0.54650295", "0.5464877", "0.5464283", "0.54642206", "0.54616165", "0.5459785", "0.54593766", "0.54577833", "0.5443611", "0.54329175", "0.5428472", "0.542832", "0.542832", "0.54245484", "0.54195887", "0.54171443", "0.5416449", "0.54080373", "0.54079473", "0.54065585", "0.5406181", "0.54057", "0.5405466", "0.540544", "0.53986", "0.5396238", "0.5394619", "0.5391927", "0.5390464", "0.5385855", "0.53851116", "0.53845716" ]
0.554681
50
Test if array can be built from empty features when the field size or the field names are fixed.
def test_custom_empty():
    group = Group({
        "a": CustomSized(),
        "b": CustomNamed(),
        "c": Numerical(dimensions=4),
        "d": Hashed(buckets=4),
        "e": Categorical([1, 2, 3, 4]),
    })
    for i in range(10):
        group.push()
    array = group.array()
    assert array.shape == (10, 20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def f_supports(self, data):\n dtype = type(data)\n if dtype is tuple or dtype is list and len(data) == 0:\n return True # ArrayParameter does support empty tuples\n elif dtype is np.ndarray and data.size == 0 and data.ndim == 1:\n return True # ArrayParameter supports empty numpy arrays\n else:\n return super(ArrayParameter, self).f_supports(data)", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def _check_arrays(current_arrays, fields, sizes, factor, required):\n\n # Nothing supplied so we build it out\n if current_arrays is None:\n current_arrays = {}\n\n for label in fields:\n if required:\n size = sizes[label]\n current_arrays[label] = np.zeros((factor, size))\n else:\n current_arrays[label] = None # np.empty((1))\n\n return current_arrays", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def is_empty(self):\n return len(self.fields) == 0", "def is_empty_arraylike(arraylike):\n # pylint: disable=len-as-condition\n import numpy as np\n if arraylike is None:\n return True\n if isinstance(arraylike, np.ndarray) and arraylike.size == 0:\n return True\n if isinstance(arraylike, list) and len(arraylike) == 0:\n return True\n if isinstance(arraylike, tuple) and len(arraylike) == 0:\n return True\n return False", "def _is_empty(shape):\n return F.shape_mul(shape) == 0", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def f_is_empty(self):\n return len(self._data) == 0", "def not_empty(entry):\n gt_boxes = entry['boxes']\n return gt_boxes.shape[0] > 0", "def is_empty(self):", "def is_empty(self):", "def is_array(self):\n return len(self.descriptor) > 1", "def f_is_empty(self):\n raise NotImplementedError(\"Implement this!\")", "def is_empty(self):\n return self.size == []", "def is_empty(self) -> bool:", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def is_full(self) -> bool:\n return self._array[0].all()", "def is_empty(polyreg):\n n = len(polyreg)\n if len(polyreg) == 0:\n try:\n return len(polyreg.A) == 0\n except Exception:\n return True\n else:\n N = np.zeros(n, dtype=int)\n for i in range(n):\n N[i] = is_empty(polyreg.list_poly[i])\n if np.all(N):\n return True\n else:\n return False", "def is_field_empty(*args):\n for field in args:\n if field == \"\" or field is None:\n return True\n return False\n return \"NONDETERMINISTIC\"", "def array_not_empty(array: np.ndarray) -> None:\n if not array.size:\n raise ValueError(\"Array must not be empty\")", "def hasFlexibleFields(data):\n\n regexp = '_f[0..9]+$'\n\n # XXX GR the 'and' close shouldn't be necessary, but there are\n # empty flexible fields in some of our mails\n # also bool on iterators doesn't check non emptiness\n\n return int(bool([fid for fid in data\n if re.search(regexp, fid) and data[fid] is not None]))", "def NeedsOptionalArray(self, type_):\n return self._NameComponents(type_) in self._optional_array_types", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def is_full(self):\n\n return self.count == len(self.array)", "def isFull(T):\r\n return len(T.data) >= 
T.max_data", "def is_trivial(self):\n return self.dims == 0", "def is_empty(self):\n return False if self.__reserved_members or self.__expedition else True", "def isEmpty(self):\n return self._N == 0", "def is_empty(self):\n return self.__size == 0", "def is_empty(self):\n return len(self) == 0", "def _test_empty(t):\n return t.is_empty()", "def _is_empty(self):\n return self.size == 0", "def isFull(self) -> bool:\n return self._elems == self._k", "def is_empty(self):\n return self._size == 0", "def is_empty(self):\n return not self.size()", "def empty(self):\n return 0 >= len(self.__data)", "def has_definite_size(iterable):\n return hasattr(iterable, '__len__')", "def is_empty(self) -> bool:\n return False if self.__reserved_members or self.__expedition else True", "def is_empty_record(*args):\n return not any([arg for arg in args])", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def is_zero_dict( dict ):\n has_any_features = False\n for key in dict:\n has_any_features = has_any_features or dict[key]\n\n return not has_any_features", "def f_is_empty(self):\n return self._data is None", "def test_array_option_empty_equivalents(self):\n def get_opt():\n opts = self.introspect('--buildoptions')\n for x in opts:\n if x.get('name') == 'list':\n return x\n raise Exception(opts)\n\n expected = {\n 'name': 'list',\n 'description': 'list',\n 'section': 'user',\n 'type': 'array',\n 'value': [],\n 'choices': ['foo', 'bar', 'oink', 'boink'],\n 'machine': 'any',\n }\n tdir = os.path.join(self.unit_test_dir, '19 array option')\n self.init(tdir, extra_args='-Dlist=')\n original = get_opt()\n self.assertDictEqual(original, expected)", "def is_empty(self): # concrete method assuming abstract len\n return len(self) == 0", "def is_empty(self):\n return not (\n self.has_label\n or self.has_name\n or self.has_points\n or self.has_attributes\n )", "def is_not_empty(things: Union[Sized, Iterable[Any]]) -> bool:\n\n return not is_empty(things)", "def invalid(values):\n # for box in values.keys():\n # if len(values[box]) == 0:\n # return True\n # return False\n return len([box for box in values.keys() if len(values[box]) == 0]) != 0", "def is_empty(self): # concrete method assuming abstract len\n return len(self) == 0", "def empty(self) -> bool:\n return len(self.a) == 0 and len(self.b) == 0", "def empty(self):\n return self.numba_rtree._bounds_tree.shape[0] == 0", "def is_empty(self):\n if numpy.any(numpy.logical_not(self.shape)):\n return True\n if len(self.__m__) == 0:\n return True\n return False", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def is_empty(self):\r\n return self.buff==[]", "def is_empty(self): # -> bool:\n ...", "def is_full(self):\n return len(self._data) == 1", "def is_empty(self):\r\n return len(self) == 0", "def valid(self):\n if (self._npix == []\n or self._gpix == []\n or self._epix == []\n or self._ppix == []) :\n return False\n return True", "def is_empty(self):\n return len(self) == 0", "def _assert_not_family_array(self, name):\n if name in self.family_keys():\n raise KeyError(\"Array \" + 
name + \" is a family-level property\")", "def allUnknown(self, length: int) -> bool:\n ...", "def is_emtpy(self) -> bool:\n return self._size == 0", "def _check_input_size(n_components, n_features):\n if n_components <= 0:\n raise ValueError(\n \"n_components must be strictly positive, got %d\" % n_components\n )\n if n_features <= 0:\n raise ValueError(\"n_features must be strictly positive, got %d\" % n_features)", "def empty(self):\n return len(self.a) == 0", "def empty(self):\n return len(self.a) == 0", "def checkNullFields(self, func, **kwargs):\n labels = list(kwargs.keys())\n fields = list(kwargs.values())\n\n #Func exits means we need to change label\n if func:\n labels = [func(label) for label in kwargs.keys()]\n\n if any([len(field) == 0 for field in fields]):\n print_warn = []\n for i in range(len(fields)):\n if len(fields[i]) == 0:\n print_warn.extend(labels[i])\n print_warn.extend([\",\", \" \"])\n\n warning_string = ''.join(print_warn[:-2]) #Ignore the last \", \" and \" \"\n self.messagebox.showerror(\"Not enough data\", f\"Please input {warning_string}\")\n return False\n\n return True", "def _check_input(self, X):\n symbols = np.concatenate(X)\n if len(symbols) == 1: # not enough data\n raise ValueError(\"expected at least 1 observation \"\n \"but none found.\")\n elif (symbols < 0).any(): # contains negative integers\n raise ValueError(\"expected non-negative features \"\n \"for each observation.\")\n elif X.shape[1] > 1: # contains to many features\n raise ValueError(\"expected only 1 feature but got {0} \"\n \"for each observation.\".format(X.shape[1]))\n else:\n return True", "def empty(self):\n return len(self.layers) == 0", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True", "def has_min_len(arr, len_, kind):\n arr_len = len(arr)\n if arr_len < len_:\n raise DimensionError(\n f'Your {kind} array must be at least {len_}, '\n f'but has only length {arr_len}!'\n )\n return True", "def test_empty_structure():\n empty = SME_Struct()\n\n assert isinstance(empty.version, str)\n assert empty.teff is not None\n assert empty.logg is not None\n assert empty.vmic == 0\n assert empty.vmac == 0\n assert empty.vsini == 0\n\n assert empty.nseg == 0\n assert empty.wave is None\n assert empty.spec is None\n assert empty.uncs is None\n assert empty.synth is None\n assert empty.cont is None\n assert empty.mask is None\n assert empty.mask_good is None\n assert empty.mask_bad is None\n # assert empty.mask_line is None\n # assert empty.mask_continuum is None\n\n assert empty.cscale.shape == (0, 1)\n assert empty.vrad.shape == (0,)\n assert empty.cscale_flag == \"none\"\n assert empty.vrad_flag == \"none\"\n assert empty.cscale_degree == 0\n\n assert empty.mu is not None\n assert empty.nmu == 7\n\n # assert empty.md5 is not None\n\n assert empty.linelist is not None\n assert empty.species is not None\n assert len(empty.species) == 0\n assert empty.atomic is not None\n\n assert empty.monh == 0\n assert not np.isnan(empty[\"abund Fe\"])\n assert empty.abund[\"H\"] == 12\n assert not np.isnan(empty.abund()[\"Mg\"])\n\n assert empty.system_info is not None\n assert empty.system_info.arch == \"\"\n\n assert len(empty.fitparameters) == 0\n assert empty.fitresults is not None\n assert empty.fitresults.covariance is None\n\n assert empty.atmo is not None\n assert empty.atmo.depth is None\n\n assert empty.nlte is not None\n assert empty.nlte.elements == []", "def test_empty_file(self):\n field = TypedFileField(required=False)\n for v 
in EMPTY_VALUES:\n assert field.clean(v) is None", "def check_empty(self):\n if self.size():\n raise AttributeError", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def is_empty(self):\n return False", "def empty(self):\n return len(self.a) + len(self.b) == 0", "def is_empty(self):\n return len(self.data) == 0", "def empty(self) -> bool:\n return self.sk1_len==0", "def _check_for_unsupported_schema_fields(schema):\n if schema.sparse_feature:\n logging.warning('The input schema has sparse features which'\n ' are currently not supported.')", "def empty(self):\n return self.value == []", "def test_empty_input(self):\n discs = calc_disc(np.column_stack((np.ones(0), np.ones(0), np.ones(0))))\n np.testing.assert_almost_equal(discs, np.array([]))", "def is_empty(self):\n return len(self.values) == 0", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def _isscalar(x):\n return np.isscalar(x) or hasattr(x, \"shape\") and x.shape == ()", "def test_given_empty_array(self):\n lists_array = []\n result = solution(lists_array)\n self.assertEqual(None, result)", "def is_empty(self) -> bool:\r\n return self.size == 0", "def _check_shape(self, X):\n return all([X.shape[i] == self.train_shape[i] for i in range(2)])", "def is_tableau_empty(self):\n return all(self.tableau[i] is None for i in range(28))", "def _is_empty(self):\n if self.allocated_spaces == 0:\n return True\n else:\n return False", "def is_empty(self):\r\n if self.size == 0:\r\n return True\r\n return False", "def check_unstructured(extractions):\n if not extractions:\n return True\n for ext in extractions:\n if not hasattr(ext, 'args'):\n return False\n return True", "def PartiallyEmpty(self):\n return None==self.piecesToRecover", "def is_empty(self):\n return len(self) == 0", "def is_empty(self):\n return len(self) == 0", "def is_empty(self):\n return len(self) == 0", "def is_empty(self):\n return len(self) == 0" ]
[ "0.65964544", "0.647808", "0.63849956", "0.6338137", "0.6333534", "0.6268299", "0.62431586", "0.61735314", "0.6156284", "0.6152013", "0.6133537", "0.6133537", "0.61284035", "0.6114361", "0.61137617", "0.6066248", "0.60511184", "0.6048782", "0.6030363", "0.6017818", "0.5988865", "0.5975305", "0.595802", "0.5945045", "0.59333324", "0.59328914", "0.59327275", "0.5884647", "0.5875132", "0.58743685", "0.5871457", "0.5859152", "0.58483845", "0.58444375", "0.58433753", "0.58390814", "0.5833125", "0.5826494", "0.5823183", "0.5816152", "0.581458", "0.5806884", "0.5802959", "0.5787522", "0.5779642", "0.57789725", "0.5778421", "0.577815", "0.57769305", "0.5765666", "0.57638764", "0.5762478", "0.5754121", "0.57535756", "0.573919", "0.57362336", "0.57292426", "0.5727268", "0.57263404", "0.5724056", "0.57227975", "0.57175654", "0.5707983", "0.5707515", "0.570414", "0.570414", "0.5696192", "0.5691552", "0.5686834", "0.5678057", "0.56662726", "0.56572884", "0.56567574", "0.56536496", "0.5652337", "0.5652337", "0.5652337", "0.5652337", "0.5652337", "0.5652337", "0.5650849", "0.56492", "0.56486285", "0.5648162", "0.5647419", "0.5646896", "0.56422263", "0.563819", "0.56336564", "0.5633088", "0.5631577", "0.5619392", "0.5618643", "0.5618129", "0.5616103", "0.5615988", "0.561568", "0.56153023", "0.56153023", "0.56153023", "0.56153023" ]
0.0
-1
Test if array concatenation works.
def test_array_concat():
    array = Array(columns="abc")
    for i in range(10):
        array.append([1, 2, 3])
    # Any 2-dimensional array with the same number of rows should work.
    other = [[4, 5, 6]] * len(array)
    array.concat(other)
    assert array.shape == (10, 6)
    assert len(array.columns) == 6
    assert all(type(column) is str for column in array.columns)
    for row in array:
        assert tuple(row) == (1, 2, 3, 4, 5, 6)
    # Now this should fail since the columns have the same names.
    other = Array(columns="abc")
    for i in range(10):
        other.append([7, 8, 9])
    assert_raises(ValueError, array.concat, other)
    # Adding a prefix should make it work.
    array.concat(other, prefix="other")
    assert array.shape == (10, 9)
    assert len(array.columns) == 9
    for row in array:
        assert tuple(row) == (1, 2, 3, 4, 5, 6, 7, 8, 9)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test01(self):\n a = np.arange(1000)\n b = bcolz.carray(a, chunklen=1, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")", "def test03(self):\n a = np.arange(1e4)\n c = np.arange(2e5)\n b = bcolz.carray(a, rootdir=self.rootdir)\n b.append(c)\n # print \"b->\", `b`\n d = np.concatenate((a, c))\n assert_array_equal(d, b[:], \"Arrays are not equal\")", "def test02a(self):\n a = np.arange(1000)\n b = bcolz.carray(a, chunklen=10*1000, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")", "def test02b(self):\n a = np.arange(100*1000)\n b = bcolz.carray(a, chunklen=10*1000, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")", "def test02c(self):\n a = np.arange(1000*1000)\n b = bcolz.carray(a, chunklen=100*1000-1, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")", "def test_pad_and_concatenate_with_1d(self):\n array1 = 1.0\n array2 = 2.0\n result = numpy_pad_and_concatenate(array1, array2)\n self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))\n\n tensor1 = torch.tensor(1.0)\n tensor2 = torch.tensor(2.0)\n result = torch_pad_and_concatenate(tensor1, tensor2)\n self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))", "def add_mismatched_arrays(array1, array2, truncate=False):\n # Cast these arrays to the largest common type\n array1 = np.array(array1, dtype=np.promote_types(array1.dtype, array2.dtype))\n array2 = np.array(array2, dtype=np.promote_types(array1.dtype, array2.dtype))\n\n # TODO: find a more elegant way to do this whole function\n\n if truncate:\n if len(array1) < len(array2):\n result = array1.copy()\n result += array2[:len(array1)]\n else:\n result = array2.copy()\n result += array1[:len(array2)]\n else:\n if len(array1) < len(array2):\n result = array2.copy()\n result[:len(array1)] += array1\n else:\n result = array1.copy()\n result[:len(array2)] += array2\n\n return result", "def assertArrayEquals(testcase, arr1, arr2):\n from itertools import zip_longest\n import numpy as np\n testcase.assertTrue(\n all([\n np.array_equal(e, a)\n for e, a\n in zip_longest(arr1, arr2)\n ])\n )", "def test_concatenate_quaternions():\n # Until ea9adc5, this combination of a list and a numpy array raised\n # a ValueError:\n q1 = [1, 0, 0, 0]\n q2 = np.array([0, 0, 0, 1])\n q12 = pr.concatenate_quaternions(q1, q2)\n assert_array_almost_equal(q12, np.array([0, 0, 0, 1]))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n q1 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q2 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n\n R1 = pr.matrix_from_quaternion(q1)\n R2 = pr.matrix_from_quaternion(q2)\n\n q12 = pr.concatenate_quaternions(q1, q2)\n R12 = np.dot(R1, R2)\n q12R = pr.quaternion_from_matrix(R12)\n\n pr.assert_quaternion_equal(q12, q12R)", "def __array_append(self, in_a,in_b):\n in_b = np.array([in_b]) if isinstance(in_b,(int,float,long,complex)) else in_b\n return np.concatenate((in_a,in_b))", "def testConcatSourceMultipleButOneConcatable(self):\n env = self.env\n\n # Even if multiple input files, if only one is concat-able, won't concat.\n cs = env.ConcatSource('foo3.cc', ['a.cc', 'd.o'])\n self.assertEqual(map(str, cs), ['d.o', 'a.cc'])", "def 
test_op_add_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_add_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_concat(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\"],\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"a\", \"b\", \"c\", \"d\"],\n ),\n Case(\n description=\"missing argument\",\n val=[\"a\", \"b\"],\n args=[],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"too many arguments\",\n val=[\"a\", \"b\"],\n args=[[\"c\", \"d\"], \"\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"arguments not a list\",\n val=[\"a\", \"b\"],\n args=[5],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"not an array\",\n val=\"a, b\",\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"array contains non string\",\n val=[\"a\", \"b\", 5],\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"a\", \"b\", 5, \"c\", \"d\"],\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"c\", \"d\"],\n ),\n Case(\n description=\"undefined argument\",\n val=[\"a\", \"b\"],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=FilterArgumentError,\n ),\n ]\n\n self._test(Concat, test_cases)", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [arr1[i] + arr2[i] for i in range(len(arr1))]", "def test_op_add_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.0, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = 
offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def hpat_arrays_append_overload(A, B):\n\n use_A_array = isinstance(A, (RangeIndexType, Int64IndexType))\n use_B_array = isinstance(B, (RangeIndexType, Int64IndexType))\n if isinstance(A, (types.Array, RangeIndexType, Int64IndexType)):\n if isinstance(B, (types.Array, RangeIndexType, Int64IndexType)):\n def _append_single_numeric_impl(A, B):\n _A = A.values if use_A_array == True else A # noqa\n _B = B.values if use_B_array == True else B # noqa\n return numpy.concatenate((_A, _B,))\n\n return _append_single_numeric_impl\n\n elif (isinstance(B, (types.UniTuple, types.List))\n and isinstance(B.dtype, (types.Array, RangeIndexType, Int64IndexType))):\n B_dtype_is_index = isinstance(B.dtype, (RangeIndexType, Int64IndexType))\n numba_common_dtype = find_common_dtype_from_numpy_dtypes([A.dtype, B.dtype.dtype], [])\n\n # TODO: refactor to use numpy.concatenate when Numba supports building a tuple at runtime\n def _append_list_numeric_impl(A, B):\n\n total_length = len(A) + numpy.array([len(arr) for arr in B]).sum()\n new_data = numpy.empty(total_length, numba_common_dtype)\n\n stop = len(A)\n _A = numpy.array(A) if use_A_array == True else A # noqa\n new_data[:stop] = _A\n for arr in B:\n _arr = arr.values if B_dtype_is_index == True else arr # noqa\n start = stop\n stop = start + len(_arr)\n new_data[start:stop] = _arr\n return new_data\n\n return _append_list_numeric_impl\n\n elif A == string_array_type:\n if B == string_array_type:\n def _append_single_string_array_impl(A, B):\n total_size = len(A) + len(B)\n total_chars = num_total_chars(A) + num_total_chars(B)\n new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)\n\n pos = 0\n pos += append_string_array_to(new_data, pos, A)\n pos += append_string_array_to(new_data, pos, B)\n\n return new_data\n\n return _append_single_string_array_impl\n elif (isinstance(B, (types.UniTuple, types.List)) and B.dtype == string_array_type):\n def _append_list_string_array_impl(A, B):\n array_list = [A] + list(B)\n total_size = numpy.array([len(arr) for arr in array_list]).sum()\n total_chars = numpy.array([num_total_chars(arr) for arr in array_list]).sum()\n\n new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)\n\n pos = 0\n pos += append_string_array_to(new_data, pos, A)\n for arr in B:\n pos += append_string_array_to(new_data, pos, arr)\n\n return new_data\n\n return _append_list_string_array_impl", "def _concat_arrays(arrays):\n # torch\n if isinstance(arrays[0], torch.Tensor):\n return torch.cat(arrays)\n\n # numpy\n if not isinstance(arrays[0], np.ndarray):\n arrays = np.asarray(arrays)\n\n return np.concatenate(arrays)", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return (None)\n newList = []\n for i in range(len(arr1)):\n newList.append(arr1[i] + arr2[i])\n return (newList)", "def testRegisterConcatenation(self):\n reg_one = ShiftRegister(2)\n reg_one.shift(\"a\")\n reg_one.shift(\"b\")\n reg_two = ShiftRegister(3)\n reg_two.shift(\"c\")\n reg_two.shift(\"d\")\n reg_two.shift(\"e\")\n reg_cat = 
reg_one.concatenate(reg_two)\n self.assertEqual(''.join(reg_cat), \"abcde\")", "def assert_content_equals_array(result, expected):\n assert isinstance(result, (pa.Array, pa.ChunkedArray))\n if isinstance(result, pa.ChunkedArray):\n result = pa.concat_arrays(result.iterchunks())\n assert result.equals(expected)", "def cat_arrays(arr1, arr2):\n return [x for x in arr1+arr2]", "def add_arrays(arr1, arr2):\n n = len(arr1)\n m = len(arr2)\n if n != m:\n return None\n return [arr1[i] + arr2[i] for i in range(n)]", "def concatenate(arrays, **kwargs):\n unit = unit_of(arrays[0])\n result = np.concatenate([to_unitless(arr, unit) for arr in arrays], **kwargs)\n return result * unit", "def test_concat_impl(self, value, expected_concat_value):\n # Need to convert np arrays to tensors first.\n value = tf.nest.map_structure(tf.constant, value)\n concat_value = concat._concat_impl(value)\n self.assertAllEqual(concat_value, expected_concat_value)", "def solution(array1, array2):\n array1, array2 = np.array(array1), np.array(array2)\n return np.concatenate((array1, array2.flatten()))", "def test_concatenate_errors(self):\n header = BDFHeader.from_path(TestData.bdf_2048)\n header2 = BDFHeader.from_path(TestData.bdf_256)\n with pytest.raises(ValueError):\n header.concatenate(header2)", "def test_op_add_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def concatenate_data():", "def testConcatDisabled(self):\n env = self.env\n\n # If CONCAT_SOURCE_ENABLE is not set, files are passed through\n env['CONCAT_SOURCE_ENABLE'] = False\n cs = env.ConcatSource('foo4.cc', ['a.cc', 'b.cc', 'c.cc'])\n self.assertEqual(map(str, cs), ['a.cc', 'b.cc', 'c.cc'])", "def add_mismatched_arrays2D(array1, array2, truncate=False):\n # Cast these arrays to the largest common type\n array1 = np.array(array1, dtype=np.promote_types(array1.dtype, array2.dtype))\n array2 = np.array(array2, dtype=np.promote_types(array1.dtype, array2.dtype))\n\n # TODO: find a more elegant way to do this whole function\n\n if truncate:\n if array1.shape[1] < array2.shape[1]: # Kludge\n result = array1.copy()\n result += array2[:, :array1.shape[1]]\n else:\n result = array2.copy()\n result += array1[:, :array2.shape[1]]\n else:\n if array1.shape[1] < array2.shape[1]:\n result = array2.copy()\n result[:, :array1.shape[1]] += array1\n else:\n result = array1.copy()\n result[:, :array2.shape[1]] += array2\n\n return result", "def test_op_iadd_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = 
stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_combine_nsamples_one_array():\n test_samples = np.ones((2, 13, 21)) * 3\n samples_out = utils.combine_nsamples(test_samples, axis=0)\n test_full_samples = np.ones((2, 2, 13, 21)) * 3\n assert np.allclose(test_full_samples, samples_out)", "def test_op_iadd_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_iadd_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_op_add_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.2, -1.5)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test01(self):\n a = np.arange(self.N)\n b = bcolz.carray(a, rootdir=self.rootdir)\n c = b.view()\n c.append(np.arange(self.N, self.N + 11))\n self.assertEqual(len(b), self.N)\n self.assertEqual(len(c), self.N + 11)\n r = np.arange(self.N + 11)\n assert_array_equal(b[:], a)\n assert_array_equal(c[:], r)", "def 
concatenate(array1, array2, axis=0):\r\n\r\n assert isinstance(array2, numpy.ndarray)\r\n if array1 is not None:\r\n assert isinstance(array1, numpy.ndarray)\r\n return numpy.concatenate((array1, array2), axis=axis)\r\n else:\r\n return array2", "def test_op_add_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_flatten_array(test_input, expected):\n assert flatten_array(test_input) == expected", "def test13(self):\n a = bcolz.ones((self.N, 1))\n b = bcolz.zeros(a.shape)\n b = bcolz.eval('a + b')\n self.assertEqual(b.sum(), self.N)", "def __add__(self, right_arr):\n concat_arr = self.copy() # Create new instance to return\n concat_arr.extend(right_arr)\n return concat_arr", "def in_array(array1, array2):", "def test_concatenate(self):\n header = BDFHeader.from_path(TestData.bdf_256)\n header2 = BDFHeader.from_path(TestData.bdf_256)\n assert header.nb_data_records == 60\n assert header.data_duration == 1\n assert header2.nb_data_records == 60\n assert header2.data_duration == 1\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header\n header.concatenate(header2)\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header\n assert header.nb_data_records == 120\n assert header.data_duration == 2\n assert header2.nb_data_records == 60\n assert header2.data_duration == 1\n header2.max_dimensions = [99999999] * header2.nb_channels\n header2.min_dimensions = [-9999999] * header2.nb_channels\n header2.max_digital = [99999999] * header2.nb_channels\n header2.min_digital = [-9999999] * header2.nb_channels\n header.concatenate(header2)\n assert header.nb_data_records == 180\n assert header.data_duration == 3\n assert header.max_dimensions == [99999999] * header2.nb_channels\n assert header.min_dimensions == [-9999999] * header2.nb_channels\n assert header.max_digital == [99999999] * header2.nb_channels\n assert header.min_digital == [-9999999] * header2.nb_channels\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header", "def test00(self):\n a = np.arange(111)\n b = bcolz.carray(a, rootdir=self.rootdir)\n c = b.copy()\n c.append(np.arange(111, 122))\n self.assertTrue(len(b) == 111, \"copy() does not work well\")\n self.assertTrue(len(c) == 122, \"copy() does not work well\")\n r = np.arange(122)\n assert_array_equal(c[:], r, \"incorrect correct values after copy()\")", "def test06(self):\n a = np.arange(1e5)\n b = bcolz.carray(a, chunklen=10000, rootdir=self.rootdir)\n sl = -2 # second last element\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")\n sl = -1 # last element\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test_merge_empty():\n run_merge([], [], [])", "def test01(self):\n 
b = bcolz.arange(1e2, chunklen=2, rootdir=self.rootdir)\n b.trim(5)\n a = np.arange(1e2-5)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")", "def safe_concat(arrs, default=None, **kwargs):\n arrs = [arr for arr in arrs]\n if not arrs:\n return default\n if isinstance(arrs[0], pd.Series):\n arrs = [arr.values for arr in arrs]\n if isinstance(arrs[0], pd.DataFrame):\n if all([arr.empty for arr in arrs]):\n return default\n return pd.concat([arr for arr in arrs if not arr.empty], **kwargs)\n if isinstance(arrs[0], np.ndarray):\n if all([arr.shape[0] == 0 for arr in arrs]):\n return default\n return np.concatenate([arr for arr in arrs if not arr.shape[0] == 0], **kwargs)", "def _compare_arrays(self, data, reference, atol=1.e-5, rtol=1.e-5):\n if not (data.shape==reference.shape): return False\n ret=numpy.allclose(data,reference, atol=atol, rtol=rtol)\n return ret", "def test_op_iadd_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += offl_o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_variadic_addition_identity(self):\n self.assertIdentity(variadic_addition)", "def test02(self):\n a = np.arange(1e3)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:3]->\", `b[1:3]`\n assert_array_equal(a[1:3], b[1:3], \"Arrays are not equal\")", "def test_combine():\n # Create 4 square arrays:\n # 0 1 2 3\n # -----------\n # 00 11 22 33\n # 00 11 22 33\n tiles = [np.array(_square(i)) for i in range(4)]\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], tiles[1], tiles[2]) # Too few values.\n\n with pytest.raises(ValueError):\n _combine_tiles(tiles[0], None, None, None, None) # Too many values.\n\n # Combine them the 4 major ways:\n\n # case1: corner\n # 0X\n # XX\n case1 = _combine_tiles(tiles[0], None, None, None)\n assert case1.shape == (2, 2)\n assert (case1 == tiles[0]).all()\n\n # case2: bottom edge\n # 01\n # XX\n case2 = _combine_tiles(tiles[0], tiles[1], None, None)\n assert case2.shape == (2, 4)\n assert (case2[0:2, 0:2] == tiles[0]).all()\n assert (case2[0:2, 3:5] == tiles[1]).all()\n\n # case3: right edge\n # 0X\n # 2X\n case3 = _combine_tiles(tiles[0], None, tiles[2], None)\n assert case3.shape == (4, 2)\n assert (case3[0:2, 0:2] == tiles[0]).all()\n assert (case3[3:5, 0:2] == tiles[2]).all()\n\n # case4: interior\n # 01\n # 23\n case4 = _combine_tiles(tiles[0], tiles[1], tiles[2], tiles[3])\n assert case4.shape == (4, 4)\n assert (case4[0:2, 0:2] == tiles[0]).all()\n assert (case4[0:2, 3:5] == tiles[1]).all()\n assert (case4[3:5, 0:2] == tiles[2]).all()\n assert (case4[3:5, 3:5] == tiles[3]).all()", "def test02(self):\n a = np.arange(3, self.N, 4)\n ac = bcolz.arange(3, self.N, 4, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_op_iadd_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = 
numpy.arange(1, 4711 * 1024, dtype=complex)\n o = a + complex(1.2, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_radd(self):\n tensor = Tensor([2, 4, 6, 8])\n result = 1 + tensor\n result_np = np.array(1) + tensor\n result_arr = [1, 1, 1, 1] + tensor\n\n assert result.data.tolist() == [3, 5, 7, 9]\n assert result_np.data.tolist() == [3, 5, 7, 9]\n assert result_arr.data.tolist() == [3, 5, 7, 9]", "def test01(self):\n a = np.arange(3, self.N)\n ac = bcolz.arange(3, self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_op_iadd_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a += o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def concat(list_of_arrays):\n shape = np.shape(list_of_arrays)\n newShape = [ shape[0]*shape[1] ]\n if len(shape)>2:\n for i in range(2,len(shape)):\n newShape.append(shape[i])\n \n array_concat = np.zeros(newShape)\n s=0\n e=shape[1]\n \n for i in range(0,shape[0]):\n array_concat[s:e] = list_of_arrays[i]\n s=e\n e=e+shape[1] \n return array_concat", "def cat_arrays(arr1, arr2):\n newarr = [0 for i in range(len(arr1) + len(arr2))]\n for i in range(len(arr1)):\n newarr[i] = arr1[i]\n for i in range(len(arr2)):\n newarr[i + len(arr1)] = arr2[i]\n return newarr", "def test02(self):\n a = np.arange(2)\n b = bcolz.arange(1e4, rootdir=self.rootdir)\n b.trim(1e4-2)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")", "def testConcatSourceMultiple(self):\n env = self.env\n\n # Multiple source files are combined, but object and mm files aren't.\n # Check for both g++ and msvc.\n env3 = env.Clone(CC='g++')\n cs = env3.ConcatSource('foo2a.cc', ['a.cc', 'e.mm', 'b.cc', 'd.o',\n 'c.cc'])\n self.assertEqual(map(str, cs), ['e.mm', 'd.o', 'foo2a.cc'])\n\n env4 = env.Clone(CC='cl')\n cs = env4.ConcatSource('foo2b.cc', ['a.cc', 'e.mm', 'b.cc', 'd.obj',\n 'c.cc'])\n self.assertEqual(map(str, cs), ['e.mm', 'd.obj', 'foo2b.cc'])", "def test_asarraylike_array():\n arr = np.array([1, 2, 3, 4])\n result = util.asarraylike(arr)\n\n assert result is arr", "def test01(self):\n a = np.arange(1e5)\n sa = a.sum(dtype='i8')\n ac = bcolz.carray(a)\n sac = ac.sum(dtype='i8')\n # print \"numpy sum-->\", sa\n # print \"carray sum-->\", sac\n self.assertTrue(sa.dtype == sac.dtype,\n \"sum() is not working correctly.\")\n 
self.assertTrue(sa == sac, \"sum() is not working correctly.\")", "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [sum(element_wise) for element_wise in zip(arr1, arr2)]", "def check_empty_arrays(cmd_out: list) -> list:\n array_counts = array_occurrences(cmd_out)\n new_cmd_out = []\n comments = -1\n for entry in cmd_out:\n flat_entry = chain(*entry)\n if ''.join(flat_entry) == entry[0]:\n if array_counts[entry[0]] == 1:\n entry[comments] = 'No data for this array'\n new_cmd_out.append(entry)\n else:\n array_counts[entry[0]] -= 1\n else:\n new_cmd_out.append(entry)\n return new_cmd_out", "def test_multi_intersect():\r\n\r\n arr1 = np.array(np.arange(1000).reshape(2,500))\r\n arr2 = np.array([[1,0.1,0.2],[0.3,0.4, 0.5]])\r\n arr3 = np.array(1)\r\n npt.assert_equal(1, utils.multi_intersect([arr1, arr2, arr3]))", "def test05(self):\n a = np.arange(1e1)\n b = bcolz.arange(1e1, rootdir=self.rootdir)\n b.trim(0)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")", "def test_if_array_is_good(self):\n testing_param = random.randint(1, 100)\n self.assertEqual(self.exercice.main(testing_param),\n list(range(2, testing_param+1, 2)))", "def test01a(self):\n a = np.arange(1e2)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(1)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test00a(self):\n b = bcolz.arange(self.N, rootdir=self.rootdir)\n b.resize(self.N-3)\n a = np.arange(self.N-3)\n # print \"b->\", `b`\n assert_array_equal(a, b[:], \"Arrays are not equal\")", "def test_merge_two_two_same():\n run_merge([1, 3], [1, 3], [1, 1, 3, 3])", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def checkArray(comment, first, second, dtype, tol=1e-10, update=True):\n res = True\n if len(first) != len(second):\n res = False\n print(\"checking answer\",comment,'|','lengths do not match:',len(first),len(second))\n else:\n for i in range(len(first)):\n if dtype == float:\n pres = checkFloat('',first[i],second[i],tol,update=False)\n elif dtype in (str,unicode):\n pres = checkSame('',first[i],second[i],update=False)\n if not pres:\n print('checking array',comment,'|','entry \"{}\" does not match: {} != {}'.format(i,first[i],second[i]))\n res = False\n if update:\n if res:\n results[\"pass\"] += 1\n else:\n results[\"fail\"] += 1\n return res", "def test04a(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=16, rootdir=self.rootdir)\n sl = slice(1, 2)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def add(x1, x2):\n arr1 = numpy.asarray(x1)\n arr2 = numpy.asarray(x2)\n out_size = _get_num_chars(arr1) + _get_num_chars(arr2)\n\n if type(arr1.dtype) != type(arr2.dtype):\n # Enforce this for now. The solution to it will be implement add\n # as a ufunc. 
It never worked right on Python 3: bytes + unicode gave\n # nonsense unicode + bytes errored, and unicode + object used the\n # object dtype itemsize as num chars (worked on short strings).\n # bytes + void worked but promoting void->bytes is dubious also.\n raise TypeError(\n \"np.char.add() requires both arrays of the same dtype kind, but \"\n f\"got dtypes: '{arr1.dtype}' and '{arr2.dtype}' (the few cases \"\n \"where this used to work often lead to incorrect results).\")\n\n return _vec_string(arr1, type(arr1.dtype)(out_size), '__add__', (arr2,))", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def _chain_equal(a,b):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.array_equal(a_seg, b_seg):\n return False\n return True", "def test04(self):\n a = np.arange(1e4)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:8000]->\", `b[1:8000]`\n assert_array_equal(a[1:8000], b[1:8000], \"Arrays are not equal\")", "def test__chk_asarray(self):\r\n\r\n exp = (array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]), 0)\r\n obs = _chk_asarray([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]], 0)\r\n assert_almost_equal(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])", "def concatenate(module, arrays, dimension):\n _import_modules()\n if module in [np, ma, jnp]:\n return module.concatenate(arrays, dimension)\n elif module == torch:\n return module.cat(arrays, dimension)\n elif module == tf:\n return tf.concat(arrays, axis=dimension)\n return UnknownModuleException(f\"Module {module.__name__} not supported.\")", "def __radd__(self, left_arr):\n concat_arr = left_arr.copy() # Create new instance to return\n concat_arr.extend(self)\n return concat_arr", "def test01a(self):\n a = np.arange(101)\n b = bcolz.carray(a, chunklen=2, rootdir=self.rootdir)\n # print \"sum iter->\", sum(b.iter(3))\n self.assertTrue(sum(a[3:]) == sum(b.iter(3)), \"Sums are not equal\")", "def test02a(self):\n a = np.arange(1e2)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(1, 3)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test00(self):\n a = np.arange(self.N)\n ac = bcolz.arange(self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def arrayStringsAreEqual1(self, word1: List[str], word2: List[str]) -> bool:\n word1str = ''.join(word1)\n word2str = ''.join(word2)\n return word1str == word2str", "def test_line_concat():\n for _x in range(100):\n strings = [random_str(30, 50) for _x in range(10)]\n l_file = random_str(10, 20)\n l_num = randint(1, 10000)\n lines = [Line(x, l_file, l_num) for x in strings]\n # Concatenate the lines\n l_full = lines[0]\n for line in lines[1:]: l_full = l_full + line\n # Test the result\n assert l_full == \"\".join(strings)\n assert isinstance(l_full, Line)\n assert l_full.file == l_file\n assert l_full.number == l_num", "def test03(self):\n dtype = np.dtype([('f1', [('f1', 'i2'), ('f2', 'i4')])])\n a = np.ones(3000, dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertTrue(ac.dtype == dtype)\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac[:], \"Arrays are not equal\")", "def test_concatenate_tables():\n table0 = _get_table(region=\"shapes/circles\", instance_key=\"instance_id\")\n table1 = _get_table(region=\"shapes/poly\", instance_key=\"instance_id\")\n table2 = _get_table(region=\"shapes/poly2\", 
instance_key=\"instance_id\")\n with pytest.raises(ValueError):\n _concatenate_tables([])\n assert len(_concatenate_tables([table0])) == len(table0)\n assert len(_concatenate_tables([table0, table1, table2])) == len(table0) + len(table1) + len(table2)\n\n table0.obs[\"annotated_element_merged\"] = np.arange(len(table0))\n c0 = _concatenate_tables([table0, table1])\n assert len(c0) == len(table0) + len(table1)\n\n d = c0.uns[TableModel.ATTRS_KEY]\n d[\"region\"] = sorted(d[\"region\"])\n assert d == {\n \"region\": [\"shapes/circles\", \"shapes/poly\"],\n \"region_key\": \"region\",\n \"instance_key\": \"instance_id\",\n }\n\n table3 = _get_table(region=\"shapes/circles\", region_key=\"annotated_shapes_other\", instance_key=\"instance_id\")\n with pytest.raises(ValueError):\n _concatenate_tables([table0, table3], region_key=\"region\")\n\n table4 = _get_table(\n region=[\"shapes/circles1\", \"shapes/poly1\"], region_key=\"annotated_shape0\", instance_key=\"instance_id\"\n )\n table5 = _get_table(\n region=[\"shapes/circles2\", \"shapes/poly2\"], region_key=\"annotated_shape0\", instance_key=\"instance_id\"\n )\n table6 = _get_table(\n region=[\"shapes/circles3\", \"shapes/poly3\"], region_key=\"annotated_shape1\", instance_key=\"instance_id\"\n )\n with pytest.raises(ValueError, match=\"`region_key` must be specified if tables have different region keys\"):\n _concatenate_tables([table4, table5, table6])\n assert len(_concatenate_tables([table4, table5, table6], region_key=\"region\")) == len(table4) + len(table5) + len(\n table6\n )", "def _numpy_checker(x, y):\r\n x, y = x[0], y[0]\r\n if (x.dtype != y.dtype or x.shape != y.shape\r\n or numpy.any(numpy.abs(x - y) > 1e-10)):\r\n raise Exception(\"Output mismatch.\", {'performlinker': x, 'clinker': y})", "def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def test03(self):\n a = np.arange(1e3)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:8:3]->\", `b[1:8:3]`\n assert_array_equal(a[1:8:3], b[1:8:3], \"Arrays are not equal\")", "def test_splitting(self):\n t, x_n, x_s, x_p, x = self.t, self.x_n, self.x_s, self.x_p, self.x\n c_e_combined = np.concatenate(\n (self.c_e_n(t, x_n), self.c_e_s(t, x_s), self.c_e_p(t, x_p)), axis=0\n )\n\n np.testing.assert_array_equal(self.c_e(t, x), c_e_combined)", "def test_numpy_ops(self):\n\n arr = np.array([1, 2, 3])\n c = Column('a', arr)\n eq = c == arr\n assert np.all(eq)\n assert len(eq) == 3\n assert type(eq) == Column\n assert eq.dtype.str == '|b1'\n eq = arr == c\n assert np.all(eq)\n\n lt = c - 1 < arr\n assert np.all(lt)", "def test06(self):\n dtype = np.dtype(\"object\")\n a = np.array([\"ale\", \"e\", \"aco\"], dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertEqual(ac.dtype, dtype)\n self.assertEqual(a.dtype, ac.dtype)\n assert_array_equal(a, ac, \"Arrays are not equal\")", "def test02(self):\n dtype = np.dtype(\"f4,f8\")\n a = np.ones(30000, dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertTrue(ac.dtype == dtype)\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac[:], \"Arrays are not equal\")", "def allequal(a, b, flavor=\"numpy\"):\n\n # print(\"a-->\", repr(a))\n # print(\"b-->\", repr(b))\n if not hasattr(b, \"shape\"):\n # Scalar case\n return a == b\n\n if ((not hasattr(a, \"shape\") or a.shape == ()) and\n (not hasattr(b, \"shape\") or 
b.shape == ())):\n return a == b\n\n if a.shape != b.shape:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # Way to check the type equality without byteorder considerations\n if hasattr(b, \"dtype\") and a.dtype.str[1:] != b.dtype.str[1:]:\n if verbose:\n print(\"dtype is not equal:\", a.dtype, \"!=\", b.dtype)\n return 0\n\n # Rank-0 case\n if len(a.shape) == 0:\n if a[()] == b[()]:\n return 1\n else:\n if verbose:\n print(\"Shape is not equal:\", a.shape, \"!=\", b.shape)\n return 0\n\n # null arrays\n if a.size == 0: # len(a) is not correct for generic shapes\n if b.size == 0:\n return 1\n else:\n if verbose:\n print(\"length is not equal\")\n print(\"len(a.data) ==>\", len(a.data))\n print(\"len(b.data) ==>\", len(b.data))\n return 0\n\n # Multidimensional case\n result = (a == b)\n result = np.all(result)\n if not result and verbose:\n print(\"Some of the elements in arrays are not equal\")\n\n return result", "def test03(self):\n a = np.arange(self.N, dtype=\"i1\")\n ac = bcolz.arange(self.N, dtype=\"i1\", rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))" ]
[ "0.71296877", "0.7055316", "0.69996417", "0.67035186", "0.6662046", "0.64107263", "0.6404458", "0.6371694", "0.6330895", "0.63107747", "0.6218703", "0.6131409", "0.6072472", "0.60144913", "0.5992456", "0.597871", "0.5963048", "0.5954356", "0.59503627", "0.5942833", "0.59251606", "0.59075296", "0.588849", "0.58516836", "0.5808522", "0.57904404", "0.57831866", "0.57751584", "0.57558775", "0.5745632", "0.57233256", "0.5722236", "0.5713012", "0.5703975", "0.5688654", "0.5681721", "0.5664421", "0.56503713", "0.563521", "0.56345516", "0.562731", "0.56208104", "0.56138897", "0.5598334", "0.55950445", "0.55895054", "0.5584596", "0.5547161", "0.5539464", "0.5538495", "0.55207574", "0.5513301", "0.55049956", "0.54827976", "0.5464196", "0.5460845", "0.5458485", "0.54571986", "0.5446074", "0.5433204", "0.5425964", "0.542393", "0.54233027", "0.5419625", "0.5414035", "0.54133403", "0.5408212", "0.5401354", "0.53981906", "0.5390979", "0.5388871", "0.5388787", "0.5386702", "0.5385552", "0.53662026", "0.53613913", "0.5354845", "0.53542817", "0.5352096", "0.5351854", "0.53505415", "0.53491914", "0.5335002", "0.5317351", "0.5308982", "0.53035045", "0.52886045", "0.5280492", "0.52790177", "0.52788085", "0.5278435", "0.5273225", "0.52691585", "0.5268589", "0.52562374", "0.5254674", "0.5252116", "0.52511597", "0.52397335", "0.5232052" ]
0.7229425
0
Test if transforming the array works.
def test_pipe_simple():
    def transform(array):
        """Turns the (n,2) array into a (n,4) array."""
        assert array.shape == (10, 2)
        new = Array(columns="abcd")
        for x, y in array:
            new.append([x, y, x + y, x * y])
        return new

    group = Pipe(Group({"a": Numerical(), "b": Numerical()}), transform)
    for _ in range(10):
        group.set_a(1e-6 + random())
        group.set_b(1e-6 + random())
        group.push()
    array = group.array()
    assert array.shape == (10, 4)
    for row in array:
        assert row[0] > 0.0 and row[1] > 0.0
        assert row[2] == row[0] + row[1]
        assert row[3] == row[0] * row[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_transform(self):\n t = Enumerate([2, \"asfa\", \"ipsi\"])\n assert t.transform(2) == 0\n assert t.transform(\"asfa\") == 1\n assert t.transform(\"ipsi\") == 2\n with pytest.raises(KeyError):\n t.transform(\"aafdasfa\")\n assert numpy.all(\n t.transform([[\"ipsi\", \"asfa\"], [2, \"ipsi\"]]) == [[2, 1], [0, 2]]\n )\n\n # for the crazy enough\n t = Enumerate([2])\n assert t.transform(2) == 0\n with pytest.raises(KeyError):\n t.transform(\"aafdasfa\")\n assert numpy.all(t.transform([[2, 2], [2, 2]]) == [[0, 0], [0, 0]])", "def test_transform(self):\n t = OneHotEncode(3)\n assert numpy.all(t.transform(0) == numpy.array((1.0, 0.0, 0.0)))\n assert numpy.all(t.transform(1) == numpy.array((0.0, 1.0, 0.0)))\n assert numpy.all(t.transform(2) == numpy.array((0.0, 0.0, 1.0)))\n with pytest.raises(AssertionError):\n t.transform(4)\n with pytest.raises(AssertionError):\n t.transform(-1)\n with pytest.raises(AssertionError):\n t.transform(2.2)\n assert numpy.all(\n t.transform([[2, 1], [0, 2]])\n == numpy.array(\n [[(0.0, 0.0, 1.0), (0.0, 1.0, 0.0)], [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]]\n )\n )\n\n t = OneHotEncode(2)\n assert t.transform(0) == 0.0\n assert t.transform(1) == 1.0\n with pytest.raises(TypeError):\n t.transform(\"ipsi\")\n assert numpy.all(\n t.transform([[1, 1], [0, 1]]) == numpy.array([[1.0, 1.0], [0.0, 1.0]])\n )\n\n # for the crazy enough\n t = OneHotEncode(1)\n assert t.transform(0) == 0.0\n with pytest.raises(TypeError):\n t.transform(\"ipsi\")\n assert numpy.all(t.transform([[0, 0], [0, 0]]) == [[0.0, 0.0], [0.0, 0.0]])", "def test__transform_continuous(self):", "def test_asarraylike_array():\n arr = np.array([1, 2, 3, 4])\n result = util.asarraylike(arr)\n\n assert result is arr", "def test_transform(self):\n X = self.generate_X()\n task = mmRDTR()\n task.fit(X)\n res = task.transform(X)\n # check if Instance\n self.assertIsInstance(res,Container)\n # check if names\n self.assertEqual(np.all(res.colnames()==[str(i) for i in xrange(len(res.colnames()))]),True)\n # check if values as within the range expected\n self.assertEqual(np.all(res().min()>=-1),True)\n self.assertEqual(np.all(res().max()<=1),True)\n for i in range(len(res.colnames())):\n self.assertEqual(round(res()[:,i].mean(),8),0)\n # check with new data\n Y = self.generate_X()\n res = task.transform(Y)\n self.assertEqual(np.all(res.colnames()==[str(i) for i in xrange(len(res.colnames()))]),True)\n self.assertEqual(np.all(res().min()>=-1),True)\n self.assertEqual(np.all(res().max()<=1),True)", "def test_transform(self):\n t = Quantize()\n assert t.transform(8.6) == 9\n assert t.transform(8.4) == 8\n assert t.transform(5.3) == 5\n assert numpy.all(t.transform([8.6, 5.3]) == numpy.array([9, 5], dtype=int))", "def test__chk_asarray(self):\r\n\r\n exp = (array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]), 0)\r\n obs = _chk_asarray([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]], 0)\r\n assert_almost_equal(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])", "def test_transform(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n assert numpy.all(t.transform(2) == numpy.array((1.0, 0.0, 0.0)))\n assert numpy.all(t.transform(\"asfa\") == numpy.array((0.0, 1.0, 0.0)))\n assert numpy.all(t.transform(\"ipsi\") == numpy.array((0.0, 0.0, 1.0)))\n\n with pytest.raises(KeyError):\n t.transform(\"aafdasfa\")\n\n assert numpy.all(\n t.transform([[\"ipsi\", \"asfa\"], [2, \"ipsi\"]])\n == numpy.array(\n [[(0.0, 0.0, 1.0), (0.0, 1.0, 0.0)], [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]]\n )\n )\n\n t = 
Compose([Enumerate([2, \"asfa\"]), OneHotEncode(2)], \"categorical\")\n assert t.transform(2) == 0.0\n assert t.transform(\"asfa\") == 1.0\n with pytest.raises(KeyError):\n t.transform(\"ipsi\")\n assert numpy.all(\n t.transform([[\"asfa\", \"asfa\"], [2, \"asfa\"]])\n == numpy.array([[1.0, 1.0], [0.0, 1.0]])\n )\n\n # for the crazy enough\n t = Compose([Enumerate([2]), OneHotEncode(1)], \"categorical\")\n assert t.transform(2) == 0.0\n with pytest.raises(KeyError):\n t.transform(\"ipsi\")\n assert numpy.all(t.transform([[2, 2], [2, 2]]) == [[0, 0], [0, 0]])", "def test_transform(self):\n t = Reverse(Quantize())\n assert t.transform(9) == 9.0\n assert t.transform(5) == 5.0\n assert numpy.all(t.transform([9, 5]) == numpy.array([9.0, 5.0], dtype=float))", "def verify_transformed(self, data):\n data_dim = data.shape[-1]\n if data_dim != self.dimension:\n error(\n \"{} result dimension {} does not match the prescribed input dimension {}\"\n .format(self.name, data_dim, self.dimension))\n nans, _ = np.where(np.isnan(data))\n if np.size(nans) != 0:\n error(\"{} result contains nan elements in :{}\".format(\n self.name, nans))", "def test_conversion(backend):\n\n x = np.random.rand(10, 10)\n x_b = backend.from_numpy(x)\n x_c = backend.to_numpy(x_b)\n\n assert np.all(np.isclose(x, x_c))", "def test_transform(self):\n result = transform((1, 2) ,2, 2)\n self.assertEqual(result, (4 * PIXEL, 3 * PIXEL))", "def test_flatten_array(test_input, expected):\n assert flatten_array(test_input) == expected", "def test_load_full_transform(self):\n self.add_transform(cond_artist=True, cond_album=True, cond_title=True,\n cond_ensemble=True, cond_composer=True, cond_conductor=True,\n change_artist=True, change_album=True, change_title=True,\n change_ensemble=True, change_composer=True, change_conductor=True,\n pattern_artist='Artist', pattern_album='Album', pattern_title='Title',\n pattern_ensemble='Ensemble', pattern_composer='Composer', pattern_conductor='Conductor',\n to_artist='Artist 2', to_album='Album 2', to_title='Title 2',\n to_ensemble='Ensemble 2', to_composer='Composer 2', to_conductor='Conductor 2')\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)\n transform = self.app.transforms.transforms[1]\n self.assertEqual(transform.cond_artist, True)\n self.assertEqual(transform.cond_album, True)\n self.assertEqual(transform.cond_title, True)\n self.assertEqual(transform.cond_ensemble, True)\n self.assertEqual(transform.cond_composer, True)\n self.assertEqual(transform.cond_conductor, True)\n self.assertEqual(transform.change_artist, True)\n self.assertEqual(transform.change_album, True)\n self.assertEqual(transform.change_title, True)\n self.assertEqual(transform.change_ensemble, True)\n self.assertEqual(transform.change_composer, True)\n self.assertEqual(transform.change_conductor, True)\n self.assertEqual(transform.pattern_artist, 'Artist')\n self.assertEqual(transform.pattern_album, 'Album')\n self.assertEqual(transform.pattern_title, 'Title')\n self.assertEqual(transform.pattern_ensemble, 'Ensemble')\n self.assertEqual(transform.pattern_composer, 'Composer')\n self.assertEqual(transform.pattern_conductor, 'Conductor')\n self.assertEqual(transform.to_artist, 'Artist 2')\n self.assertEqual(transform.to_album, 'Album 2')\n self.assertEqual(transform.to_title, 'Title 2')\n self.assertEqual(transform.to_ensemble, 'Ensemble 2')\n self.assertEqual(transform.to_composer, 'Composer 2')\n self.assertEqual(transform.to_conductor, 'Conductor 2')", "def test_inverse_transform(self):", "def 
can_retransform(self):\r\n return self._can_retransform", "def test_transform(self):\n data = pd.DataFrame({\n \"x\": np.array([0.1, 0.3, 0.5]),\n \"y\": np.array([\"yes\", \"yes\", \"no\"])\n })\n\n transformer = DataTransformer()\n transformer._column_transform_info_list = [\n ColumnTransformInfo(\n column_name=\"x\", column_type=\"continuous\", transform=None,\n transform_aux=None,\n output_info=[SpanInfo(1, 'tanh'), SpanInfo(3, 'softmax')],\n output_dimensions=1 + 3\n ),\n ColumnTransformInfo(\n column_name=\"y\", column_type=\"discrete\", transform=None,\n transform_aux=None,\n output_info=[SpanInfo(2, 'softmax')],\n output_dimensions=2\n )\n ]\n\n transformer._transform_continuous = Mock()\n selected_normalized_value = np.array([[0.1], [0.3], [0.5]])\n selected_component_onehot = np.array([\n [1, 0, 0],\n [1, 0, 0],\n [1, 0, 0],\n ])\n return_value = (selected_normalized_value, selected_component_onehot)\n transformer._transform_continuous.return_value = return_value\n\n transformer._transform_discrete = Mock()\n transformer._transform_discrete.return_value = [np.array([\n [0, 1],\n [0, 1],\n [1, 0],\n ])]\n\n result = transformer.transform(data)\n transformer._transform_continuous.assert_called_once()\n transformer._transform_discrete.assert_called_once()\n\n expected = np.array([\n [0.1, 1, 0, 0, 0, 1],\n [0.3, 1, 0, 0, 0, 1],\n [0.5, 1, 0, 0, 1, 0],\n ])\n\n assert result.shape == (3, 6)\n assert (result[:, 0] == expected[:, 0]).all(), \"continuous-cdf\"\n assert (result[:, 1:4] == expected[:, 1:4]).all(), \"continuous-softmax\"\n assert (result[:, 4:6] == expected[:, 4:6]).all(), \"discrete\"", "def test_transform(self):\n t = Linearize()\n assert t.transform(numpy.e) == numpy.log(numpy.e)\n t.transform(0)", "def test_schwefel222(self):\n fun = get_problem('schwefel222', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_data_augmentation_transforms():\n\n transforms_list = get_data_augmentation_transforms(inp_size=(100, 50), pixel_mean=[0.5], pixel_std=[0.3]).transforms\n\n assert len(transforms_list) > 3\n\n # last 3 should be fundamental\n augmentation_transforms = Compose(transforms_list[:-3])\n\n try:\n inp_img = Image.fromarray(np.loadtxt(\"proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\"))\n\n except:\n inp_img = Image.fromarray(\n np.loadtxt(\"../proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\")\n )\n augmented_img = augmentation_transforms(inp_img)\n assert isinstance(augmented_img, type(inp_img))\n assert not np.array_equal(augmented_img, inp_img)", "def test_interface(transform, example_tsds: TSDataset):\n start_columnns = example_tsds.columns\n example_tsds.fit_transform(transforms=[transform])\n assert np.all(start_columnns == example_tsds.columns)", "def test_fundamental_transforms():\n\n transforms = get_fundamental_transforms(inp_size=(100, 50), pixel_mean=[0.5], pixel_std=[0.3])\n\n try:\n inp_img = Image.fromarray(np.loadtxt(\"proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\"))\n output_img = transforms(inp_img)\n expected_output = torch.load(\"proj6_code/proj6_unit_tests/test_data/transform_out.pt\")\n\n except:\n inp_img = Image.fromarray(\n np.loadtxt(\"../proj6_code/proj6_unit_tests/test_data/transform_inp.txt\", dtype=\"uint8\")\n )\n output_img = transforms(inp_img)\n expected_output = torch.load(\"../proj6_code/proj6_unit_tests/test_data/transform_out.pt\")\n\n assert torch.allclose(expected_output, output_img)", "def test_transform(self):\n t = 
Precision(precision=4)\n assert t.transform(8.654321098) == 8.654\n assert t.transform(0.000123456789) == 0.0001235\n assert numpy.all(\n t.transform([8.654321098, 0.000123456789])\n == numpy.array([8.654, 0.0001235], dtype=float)\n )", "def test_transform(self):\n shape = (3, 4, 5)\n index = (0, 2, 1)\n t = View(shape=shape, index=index)\n a = numpy.zeros(shape)\n a[index] = 2\n assert t.transform(a) == 2", "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def check_transformations(*args):\n assert args[0].shape == (21,21)\n assert args[0].dtype == np.float64\n if len(args) == 2:\n assert args[1].shape == (2,2)\n assert args[1].dtype == np.float64", "def test_schwefel221(self):\n fun = get_problem('schwefel221', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_array_normalization(self):\n norm = self.normalizer\n matrix = norm.normalize(self.matrix)\n vals = [int(x) for x in matrix[\"temperatures\"]]\n self.assertEqual([11, 12, 13, 21, 22, 23, 31, 32, 33], vals)", "def test_transform_default(self):\n result = transform((1, 2))\n self.assertEqual(result, (2 * PIXEL, 1 * PIXEL))", "def test__inverse_transform_continuous(self):", "def test_if_array_is_good(self):\n testing_param = random.randint(1, 100)\n self.assertEqual(self.exercice.main(testing_param),\n list(range(2, testing_param+1, 2)))", "def test_load_empty_transform(self):\n self.add_transform()\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)\n transform = self.app.transforms.transforms[1]\n self.assertEqual(transform.cond_artist, False)\n self.assertEqual(transform.cond_album, False)\n self.assertEqual(transform.cond_title, False)\n self.assertEqual(transform.cond_ensemble, False)\n self.assertEqual(transform.cond_conductor, False)\n self.assertEqual(transform.cond_composer, False)\n self.assertEqual(transform.change_artist, False)\n self.assertEqual(transform.change_album, False)\n self.assertEqual(transform.change_title, False)\n self.assertEqual(transform.change_ensemble, False)\n self.assertEqual(transform.change_conductor, False)\n self.assertEqual(transform.change_composer, False)\n self.assertEqual(transform.pattern_artist, '')\n self.assertEqual(transform.pattern_album, '')\n self.assertEqual(transform.pattern_title, '')\n self.assertEqual(transform.pattern_ensemble, '')\n self.assertEqual(transform.pattern_conductor, '')\n self.assertEqual(transform.pattern_composer, '')\n self.assertEqual(transform.to_artist, '')\n self.assertEqual(transform.to_album, '')\n self.assertEqual(transform.to_title, '')\n self.assertEqual(transform.to_ensemble, '')\n self.assertEqual(transform.to_conductor, '')\n self.assertEqual(transform.to_composer, '')", "def test_flatten_array_failure():\n with pytest.raises(ValueError, match=\"Please pass in a valid array\"):\n flatten_array(\"\")", "def test_data(self):\n\n self.assertIsInstance(self.image.data, np.ndarray)", "def test_transform_data(self):\n # assemble\n input_data = (\n self.spark\n .read\n .parquet(self.test_data_path + 'employees'))\n\n expected_data = (\n self.spark\n .read\n .parquet(self.test_data_path + 'employees_report'))\n\n expected_cols = len(expected_data.columns)\n expected_rows = expected_data.count()\n expected_avg_steps = (\n 
expected_data\n .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))\n .collect()[0]\n ['avg_steps_to_desk'])\n\n # act\n data_transformed = transform_data(input_data, 21)\n\n cols = len(expected_data.columns)\n rows = expected_data.count()\n avg_steps = (\n expected_data\n .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))\n .collect()[0]\n ['avg_steps_to_desk'])\n\n # assert\n self.assertEqual(expected_cols, cols)\n self.assertEqual(expected_rows, rows)\n self.assertEqual(expected_avg_steps, avg_steps)\n self.assertTrue([col in expected_data.columns\n for col in data_transformed.columns])", "def test_safe_array_cast(self):\n msg = '^Copying array of size \\(5, 5\\) to convert it in the ' \\\n 'right format$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X.astype(int))\n\n msg = '^Copying array of size \\(3, 5\\) to create a ' \\\n 'C-contiguous version of it$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X[::2])\n\n np.testing.assert_array_equal(self.X,\n PoissonRegression._safe_array(self.X))", "def test_isarray_vrt(self):\n self.assertIsInstance(_test_array(landsat_vrt), np.ndarray)", "def test_transform(self):\n t = Identity()\n assert t.transform(\"yo\") == \"yo\"", "def check_array(self, v, t):\n raise NotImplementedError('check_array')", "def test_isarray_gtiff(self):\n self.assertIsInstance(_test_array(landsat_gtiff), np.ndarray)", "def test_array2vtk(self):\n # Put all the test arrays here.\n t_z = [] \n\n # Test the different types of arrays.\n t_z.append(numpy.array([-128, 0, 127], numpy.int8))\n\n # FIXME: character arrays are a problem since there is no\n # unique mapping to a VTK data type and back.\n #t_z.append(numpy.array([-128, 0, 127], numpy.character))\n t_z.append(numpy.array([-32768, 0, 32767], numpy.int16))\n t_z.append(numpy.array([-2147483648, 0, 2147483647], numpy.int32))\n t_z.append(numpy.array([0, 255], numpy.uint8))\n t_z.append(numpy.array([0, 65535], numpy.uint16))\n t_z.append(numpy.array([0, 4294967295L], numpy.uint32))\n t_z.append(numpy.array([-1.0e38, 0, 1.0e38], 'f'))\n t_z.append(numpy.array([-1.0e299, 0, 1.0e299], 'd'))\n\n # Check multi-component arrays.\n t_z.append(numpy.array([[1], [2], [300]], 'd'))\n t_z.append(numpy.array([[1, 20], [300, 4000]], 'd'))\n t_z.append(numpy.array([[1, 2, 3], [4, 5, 6]], 'f'))\n t_z.append(numpy.array([[1, 2, 3],[4, 5, 6]], 'd'))\n t_z.append(numpy.array([[1, 2, 3, 400],[4, 5, 6, 700]],\n 'd'))\n t_z.append(numpy.array([range(9),range(10,19)], 'f'))\n\n # Test if a Python list also works.\n t_z.append(numpy.array([[1., 2., 3., 400.],[4, 5, 6, 700]],\n 'd'))\n\n # Test if arrays with number of components not in [1,2,3,4,9] work.\n t_z.append(numpy.array([[1, 2, 3, 400, 5000],\n [4, 5, 6, 700, 8000]], 'd'))\n t_z.append(numpy.array([range(10), range(10,20)], 'd'))\n\n for z in t_z:\n vtk_arr = array_handler.array2vtk(z)\n # Test for memory leaks.\n self.assertEqual(vtk_arr.GetReferenceCount(),\n array_handler.BASE_REFERENCE_COUNT)\n self._check_arrays(z, vtk_arr)\n z1 = array_handler.vtk2array(vtk_arr)\n if len(z.shape) == 1:\n self.assertEqual(len(z1.shape), 1)\n if z.dtype.char != 'c':\n #print z1\n self.assertEqual(sum(numpy.ravel(z) - numpy.ravel(z1)), 0)\n else:\n #print z1.astype('c')\n self.assertEqual(z, z1.astype('c'))\n \n # Check if type conversion works correctly.\n z = numpy.array([-128, 0, 127], numpy.int8)\n vtk_arr = vtk.vtkDoubleArray()\n ident = id(vtk_arr)\n vtk_arr = array_handler.array2vtk(z, vtk_arr)\n # 
Make sure this is the same array!\n self.assertEqual(ident, id(vtk_arr))\n self._check_arrays(z, vtk_arr)\n\n # Check the vtkBitArray.\n vtk_arr = vtk.vtkBitArray()\n vtk_arr.InsertNextValue(0)\n vtk_arr.InsertNextValue(1)\n vtk_arr.InsertNextValue(0)\n vtk_arr.InsertNextValue(1)\n arr = array_handler.vtk2array(vtk_arr)\n self.assertEqual(numpy.sum(arr - [0,1,0,1]), 0)\n vtk_arr = array_handler.array2vtk(arr, vtk_arr)\n self.assertEqual(vtk_arr.GetValue(0), 0)\n self.assertEqual(vtk_arr.GetValue(1), 1)\n self.assertEqual(vtk_arr.GetValue(2), 0)\n self.assertEqual(vtk_arr.GetValue(3), 1)\n\n # ----------------------------------------\n # Test if the array is copied or not.\n a = numpy.array([[1, 2, 3],[4, 5, 6]], 'd')\n vtk_arr = array_handler.array2vtk(a)\n # Change the numpy array and see if the changes are\n # reflected in the VTK array.\n a[0] = [10.0, 20.0, 30.0]\n self.assertEqual(vtk_arr.GetTuple3(0), (10., 20., 30.))\n\n # Make sure the cache is doing its job.\n key = vtk_arr.__this__\n z = array_handler._array_cache.get(vtk_arr)\n self.assertEqual(numpy.sum(z - numpy.ravel(a)), 0.0)\n\n l1 = len(array_handler._array_cache)\n # del the Numeric array and see if this still works.\n del a\n self.assertEqual(vtk_arr.GetTuple3(0), (10., 20., 30.))\n # Check the cache -- just making sure.\n self.assertEqual(len(array_handler._array_cache), l1)\n\n # Delete the VTK array and see if the cache is cleared.\n del vtk_arr\n self.assertEqual(len(array_handler._array_cache), l1-1)\n self.assertEqual(array_handler._array_cache._cache.has_key(key),\n False)\n\n # Make sure bit arrays are copied.\n vtk_arr = vtk.vtkBitArray()\n a = numpy.array([0,1,0,1], numpy.int32)\n vtk_arr = array_handler.array2vtk(a, vtk_arr)\n del a\n self.assertEqual(vtk_arr.GetValue(0), 0)\n self.assertEqual(vtk_arr.GetValue(1), 1)\n self.assertEqual(vtk_arr.GetValue(2), 0)\n self.assertEqual(vtk_arr.GetValue(3), 1)\n \n # Make sure the code at least runs for all the non-complex \n # numerical dtypes in numpy.\n for dtype in (numpy.sctypes['int'] + numpy.sctypes['uint'] +\n numpy.sctypes['float']):\n array_handler.array2vtk(numpy.zeros((1,), dtype=dtype))", "def test_random_transform():\n # given\n train = pd.read_csv('source/train.csv')\n train['labels'] = train['labels'].map(ast.literal_eval)\n image_path = os.path.join('source', train.iloc[0].path)\n all_labels = train.iloc[0]['labels']\n for label in all_labels:\n if label['class'] == 'whiteboard':\n break\n xn = [int(float(x)) for x in label['xn'].split(';')][:4]\n yn = [int(float(y)) for y in label['yn'].split(';')][:4]\n labels = np.zeros((4, 2))\n for i in range(4):\n labels[i, 0] = xn[i]\n labels[i, 1] = yn[i]\n img = cv2.imread(image_path)\n kw = dict(rotation_range=15,\n height_shift_range=0.2,\n width_shift_range=0.2,\n shear_range=0.3,\n channel_shift_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n dim_ordering='tf',\n seed=1313)\n # when\n rimg, rlabels = image_generator.random_transform(img, labels, **kw)\n\n # then just assert transformation isn't changed much\n assert MultiPoint([[224.91875347, 58.05657097],\n [673.57648317, 189.27244333],\n [544.23308452, 381.12743459],\n [70.73339963, 312.7359806]]\n ).equals_exact(rlabels, 5)", "def test_TimeArray_convert_unit():", "def test_example(self, example_dataset, expected_result):\n\n transformer = PreprocessFeatures()\n result = transformer.fit_transform(example_dataset)\n\n assert (result == expected_result).all()", "def check_array(self, array: ArrayData, value: List[int]):\n assert self._call is 
not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n assert len(value) <= len(array), \"Array to compare against must contain a smaller or equal amount of elements.\"\n expected = self.array(value).name\n actual = \"la a2, \" + self._lookup_array(array)\n self._compare_int_array(array.name, actual, expected, value, exit_code = 2)", "def test_transcoder(self, raw, value):\n assert DPTSceneNumber.to_knx(value) == DPTArray(raw)\n assert DPTSceneNumber.from_knx(DPTArray(raw)) == value", "def test_numpy_ops(self):\n\n arr = np.array([1, 2, 3])\n c = Column('a', arr)\n eq = c == arr\n assert np.all(eq)\n assert len(eq) == 3\n assert type(eq) == Column\n assert eq.dtype.str == '|b1'\n eq = arr == c\n assert np.all(eq)\n\n lt = c - 1 < arr\n assert np.all(lt)", "def test_transform_array_anonymize(self, mock_maps):\n # Setup\n data = np.array(['bar', 'foo', 'foo', 'tar'])\n\n # Run\n transformer = Mock()\n transformer.anonymize = 'email'\n transformer.intervals = [1, 2, 3]\n\n mock_maps[id(transformer)] = {\n 'bar': 'bar_x',\n 'foo': 'foo_x',\n 'tar': 'tar_x'\n }\n\n result = CategoricalTransformer.transform(transformer, data)\n\n # Asserts\n expect_result_len = 4\n\n self.assertEqual(\n len(result),\n expect_result_len,\n \"Unexpected length of transformed data\"\n )", "def _check_array(self, X):\n x = np.copy(X)\n if np.isfortran(x) is False:\n # print (\"Array must be in Fortran-order. Converting now.\")\n x = np.asfortranarray(x)\n if self.sampling > x.shape:\n raise ValueError(\"'sampling' is greater than the dimensions of X\")\n return x", "def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)", "def test_rastrigin(self):\n rastrigin = get_problem('rastrigin', dimension=self.dimension)\n self.assertEqual(rastrigin(self.array), 0.0)", "def test_transform_array_no_anonymize(self, mock_maps):\n # Setup\n data = np.array(['bar', 'foo', 'foo', 'tar'])\n\n # Run\n transformer = Mock()\n transformer.anonymize = None\n transformer.intervals = [1, 2, 3]\n\n CategoricalTransformer.transform(transformer, data)\n\n # Asserts\n expect_maps_call_count = 0\n\n self.assertEqual(\n mock_maps.call_count,\n expect_maps_call_count,\n \"Dont call to the map encoder when not anonymize\"\n )", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_load_zero_transforms(self):\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 0)", "def test_rosenbrock(self):\n fun = get_problem('rosenbrock', self.dimension)\n self.assertEqual(fun(self.array2), 0.0)", "def test_salomon(self):\n fun = get_problem('salomon', self.dimension, -100.0, 100.0)\n self.assertEqual(fun(self.array), 0.0)", "def test_whitley(self):\n fun = get_problem('whitley', self.dimension)\n self.assertEqual(fun(self.array2), 0.0)", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def healthy_test(obj: np.ndarray) -> bool:\n nb_rows, nb_cols = obj.shape\n return nb_rows == nb_cols > 1 and np.array_equal(obj, colony(nb_rows))", "def test_convert_logical():", "def assert_content_equals_array(result, expected):\n 
assert isinstance(result, (pa.Array, pa.ChunkedArray))\n if isinstance(result, pa.ChunkedArray):\n result = pa.concat_arrays(result.iterchunks())\n assert result.equals(expected)", "def test_singles(self):\n self.assertEqual(singles(self.TestData), 3)\n self.assertEqual(singles(array([0,3,4])), 0)\n self.assertEqual(singles(array([1])), 1)", "def checkBasics(self, transform):\n for fromPoint in self.fromIter():\n toPoint = transform.forwardTransform(fromPoint)\n roundTripPoint = transform.reverseTransform(toPoint)\n for i in range(2):\n self.assertAlmostEqual(fromPoint[i], roundTripPoint[i])\n\n for deltaFrom in (\n Extent2D(0),\n Extent2D(0.1, -0.1),\n Extent2D(-0.15, 0.1),\n ):\n tweakedFromPoint = fromPoint + deltaFrom\n tweakedToPoint = transform.forwardTransform(tweakedFromPoint)\n linToPoint = transform.linearizeForwardTransform(\n fromPoint)(tweakedFromPoint)\n linRoundTripPoint = transform.linearizeReverseTransform(\n toPoint)(tweakedToPoint)\n for i in range(2):\n self.assertAlmostEqual(\n tweakedToPoint[i], linToPoint[i], places=2)\n self.assertAlmostEqual(\n tweakedFromPoint[i], linRoundTripPoint[i], places=2)", "def check_array(arr: Arrayable) -> np.ndarray:\n if isinstance(arr, np.ndarray):\n return arr\n return np.array(arr)", "def test_transform(self):\n X,Y,Z = self.generate_data()\n\n p={'k':-1,'r':0}\n\n task = mmSCHPOLY('sc2=0.5')\n res= task.fit_transform(X,Y,Z)\n # check if Instance\n self.assertIsInstance(res,Container)\n # check if expected response\n print res.colnames(**p)\n self.assertEqual(res.colnames(**p)==['0','Pol1stTerm_1','Pol2ndTerm_1'],True)\n self.assertEqual(np.all(res(**p)[:,0]-X()[:,0]==0),True)\n self.assertEqual(np.all(res(**p)[:,1]-X()[:,1]==0),True)\n self.assertEqual(np.all(res(**p)[:,2]-0.5*X()[:,1]**2==0),True)\n\n task = mmSCH2W('sc=0.1')\n res= task.fit_transform(X,Y,Z)\n # check if Instance\n self.assertIsInstance(res,Container)\n # check if expected response\n print res.colnames(**p)\n self.assertEqual(res.colnames(**p)==['Product_1_2'],True)\n self.assertEqual(np.all(res(**p)[:,0]-0.1*X()[:,1]*X()[:,2]==0),True)", "def testLargeArrayMixed():\n arr = [1, 4, 3, 5, 2]\n sort(arr)\n expectedArr = [1, 2, 3, 4, 5]\n assert isEqual(arr, expectedArr)", "def test_reverse_transform_array(self):\n # Setup\n data = np.array([-0.6, 0.2, 0.6, -0.2])\n normalized_data = pd.Series([0.4, 0.2, 0.6, 0.8])\n\n intervals = {\n 'foo': (0, 0.5),\n 'bar': (0.5, 0.75),\n 'tar': (0.75, 1),\n }\n\n # Run\n transformer = Mock()\n transformer._normalize.return_value = normalized_data\n transformer.intervals = intervals\n\n result = CategoricalTransformer.reverse_transform(transformer, data)\n\n # Asserts\n expect = pd.Series(['foo', 'foo', 'bar', 'tar'])\n\n pd.testing.assert_series_equal(result, expect)", "def test_sort_array(self):\r\n self.assertEqual(sort_array([6, 4, 9, 10]), [4, 6, 9, 10])", "def _array_is_aligned(self):\n rot_matrix = self.axes_wcs.wcs.pc\n return np.allclose(rot_matrix, np.eye(self.axes_wcs.wcs.naxis))", "def test_transform(self):\n pt = np.array([1.0, 2.0, 3.0])\n tr = pose.Pose()\n tr.position = onp.array([4.0, 5.0, 6.0])\n pt2 = tr.transform(pt)\n self.assertLess(np.linalg.norm(pt2 - np.array([5.0, 7.0, 9.0])), 1e-6)", "def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n 
self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)", "def test_arr2vtkPoints(self):\n a = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]\n p = array_handler.array2vtkPoints(a)\n self.assertEqual(p.GetPoint(0), (0.0, 0.0, 0.0))\n self.assertEqual(p.GetPoint(1), (1.0, 1.0, 1.0))\n p = vtk.vtkPoints()\n ident = id(p)\n p = array_handler.array2vtkPoints(numpy.array(a), p)\n self.assertEqual(p.GetPoint(0), (0.0, 0.0, 0.0))\n self.assertEqual(p.GetPoint(1), (1.0, 1.0, 1.0))\n self.assertEqual(id(p), ident)\n self.assertRaises(AssertionError, array_handler.array2vtkPoints,\n [0.0, 1.0])\n self.assertRaises(AssertionError, array_handler.array2vtkPoints,\n [0.0, 1.0, 1.0])", "def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)", "def _check_array(X):\n return check_array(X,\n accept_sparse=['csr', 'csc'], # Accept sparse csr, csc\n order=None, # Do not enforce C or Fortran\n copy=False, # Do not trigger copying\n force_all_finite=True, # Raise error on np.inf/np.nan\n ensure_2d=True, # Force 'X' do be a matrix\n allow_nd=True, # Allow 'X.ndim' > 2\n warn_on_dtype=False # Mute as 'dtype' is 'None'\n )", "def test_append_transform(self):\n\n # Default GroupTransform length should be 0 without append.\n self.assertEqual(self.group_tr.__len__(), 0)\n\n matrix_tr = OCIO.MatrixTransform()\n ff_tr = OCIO.FixedFunctionTransform()\n\n self.group_tr.appendTransform(matrix_tr)\n self.group_tr.appendTransform(ff_tr)\n\n self.assertEqual(self.group_tr.__len__(), 2)\n\n iterator = self.group_tr.__iter__()\n for i in [matrix_tr, ff_tr]:\n self.assertEqual(i, next(iterator))", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def test_equal13():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_as_float_array():\n X = np.ones((3, 10), dtype=np.int32)\n X = X + np.arange(10, dtype=np.int32)\n # Checks that the return type is ok\n X2 = as_float_array(X, copy=False)\n np.testing.assert_equal(X2.dtype, np.float32)\n # Another test\n X = X.astype(np.int64)\n X2 = as_float_array(X, copy=True)\n # Checking that the array wasn't overwritten\n assert as_float_array(X, False) is not X\n # Checking that the new type is ok\n np.testing.assert_equal(X2.dtype, np.float64)\n # Here, X is of the right type, it shouldn't be modified\n X = np.ones((3, 2), dtype=np.float32)\n assert as_float_array(X, copy=False) is X", "def test_non_pd_type_error(self):\n\n x = BaseTransformer(columns=\"a\")\n\n with pytest.raises(ValueError):\n\n x.transform(X=[1, 2, 3, 4, 5, 6])", "def test_step3(self):\n fun = get_problem('step3', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_transforms(self):\n\n rank_zero_deprecation(\n \"DataModule property `test_transforms` was deprecated in v1.5 and will be removed in v1.7.\"\n )\n return self._test_transforms", "def test_good_array(self):\n # Setup test\n filename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_ddt_array.xml\")\n out_name = \"physics_types_ddt_array\"\n 
out_source_name = out_name + '.F90'\n out_meta_name = out_name + '.meta'\n in_source = os.path.join(_SAMPLE_FILES_DIR, out_source_name)\n in_meta = os.path.join(_SAMPLE_FILES_DIR, out_meta_name)\n out_source = os.path.join(_TMP_DIR, out_source_name)\n out_meta = os.path.join(_TMP_DIR, out_meta_name)\n remove_files([out_source, out_meta])\n # Run dycore\n retcode, files = gen_registry(filename, 'se', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # Check return code\n amsg = \"Test failure: retcode={}\".format(retcode)\n self.assertEqual(retcode, 0, msg=amsg)\n flen = len(files)\n amsg = \"Test failure: Found {} files, expected 1\".format(flen)\n self.assertEqual(flen, 1, msg=amsg)\n amsg = \"{} does not exist\".format(out_meta)\n self.assertTrue(os.path.exists(out_meta), msg=amsg)\n amsg = \"{} does not exist\".format(out_source)\n self.assertTrue(os.path.exists(out_source), msg=amsg)\n # For each output file, make sure it matches input file\n amsg = \"{} does not match {}\".format(in_meta, out_meta)\n self.assertTrue(filecmp.cmp(in_meta, out_meta,\n shallow=False), msg=amsg)\n amsg = \"{} does not match {}\".format(in_source, out_source)\n self.assertTrue(filecmp.cmp(in_source, out_source,\n shallow=False), msg=amsg)", "def test00(self):\n a = np.arange(self.N)\n ac = bcolz.arange(self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_X_returned(self, df, expected):\n\n x = BaseTransformer(columns=\"a\", copy=True)\n\n df_transformed = x.transform(X=df)\n\n h.assert_equal_dispatch(\n expected=expected,\n actual=df_transformed,\n msg=\"Check X returned from transform\",\n )", "def testDegenerate(self):\n srt = asarray(self.copy())\n srt.sort(axis=1)\n return (srt[:,:-1] == srt[:,1:]).any(axis=1)", "def test00(self):\n a = np.arange(self.N)\n ac = bcolz.carray(a, dtype='i4', rootdir=self.rootdir)\n self.assertTrue(ac.dtype == np.dtype('i4'))\n a = a.astype('i4')\n self.assertTrue(a.dtype == ac.dtype)\n self.assertTrue(np.all(a == ac[:]))", "def test_equal14():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_cast_array(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1.0, 2.0]))", "def test_process_xform_list(self):\n img = GenericImageEntity(np.linspace(0, 10, 100))\n xforms = [DummyTransform_Add(1), DummyTransform_Multiply(2)]\n img_expected = (img.get_data() + 1) * 2\n img_actual = utils.process_xform_list(img, xforms, RandomState())\n self.assertTrue(np.allclose(img_actual.get_data(), img_expected))", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def is_array(self):\n return False", "def test_step(self):\n fun = get_problem('step', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_equitability(self):\n c = array([5])\n self.assertFloatEqual(equitability(c), 0)\n c = array([5,5])\n self.assertFloatEqual(equitability(c), 1)\n c = array([1,1,1,1,0])\n self.assertEqual(equitability(c), 1)", "def test_apply(Group: Type[jaxlie.MatrixLieGroup]):\n T_w_b = sample_transform(Group)\n p_b = onp.random.randn(Group.space_dim)\n\n if Group.matrix_dim == Group.space_dim:\n assert_arrays_close(\n T_w_b @ p_b,\n T_w_b.apply(p_b),\n T_w_b.as_matrix() @ p_b,\n )\n else:\n # 
Homogeneous coordinates\n assert Group.matrix_dim == Group.space_dim + 1\n assert_arrays_close(\n T_w_b @ p_b,\n T_w_b.apply(p_b),\n (T_w_b.as_matrix() @ onp.append(p_b, 1.0))[:-1],\n )", "def test_cast_array(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 5)\n assert np.all(dim.cast(np.array([\"1\", \"2\"])) == np.array([1, 2]))", "def test_chung_reynolds(self):\n fun = get_problem('chung_reynolds', self.dimension, -100, 100)\n self.assertEqual(fun(self.array), 0.0)", "def test_shannon(self):\n c = array([5])\n self.assertFloatEqual(shannon(c), 0)\n c = array([5,5])\n self.assertFloatEqual(shannon(c), 1)\n c = array([1,1,1,1,0])\n self.assertEqual(shannon(c), 2)", "def _numpy_checker(x, y):\r\n x, y = x[0], y[0]\r\n if (x.dtype != y.dtype or x.shape != y.shape\r\n or numpy.any(numpy.abs(x - y) > 1e-10)):\r\n raise Exception(\"Output mismatch.\", {'performlinker': x, 'clinker': y})", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def test_fix_data(self):\n cube = self.fix.fix_data(self.cube)\n np.testing.assert_allclose(cube.data[0], 1.0)\n np.testing.assert_allclose(cube.data[2], 2.0)\n assert not np.ma.is_masked(cube.data[0])\n assert np.ma.is_masked(cube.data[1])\n assert not np.ma.is_masked(cube.data[2])" ]
[ "0.7060562", "0.6684099", "0.65899926", "0.65693116", "0.6499308", "0.6458877", "0.645005", "0.64077723", "0.63759315", "0.62878346", "0.61688924", "0.6164394", "0.6134178", "0.61210686", "0.6116632", "0.6107201", "0.6093356", "0.6074735", "0.60722286", "0.60400724", "0.6036963", "0.60263395", "0.60261834", "0.60085094", "0.600174", "0.598951", "0.5983081", "0.597927", "0.59588075", "0.59465843", "0.5928631", "0.5911022", "0.58999765", "0.5897988", "0.5893011", "0.5891747", "0.58734167", "0.5861575", "0.58428764", "0.58305", "0.5796157", "0.5775694", "0.575001", "0.5745177", "0.57449067", "0.57409203", "0.57353616", "0.5725081", "0.5723638", "0.5722376", "0.5711625", "0.5702057", "0.5696442", "0.5687362", "0.56832135", "0.56731176", "0.56660205", "0.5658016", "0.5658016", "0.5622617", "0.5611309", "0.5610457", "0.56090826", "0.56057024", "0.5597082", "0.5592878", "0.5577249", "0.5575596", "0.55754155", "0.55743545", "0.55684", "0.55569035", "0.55562925", "0.55561084", "0.5555563", "0.5551319", "0.5550579", "0.55479544", "0.55476063", "0.55474246", "0.55446166", "0.55357313", "0.55217755", "0.55192804", "0.55160064", "0.55154246", "0.5513446", "0.5509717", "0.5503222", "0.55016893", "0.54958904", "0.5494454", "0.5491907", "0.54905385", "0.54887366", "0.5488241", "0.5487704", "0.54819906", "0.54801035", "0.5477245", "0.54707515" ]
0.0
-1
Turns the (n,2) array into a (n,4) array.
def transform(array):
    assert array.shape == (10, 2)
    new = Array(columns="abcd")
    for x, y in array:
        new.append([x, y, x + y, x * y])
    return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_nibble_array(arr: ndarray) -> ndarray:\n arr = arr.ravel()\n return (arr[::2] + (arr[1::2] << 4)).astype(\"uint8\")", "def matrix4_to_3x4_array(mat):\r\n return tuple(f for v in mat[0:3] for f in v)", "def create_array( n ):", "def to_array(X, n=2):\n return np.array([np.eye(n)[x] for x in X])", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def blockshaped(arr, nrows, ncols):\r\n\t h, w = arr.shape\r\n\t return (arr.reshape(h//nrows, nrows, -1, ncols)\r\n\t .swapaxes(1,2)\r\n\t .reshape(-1, nrows, ncols))", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def make_2d(x):\n return x.reshape((1, len(x)))", "def Transposer(arr,n):\n # Check that the requested special transposition is feasible #\n if arr.shape[0]%n != 0:\n logging.critical('Cannot apply the recoupling on the array, the requested repetition is not a divisor of the array size')\n sys.exit()\n\n # Define new array #\n x = arr.shape[0]\n y = arr.shape[1]\n new_x = x//n\n new_y = y*n\n new_arr = np.zeros((new_x,new_y))\n\n # Loop #\n idx_row = 0 # row index of new array\n for i in range(arr.shape[0]//n):\n arr_slice = arr[i*n:(i+1)*n,:] # Slice that we need to \"transpose\"\n idx_col = 0 # col index of new array\n for col in range(arr_slice.shape[1]): # loop over slice to fill the row of new array\n for row in range(arr_slice.shape[0]):\n new_arr[idx_row,idx_col] = arr_slice[row,col]\n idx_col += 1\n idx_row += 1\n\n return new_arr", "def twoDize(array, width):\n count = 0\n output = []\n temp = []\n while len(array) > 0:\n temp.append(array.pop())\n if len(temp) == width:\n output.append(temp)\n temp = []\n return output", "def to_4d(x):\n xsh = np.asarray(x.get_shape().as_list())\n return tf.reshape(x, [xsh[0], xsh[1], xsh[2], np.prod(xsh[3:])])", "def flatten_npar(np_array):\n \n itr = len(np_array)\n start = np_array[0]\n \n for i in range(1,itr):\n start = np.hstack((start,np_array[i]))\n \n return(np.array(start))", "def beta_2d_to_4d(betas_2d):\n\tbetas_4d = np.reshape(betas_2d.T, (64,64,34) + (-1,))\n\treturn betas_4d", "def rearrange(self, an_array):\n\tnew_array = an_array*0\n\tn=new_array.size()\n\tnew_array[n/2:] = an_array[:n/2]\n\tnew_array[:n/2] = an_array[n/2:]\n\treturn new_array", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def blockshaped(arr, nrows, ncols):\n h, w = arr.shape\n return (arr.reshape(h//nrows, nrows, -1, ncols)\n .swapaxes(1,2)\n .reshape(-1, nrows, ncols))", "def Repeater(arr,n):\n new_arr = np.zeros((arr.shape[0]*n,arr.shape[1]),dtype=object)\n for i in range(0,arr.shape[0]):\n new_row = np.tile(arr[i,:],(n,1))\n new_arr[i*n:(i+1)*n,:] = new_row\n return new_arr", "def arr_to_tup(a):\n return tuple(a.reshape(1, -1)[0])", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def s(a,N):\n a=np.reshape(a,(1,N**2),order='F').T\n return a", "def vertify(data: list):\n assert len(data) == 4\n n = [float(d) for d in data]\n return np.array([[n[0], n[1]], [n[2], n[1]], [n[2], n[3]], [n[0], n[3]], [n[0], n[1]]])", "def even_split(a, n):\n n = min(n, len(a)) # if less elements in array than chunks to output, 
change chunks to array length\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n array = np.empty((2,2), dtype=dtype)\n array[0,0] = A11\n array[0,1] = A12\n array[1,0] = A21\n array[1,1] = A22\n return array", "def _atleastnd(array, n):\n return array[tuple((n - array.ndim) * [None] + [...])]", "def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))", "def reshape_pixel_array(self, pixel_arr):\n reshaped_pixel_arr = []\n n = 28\n while n <= len(pixel_arr):\n reshaped_pixel_arr.append(pixel_arr[n-28:n])\n n+=28\n\n return reshaped_pixel_arr", "def steppify(arr, axis='x'):\n\t\n\tif axis == 'x':\n\t\tnewarr = np.r_[arr[0], np.dstack((arr[1:], arr[1:])).flatten()]\n\t\n\telif axis == 'y':\n\t\tnewarr = np.r_[np.dstack((arr[:-1], arr[:-1])).flatten(), arr[-1]]\n\t\n\telse:\n\t\tprint('your axes in steppify are improperly identified')\n\n\treturn newarr", "def double(arr):\n newarr = np.array([(xx,xx) for xx in arr]).ravel()\n return newarr", "def init_four_d_array(dimens, val):\n w, x, y, z = dimens\n return [[[[val for l in range(z)]\n for k in range(y)]\n for j in range(x)]\n for i in range(w)]", "def get_field_array(self):\n array_j = []\n array_i = []\n n = 3\n i = self.square_size_y / 2\n while i <= self.field_height:\n if n % 2 == 1:\n j = self.square_size_x / 2\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n else:\n j = 0\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n i += self.square_size_y\n self.array = array_i\n return array_i", "def chop_up_to_4s(list, n):\n sublists = []\n num_sublists = 4**(n-1)\n for i in range(num_sublists):\n sublists.append(list[4*i: 4*i + 4])\n return sublists", "def turnArraySideways(array):\n newList = []\n\n for item in array[0]:\n newList.append([])\n\n x = -1\n for row in array:\n x = x + 1\n y = - 1\n for item in row:\n y = y + 1\n newList[y].append(item)\n\n return newList", "def split_array(a):\n n = len(a)\n if n == 1:\n return a\n index = n // 2\n b = a[:index]\n c = a[index:]\n return b, c", "def m2n(buf, shape, typecode, ismatrix=False):\n a = np.frombuffer(buf, dtype=typecode).reshape(shape)\n if ismatrix: a = np.asmatrix(a)\n return a", "def test_integer_split_2D_default(self):\n a = array([arange(10),arange(10)])\n res = array_split(a,3)\n desired = [array([arange(10)]),array([arange(10)]),array([])]\n compare_results(res,desired)", "def permute4(values: List) -> List:\n o = []\n ld4 = len(values) // 4\n for i in range(ld4):\n o.extend(\n [values[i], values[i + ld4], values[i + ld4 * 2], values[i + ld4 * 3]])\n return o", "def from_nibble_array(arr: ndarray) -> ndarray:\n shape = arr.size\n\n new_arr = zeros((shape * 2), dtype=uint8)\n\n new_arr[::2] = arr & 0xF\n new_arr[1::2] = arr >> 4\n\n return new_arr", "def Arrays(n):\n arr = [x for x in range(2 * n +1)]\n shuffle(arr)\n return sorted(arr[:n]), sorted(arr[n:])", "def _to_arrays(particle, count: int):\n if (\n isinstance(particle, np.ndarray)\n and len(particle.shape) == 2\n and particle.shape[0] == 4\n ):\n # Multiple particles provided\n return particle\n\n elif len(particle) == 4:\n # One 
particle\n out = np.zeros((4, count))\n out[0] += particle[0]\n out[1] += particle[1]\n out[2] += particle[2]\n out[3] += particle[3]\n\n return out\n\n raise ValueError(\n f\"target shape invalid: should either be a length-4 iterable [x, y, z, t] or a shape (4, N) array\\nGot {type(particle)}\"\n )", "def convertToTwoDList(l, n):\n\treturn [l[i:i+n] for i in range(0, len(l), n)]", "def flatten(self, arr):\n shape = arr.shape\n return arr.reshape(shape[0] * shape[1], *shape[2:])", "def array2arrays(self, frame, ballx, bally):\n r = []\n for i in range(len(frame)):\n temp = []\n temp.append(frame[i])\n temp.append(ballx[i])\n temp.append(bally[i])\n r.append(temp)\n return r", "def getIntArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def GPy_reformat_3D(array):\r\n n_timesteps = np.shape(array)[-1]\r\n if len(np.shape(array)) == 1:\r\n array = array.reshape(n_timesteps, 1)\r\n return [array, array, array]\r\n elif len(np.shape(array)) == 2:\r\n array = array.T\r\n array1 = array[:, 0, None]\r\n array2 = array[:, 1, None]\r\n array3 = array[:, 2, None]\r\n return [array1, array2, array3]\r\n else:\r\n return print(\"Error in GPy_reformat, input array is wrong shape.\")", "def greedy_split(arr, n, axis=0):\n length = arr.shape[axis]\n # compute the size of each of the first n-1 blocks\n block_size = int(np.ceil(length / float(n)))\n # the indices at which the splits will occur\n ix = np.arange(block_size, length, block_size)\n return np.array(np.split(arr, ix, axis))", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def conver1D(array):\n l = array.shape\n total = np.zeros((0, l[1] * l[2]), dtype=np.float32)\n i = 0\n for i in range(24):\n tempData = array[i]\n array1D = []\n for x in tempData:\n for s in x:\n array1D.append(s)\n total = np.insert(total, i, array1D, axis=0)\n return total", "def unblockshaped(arr, h, w):\n n, nrows, ncols = arr.shape\n return (arr.reshape(h//nrows, -1, nrows, ncols)\n .swapaxes(1,2)\n .reshape(h, w))", "def numpyReshape(array):\n return np.array(array, dtype = float).reshape(1, len(array))", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def _t(a):\n return a.transpose((0, 2, 1))", "def vstack (tup ):\n\tl = len(tup[0])\n\tfor j in tup:\n\t\tif l!=len(j):\n\t\t\tprint \"error: dimensions don't match\"\n\t\t\treturn\n\tm = [];\n\tfor i in range(0,len(tup)):\n\t\tm.extend(zip(*tup[i]))\n\treturn zip(*m)", "def changeArray(array):\r\n\r\n return [[float(array[j][i]) for j in range(len(array))] for i in range(len(array[0]))]", "def ToMatrix(lines):\r\n #print lines\r\n arr = np.zeros([4, 4])\r\n for j in xrange(4):\r\n arr[j, :] = np.array([int(num) for num in lines[j].split(\" \")])\r\n #print np.array([int(num) for num in lines[j].split(\" \")])\r\n return arr", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def _stride_arr(self, stride):\n return [1, stride, stride, 1]", "def gen_4_tuples(it):\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))", "def gen_4_tuples(it):\n\n return list(zip(it[0::4], it[1::4], it[2::4], it[3::4]))", "def sf01(arr):\n s = arr.shape\n return 
arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def sf01(arr):\n s = arr.shape\n return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])", "def hstack (tup ):\n\tl = len(zip(*tup[0]))\n\tfor j in tup:\n\t\tif l!=len(zip(*j)):\n\t\t\tprint \"error: dimensions don't match\"\n\t\t\treturn\n\tm = [];\n\tfor i in range(0,len(tup)):\n\t\tm.append(tup[i])\n\treturn m", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def segment_array(array=[],N=0,value=0):\n \n array = np.concatenate([array,np.ones(N-len(array)%N)*value])\n \n return array.reshape((-1,N))", "def n(l):\n return np.array(l,dtype=object)", "def _to_array2(self, maps, norb):\n nstate = len(maps[(0,)])\n arrays = numpy.zeros((norb, nstate, 3), dtype=numpy.int32)\n for i in range(norb):\n for k, data in enumerate(maps[(i,)]):\n arrays[i, k, 0] = data[0]\n arrays[i, k, 1] = data[1]\n arrays[i, k, 2] = data[2]\n return arrays", "def transform(self, x: Array2D) -> Array2D:", "def transpose():", "def funcify_3d(arrayin, func2d):\r\n assert(len(arrayin.shape) >= 2)\r\n elem = arrayin.size / (arrayin.shape[-1] * arrayin.shape[-2])\r\n if elem == 2 :\r\n arrayout = func2d(arrayin)\r\n else :\r\n array = arrayin.flatten().reshape( (elem, arrayin.shape[-2], arrayin.shape[-1]))\r\n arrayout = []\r\n for i in range(elem):\r\n arrayout.append(func2d(array[i]))\r\n arrayout = np.array(arrayout).reshape( arrayin.shape )\r\n return arrayout", "def sampling(self,arr):\n H=0\n W=0\n if arr.shape[0]%2 == 0:\n H = arr.shape[0]/2\n else:\n H = 1+arr.shape[0]/2\n\n if arr.shape[1]%2 == 0:\n W = arr.shape[1]/2\n else:\n W = 1+arr.shape[1]/2\n \n new_arr = numpy.zeros((H,W),dtype = numpy.int)\n for i in range(H):\n for j in range(W):\n new_arr[i][j] = arr[2*i][2*j]\n return new_arr", "def indexreshape(n, m):\n if n < m:\n raise ValueError('m must be lower or equal to n')\n\n delta = (n % m) // 2\n end = n - (n % m)\n step = end // m\n r = tuple((i + delta, i + delta + step - 1) for i in range(0, end, step))\n return r", "def reformat(dataset):\n x = dataset[:, 1] \n x = np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y", "def arrayManipulation_brute(n, queries):\n arr = [0] * n\n\n for i, row in enumerate(queries):\n a, b, k = row[0], row[1], row[2]\n for j in range(a - 1, b):\n arr[j] = arr[j] + k\n print(f'array size {arr.__sizeof__()/1000000}')\n return max(arr)", "def assure_2d(array):\n array = np.array(array, copy=False, subok=True, ndmin=1)\n if array.ndim == 2:\n return array\n elif array.ndim == 1:\n return array[:, np.newaxis]\n else:\n raise RuntimeError(\"Array must be 1 or 2 dimensional.\")", "def flatten(a, start=0, count=2):\n s = a.shape\n return np.reshape(a, s[:start] + (-1,) + s[start+count:])", "def get_array_combi(data_array, grid_size, combi_array):\n result = [0] * grid_size # initialise to 0's\n\n offset = 0\n for (index,size) in enumerate(data_array):\n \n head = offset + combi_array[index]\n result[head:head+size] = [1] * size\n \n offset += size\n\n return result", "def numpy_array(arr: Array) -> 
np.ndarray:\n\n if not isinstance(arr, np.ndarray):\n arr_np = np.asarray(arr)\n if isinstance(arr, (list, tuple)) and len(arr_np.shape) == 2:\n arr_np = np.transpose(arr_np)\n return arr_np\n else:\n return arr", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def _get_n2(self) -> np.ndarray:\n if self.nstep is None:\n n2_disp1d = np.arange(-self.N // 2 + 1, self.N // 2 + 1) ** 2\n else:\n p1d = np.arange(self.N) * 2 * np.pi / self.L\n\n coeffs = get_laplace_coefficients(self.nstep)\n norm = self.L ** 2 / self.epsilon ** 2 / 4 / np.pi ** 2\n\n n2_disp1d = np.sum(\n [\n -cn * np.cos(n * p1d * self.epsilon) * norm\n for n, cn in coeffs.items()\n ],\n axis=0,\n )\n\n return np.sum(\n [el.flatten() for el in np.meshgrid(*[n2_disp1d] * self._ndim)], axis=0\n )", "def flattenImage(input_array):\r\n shp = np.size(input_array)\r\n return np.reshape(input_array, (shp,))", "def reshape_to_batch(array):\n if len(array.shape) == 2:\n array = numpy.expand_dims(array, axis=2)\n array = numpy.expand_dims(array, axis=0)\n return array", "def split_3d_array_into_channels(arr):\n return [arr[:, :, i] for i in range(arr.shape[-1])]", "def conv_array(n):\n\n\t# Allocate kernel\n\tconv = np.zeros(n)\n\n\t# Give values to elements\n\tfor i in range(0,n-1):\n\t\tconv[i] = 1/n\n\n\treturn conv", "def row_to_array(r):\n a = np.ma.array([i for i in r.as_void()])\n return a", "def upper_triangular_matrix_to_full_matrix(arr, n):\n triu0 = np.triu_indices(n, 0)\n triu1 = np.triu_indices(n, 1)\n tril1 = np.tril_indices(n, -1)\n\n mat = np.zeros((n, n), dtype=float)\n mat[triu0] = arr\n mat[tril1] = mat[triu1]\n\n return mat", "def create2d(row_count, col_count, value=None):\n a = [None] * row_count\n for row in range(row_count):\n a[row] = [value] * col_count\n return a", "def _reshape(self, arr: np.ndarray) -> np.ndarray:\n return arr.reshape(self.TileHeight.value, self.TileWidth.value, self.bands,)", "def segment_array(a):\n\n l = [array(a.typecode) for chaff in range(16)]\n index = 0\n\n for i in range(0, len(a), 16):\n l[index].extend(a[i:i + 16])\n index = (index + 1) % 16\n\n return l", "def _make_coord_array(t,d):\n Nt,Nd = t.shape[0],d.shape[0]\n X = np.zeros([Nt,Nd,3],dtype=np.float64)\n for j in range(Nt):\n for k in range(Nd):\n X[j,k,0] = t[j]\n X[j,k,1:3] = d[k,:]\n X = np.reshape(X,(Nt*Nd,3))\n return X", "def divide_array(arr, n):\r\n if n <= 0:\r\n raise ValueError(\"Number of elements value must be greater than 0\")\r\n\r\n if len(arr) % n > 0:\r\n output_size = len(arr) // n + 1 # python 3 integer division\r\n last_output_size = output_size - (len(arr) % n)\r\n else:\r\n output_size = len(arr) // n\r\n last_output_size = output_size\r\n\r\n output_arr = []\r\n slice_start = 0\r\n slice_end = output_size\r\n i = 0\r\n\r\n while i < n:\r\n if i < (n - 1):\r\n output_item = arr[slice_start : slice_end]\r\n output_arr.append(output_item)\r\n slice_start += output_size\r\n slice_end += output_size\r\n else:\r\n slice_end = slice_start + last_output_size\r\n output_item = arr[slice_start : slice_end]\r\n output_arr.append(output_item)\r\n i += 1\r\n\r\n return output_arr", "def cast_list_to_numpy_array(grid, size):\n npgrid = np.zeros((size, size), dtype='int16')\n test = str(grid).split()\n if (len(test) != size**2):\n print(\"It seems like the infos.txt is wrong about the size\")\n exit(1)\n l = 0\n n = 0\n new = []\n for i in test:\n tmp = re.sub('[^0-9]', '', i)\n if tmp.isdigit():\n new.append(int(tmp))\n if len(new) < size * size:\n print(\"Missing elements in grid..\")\n 
return None\n elif len(new) > size * size:\n print(\"Too many elements in grid.\")\n return None\n elif check_duplicate_numbers_in_grid(new) is True:\n print(\"Duplicates found inside the grid.\")\n return None\n return new", "def unravel(arrayin, shape = 0):\r\n N = arrayin.shape[0]\r\n n = int(np.sqrt(N/2))\r\n if type(shape) != tuple :\r\n arrayout = arrayin[:N/2].reshape(n,n) + 1.0J * arrayin[N/2:].reshape(n,n)\r\n else :\r\n arrayout = arrayin[:N/2].reshape(shape) + 1.0J * arrayin[N/2:].reshape(shape)\r\n return arrayout", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def copy_grid (grid):\r\n new_grid = []\r\n for i in range (4):\r\n new_grid.append ([])\r\n for j in range (4):\r\n new_grid[i].append (grid[i][j])\r\n return new_grid", "def coordinate_matrix(n):\n xcoordinates = np.zeros((n,n))\n xcoordinates = xcoordinates + np.arange(0,worksize) #broadcasting trick\n ycoordinates = xcoordinates.T\n return np.array([ycoordinates,xcoordinates])" ]
[ "0.6299503", "0.62471116", "0.6135411", "0.60306585", "0.59801215", "0.59801215", "0.5971381", "0.5949728", "0.5925237", "0.58508295", "0.5849235", "0.58042186", "0.57584757", "0.5721797", "0.571671", "0.56908566", "0.56908566", "0.56908566", "0.56880254", "0.5633662", "0.5616265", "0.5614675", "0.559672", "0.5590701", "0.55865484", "0.55704296", "0.55688703", "0.5562045", "0.5555896", "0.5552094", "0.5550944", "0.5533031", "0.5530264", "0.55235034", "0.55009836", "0.548087", "0.5478036", "0.5475453", "0.54557914", "0.5454278", "0.54414827", "0.5437861", "0.54323566", "0.54121006", "0.5409598", "0.5405315", "0.5393452", "0.5385925", "0.5383054", "0.5370921", "0.53672034", "0.534174", "0.53351843", "0.5326394", "0.5314771", "0.5313655", "0.53112835", "0.52863526", "0.5277218", "0.52741295", "0.52623224", "0.52623224", "0.52528673", "0.52528673", "0.52528673", "0.52528673", "0.52360415", "0.5230343", "0.52296025", "0.52255744", "0.5213924", "0.5202317", "0.52017415", "0.52006143", "0.5199923", "0.51919645", "0.51914006", "0.51891804", "0.5177607", "0.51683176", "0.5162979", "0.5161118", "0.5156194", "0.5147742", "0.5147622", "0.5133563", "0.51310074", "0.5121181", "0.512042", "0.5115889", "0.511524", "0.51044047", "0.50872767", "0.5081855", "0.5063249", "0.50436896", "0.50399673", "0.50342894", "0.50265974", "0.5021043" ]
0.5201071
73
A Sequence of AWG Cores.
def awgs(self) -> t.Union[t.Sequence[AWG], Node]:
    if "AWG" not in self.features.options():
        logger.error("Missing option: AWG")
        return Node(
            self._root,
            self._tree + ("awgs",),
        )
    return NodeList(
        [
            AWG(
                self.root,
                self._tree + ("awgs", str(i)),
                self.serial,
                i,
                self.device_type,
                self.device_options,
            )
            for i in range(len(self["awgs"]))
        ],
        self._root,
        self._tree + ("awgs",),
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cg(self):\n\n _cg = [0, 0, 0]\n _sumProduct = [0, 0, 0]\n _sumWeight = 0\n\n for fstnr in self:\n # Calculate the sum of the products\n for i, component in enumerate(fstnr.xyz):\n _sumProduct[i] += component * fstnr.wt\n\n # Calculate the sum of the areas\n _sumWeight += fstnr.wt\n\n # Divide sum of products by sum of areas\n for i, product in enumerate(_sumProduct):\n _cg[i] = product / _sumWeight\n\n return _cg", "def get(self) -> list:\n return self.__cogs", "def aic_c(self):\n if hasattr(self, '_aic_c'):\n return self._aic_c\n else:\n k = len(self.params)\n n = self.data['n'].sum()\n self._aic_c = self.aic() + (2*k**2 + 2*k)/(n - k - 1)\n return self._aic_c", "def __calc_CoagS(self):\n\n Dp_small = self.dp_lim[0]*1e-9 # in m\n temp = self.temp_data # Kelvin\n pres = self.pres_data # Pascal\n Dp = self.par_diam*1e-9 # m\n time = self.par_time # days\n N = self.__dNdlog2dN(Dp,self.smoothed_par_data) # cm-3\n findex = np.argwhere(Dp>=Dp_small).flatten()\n big_R = Dp[findex]/2.\n big_N = N[:,findex]\n k_B = 1.38064852e-23 # Boltzmann constant m2 kg s-2 K-1\n r0=Dp_small/2.\n r1=r0\n dens=1000.\n self.CoagS=np.zeros(time.shape)\n for i in range(0,len(time)):\n lamda=(6.73e-8*temp[i]*(1+(110.4/temp[i])))/(296*pres[i]/101325.0*1.373)\n myy=(1.832e-5*(temp[i]**(1.5))*406.4)/(5093*(temp[i]+110.4))\n kn1=lamda/r1\n kn=lamda/big_R\n CC= 1.+(kn*(1.142+(0.558*np.exp((-.999)/kn))))\n CC1= 1. + (kn1*(1.142+(0.558*np.exp((-.999)/kn1))))\n D = (k_B*temp[i]*CC)/(6.*np.pi*myy*big_R)\n D1 = (k_B*temp[i]*CC1)/(6.*np.pi*myy*r1)\n M = 4./3.*np.pi*(big_R**3)*dens\n M1 = 4./3.*np.pi*(r1**3)*dens\n c= np.sqrt((8.*k_B*temp[i])/(np.pi*M))\n c1= np.sqrt((8.*k_B*temp[i])/(np.pi*M1))\n c12= np.sqrt((c**2)+(c1**2))\n r12= big_R+r1\n D12= D+D1\n CCONT = 4.*np.pi*r12*D12\n CFR = np.pi*r12*r12*c12\n L=(8.*D)/(np.pi*c)\n L1=(8.*D1)/(np.pi*c1)\n SIG=(1./(3.*r12*L))*((r12+L)**3-(r12*r12+L*L)**1.5)-r12\n SIG1=(1./(3.*r12*L1))*((r12+L1)**3-(r12*r12+L1*L1)**1.5)-r12\n SIG12= np.sqrt((SIG**2)+(SIG1**2))\n KO=CCONT/((r12/(r12+SIG12))+(CCONT/CFR))\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)\n if (r0==big_R[0]):\n self.CoagS[i] = 0.5*KO*big_N[i,0]*1e6+np.nansum(KO*big_N[i,1:]*1e6)\n else:\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)", "def test_multiple_cregs(self):\n qc = QuantumCircuit(2)\n cr1 = ClassicalRegister(1, \"cr1\")\n cr2 = ClassicalRegister(1, \"cr2\")\n qc.add_register(cr1)\n qc.add_register(cr2)\n qc.measure(0, 0)\n qc.measure(1, 1)\n\n result = Sampler().run(qc, shots=100).result()\n self.assertDictAlmostEqual(result.quasi_dists[0], {0: 1})", "def coupons(self):\r\n return Coupons(self)", "def circuits(self) -> List[QuantumCircuit]:\n circ0 = QuantumCircuit(1, 1)\n circ0.measure(0, 0)\n\n circ1 = QuantumCircuit(1, 1)\n circ1.x(0)\n circ1.measure(0, 0)\n\n for i, circ in enumerate([circ0, circ1]):\n circ.metadata = {\n \"experiment_type\": self._type,\n \"qubit\": self.physical_qubits[0],\n \"xval\": i,\n }\n\n return [circ0, circ1]", "def carbs(self) -> List[RecipeObjectNutrientsCalories]:\n return self._carbs", "def getGC(self):\n numGC = self.sequence.upper().count(\"G\") + self.sequence.upper().count(\"C\")\n self.gc = float(numGC)/len(self.sequence)\n return self.gc", "def n_cs(self):\n pass", "def concentration(self):\n return [node.concentration for node in self]", "def initialize_cell_cycles(self,g_av=1,g_sig=0.2):\n # self.tc0 = np.random.uniform(0,1,self.nc)\n self.g_av = g_av\n self.g_sig = g_sig\n self.tc = np.random.uniform(0,1,self.nc)\n self.g = 
np.random.normal(self.g_av,self.g_sig,self.nc)", "def Ag_seq(RNs):\n seq = []\n for res in range(cf.lAg):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def aucs(self):\n self._compute()\n return self._aucs", "def calculate_gc_content(sequence):\n sequence = sequence.upper()\n sc = Counter(sequence)\n return round((sc['C'] + sc['G']) / (sc['A'] + sc['C'] + sc['G'] + sc['T']) * 100, 2)", "def gc(self):\n g = self.seq.count('G')\n g += self.seq.count('g')\n c = self.seq.count('C')\n c += self.seq.count('c')\n return (g + c) / len(self.seq)", "def generate_ca(valid_attributes):\n attr_list = valid_attributes.split(',')\n nb_attributes = len(attr_list)\n\n gen_g1 = G1.generator()\n gen_g2 = G2.generator()\n exp = [G1.order().random() for _ in range(nb_attributes + 1)]\n\n pk = [gen_g1] + [gen_g1 ** i for i in exp[1:]] + [gen_g2] + [gen_g2 ** i for i in exp]\n sk = gen_g1 ** exp[0]\n\n sk = [sk, pk, attr_list]\n pk = [pk, attr_list]\n\n\n return (jsonpickle.encode(pk).encode(), jsonpickle.encode(sk).encode())", "def ac(self):\n return np.array(self['gen'], dtype=np.float32)", "def cLCG(G):\n \n gens = []\n \n for g in G:\n gens.append(LCG(*g))\n \n m0 = G[0][3]-1\n \n while True:\n yield sum([(-1**j)*next(g) for j,g in enumerate(gens)]) % m0", "def test_get_qasm_all_gates(self):\n q_program = QuantumProgram(specs=self.QPS_SPECS_NONAMES)\n qc = q_program.get_circuit()\n qr = q_program.get_quantum_register()\n cr = q_program.get_classical_register()\n qc.u1(0.3, qr[0])\n qc.u2(0.2, 0.1, qr[1])\n qc.u3(0.3, 0.2, 0.1, qr[2])\n qc.s(qr[1])\n qc.s(qr[2]).inverse()\n qc.cx(qr[1], qr[2])\n qc.barrier()\n qc.cx(qr[0], qr[1])\n qc.h(qr[0])\n qc.x(qr[2]).c_if(cr, 0)\n qc.y(qr[2]).c_if(cr, 1)\n qc.z(qr[2]).c_if(cr, 2)\n qc.barrier(qr)\n qc.measure(qr[0], cr[0])\n qc.measure(qr[1], cr[1])\n qc.measure(qr[2], cr[2])\n result = q_program.get_qasm()\n self.assertEqual(len(result), (len(qr.name) * 23 +\n len(cr.name) * 7 +\n 385))", "def covar(self):\n wwt = self.ww.copy()\n wwt[self.ww>0] = 1.0/self.ww[self.ww>0]\n covar = np.zeros((self.nstar,self.nstar),dtype=self.ww.dtype)\n for i in range(self.nstar):\n for j in range(i+1):\n covar[i,j] = np.sum(wwt * self.vv[:,i] * self.vv[:,j])\n covar[j,i] = covar[i,j]\n return covar", "def CoP_x(CoG_x, ang_acc, inertia, force):\n CoP = []\n for i in range(0, len(ang_acc)):\n if ang_acc[i] is None:\n pass\n else:\n CoP_frame = CoG_x[i] + (inertia[i] * ang_acc[i] / force)\n CoP.append(CoP_frame)\n return CoP", "def make_coco_labels(real_c):\n y = np.eye(real_c.size(1))\n\n fixed_c_list = []\n\n # single object addition and removal\n for i in range(2*real_c.size(1)):\n fixed_c = real_c.clone()\n for c in fixed_c:\n if i%2:\n c[i//2] = 0.\n else:\n c[i//2] = 1.\n fixed_c_list.append(Variable(fixed_c, volatile=True).cuda())\n\n # multi-attribute transfer (H+G, H+A, G+A, H+G+A)\n #if self.dataset == 'CelebA':\n # for i in range(4):\n # fixed_c = real_c.clone()\n # for c in fixed_c:\n # if i in [0, 1, 3]: # Hair color to brown\n # c[:3] = y[2]\n # if i in [0, 2, 3]: # Gender\n # c[3] = 0 if c[3] == 1 else 1\n # if i in [1, 2, 3]: # Aged\n # c[4] = 0 if c[4] == 1 else 1\n # fixed_c_list.append(self.to_var(fixed_c, volatile=True))\n return fixed_c_list", "def charges(self):\n charges = np.zeros(len(self))\n charges[:len(self.qc_mol), ...] = self.qc_mol.charges\n charges[len(self.qc_mol):len(self.qc_mol) + len(self.br_mol), ...] 
= self.br_mol.charges\n charges[-len(self.pc_mol):, ...] = self.pc_mol.charges\n\n return charges", "def compute_gc(seq): # seq should be a string\n num_GC = list(seq).count('g')+list(seq).count('c')+list(seq).count('G')+list(seq).count('C')\n amount_GC = num_GC/len(seq)\n return amount_GC", "def prepare_each(self, model, wngrid):\n\n self._total_cia = len(self.ciaPairs)\n self._nlayers = model.nLayers\n self._ngrid = wngrid.shape[0]\n self.info('Computing CIA ')\n\n sigma_cia = np.zeros(shape=(model.nLayers, wngrid.shape[0]))\n\n chemistry = model.chemistry\n\n for pairName in self.ciaPairs:\n cia = self._cia_cache[pairName]\n sigma_cia[...] = 0.0\n\n cia_factor = chemistry.get_gas_mix_profile(cia.pairOne) * \\\n chemistry.get_gas_mix_profile(cia.pairTwo)\n\n for idx_layer, temperature in enumerate(model.temperatureProfile):\n _cia_xsec = cia.cia(temperature, wngrid)\n sigma_cia[idx_layer] += _cia_xsec*cia_factor[idx_layer]\n self.sigma_xsec = sigma_cia\n yield pairName, sigma_cia", "def generateCoefficients (self):\n\t\tself.ws = []\n\t\tif not self.sine:\n\t\t\tself.bs = []\n\t\tmean = np.zeros(self.dim)\n\t\tcov = np.eye(self.dim)*(2*self.gammak)\n\n\t\tif self.sine:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\telse:\n\t\t\tfor _ in range(self.rn):\n\t\t\t\tself.ws.append(nr.multivariate_normal(mean, cov))\n\t\t\t\tself.bs.append(nr.uniform(0.0, 2*np.pi))", "def make_prog(self):\r\n\r\n self.cnv.clear()\r\n cdf = self.df[self.df.L != 0]\r\n c0 = cdf['C0'].value_counts().idxmax()\r\n c1 = cdf['C1'].value_counts().idxmax()\r\n c2 = cdf['C2'].value_counts().idxmax()\r\n c3 = cdf['C3'].value_counts().idxmax()\r\n self.cnv.extend([c0, c1, c2, c3])", "def aco(self, gens, current_gen, client):\n # The time at the start of the algorithm\n time_start = time.time()\n\n # Initalise the colony and its parameters\n self.colony = Colony()\n self.colony.ants = self.ants\n self.colony.shortest_path = self.shortest_path\n self.colony.min_distance = self.min_distance\n\n # Initialise an array to be append with nodes\n shortest_path = []\n\n # Do generations from the current generation to the generation number needed\n for i in range(current_gen, gens):\n\n # The current time\n time_now = time.time()\n time_elapsed = time_now-time_start\n # If exectutiion time has reached 25 seconds, return result\n if (time_elapsed) > 25:\n break\n\n # Ants within colony perform their tours\n self.colony.perform_tours(self)\n\n # Get the shortest tour found by the ants\n shortest_path = self.colony.shortest_path\n\n # Global update of pheromones\n self.update_pheromones(self.colony)\n\n # Generation successful, thus increase the generation reached\n gen_reached = i+1\n\n # Update Instance parameters to be returned to client\n self.shortest_path = shortest_path\n self.min_distance = self.colony.min_distance\n msg = \"Generation \" + str(i) + \" distance \" + str(round(self.colony.min_distance, 3)) + \" path \" + str(shortest_path)\n\n # Emit a message using SocketIO for a dynamic console\n socketio.emit('my event', msg, room=client)\n socketio.sleep(0.00000000001)\n\n return gen_reached, shortest_path, self.colony.min_distance", "def c_():\r\n c = np.array([[0, 0], [0, 100], [100, 100], [100, 80], [20, 80],\r\n [20, 20], [100, 20], [100, 0], [0, 0]])\r\n return c", "def recommend_cosim():\n pass", "def COMBI(self):\n\n self.FB()\n self.LAT()\n self.ROT()", "def make_cnv(self):\r\n\r\n self.cnv = random.sample(range(1, 25), 4)\r\n self.c_array = 
random.sample(self.c_array, len(self.c_array))\r\n score = float(0)\r\n common = [i for i in self.cnv[1:] if i in self.c_array]\r\n if self.cnv[0] in self.c0_array:\r\n score += 0.25\r\n elif self.cnv[0] not in self.c0_array:\r\n score -= 0.25\r\n for i in range(len(common)):\r\n score += 0.25\r\n if score > 1:\r\n score = 1\r\n elif score < 0:\r\n score = 0\r\n return self.cnv.append(score)", "def gc_frequency(self):\n result = str(self.seq).count(\"G\") + str(self.seq).count(\"C\")\n return result", "def GC_Content(self):\n GC_content = lambda dna: (dna.count('G')+dna.count('C'))\\\n /self.length\n return round(GC_content(self.sequence),4)", "def com(self):\n\n\t\tcom = vector3d()\n\n\t\tcom.x = 0.0; com.y = 0.0; com.z = 0.0\n\t\tnAt = 0.0\n\n\t\tfor chain in self.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tfor atom in residue.atom:\n\t\t\t\t\tcom.x += atom.coord.x\n\t\t\t\t\tcom.y += atom.coord.y\n\t\t\t\t\tcom.z += atom.coord.z\n\n\t\t\t\t\tnAt += 1.0\n\n\t\tif nAt == 0:\n\t\t\tprint \"ERROR: zero atoms present for COM calculation!\"\n\t\t\tsys.exit()\n\n\t\tcom /= nAt\n\t\treturn com", "def _granger_causality(self):\r\n gc = dict(frequencies={}, gc_xy={}, gc_yx={}, gc_sim={},\r\n spectral_density={})\r\n for i, j in self.ij:\r\n w, f_x2y, f_y2x, f_xy, Sw = \\\r\n alg.granger_causality_xy(self.model_coef[i, j],\r\n self.error_cov[i, j],\r\n n_freqs=self._n_freqs)\r\n\r\n # All other measures are dependent on i, j:\r\n gc['gc_xy'][i, j] = f_x2y\r\n gc['gc_yx'][i, j] = f_y2x\r\n gc['gc_sim'][i, j] = f_xy\r\n gc['spectral_density'][i, j] = Sw\r\n\r\n return gc", "def get_culg_cost(As, Rps, culg):\n c, d = culg\n # To avoid actually permuting tensor elements we do this manually\n # instead of calling permute_As.\n cubeperm = cubeperms[c]\n tensorperm = tensorperms[d]\n indexperm = indexperms[d]\n T = type(As[0])\n As = [As[cubeperm[i]] for i in tensorperm]\n NW_dim = get_A_dim(As, Rps, 3, indexperm[5])\n NE_dim = get_A_dim(As, Rps, 7, indexperm[5])\n SW_dim = get_A_dim(As, Rps, 0, indexperm[5])\n SE_dim = get_A_dim(As, Rps, 4, indexperm[5])\n BW_dim = get_A_dim(As, Rps, 1, indexperm[1])\n BE_dim = get_A_dim(As, Rps, 5, indexperm[1])\n FW_dim = get_A_dim(As, Rps, 0, indexperm[1])\n FE_dim = get_A_dim(As, Rps, 4, indexperm[1])\n N_dim = NW_dim**2 * NE_dim**2\n S_dim = SW_dim**2 * SE_dim**2\n B_dim = BW_dim**2 * BE_dim**2\n F_dim = FW_dim**2 * FE_dim**2\n cost = N_dim*B_dim + B_dim*S_dim + S_dim*F_dim\n return cost", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def get_arcs(self):\n arcs = []\n for arcs_list in self._inc.values():\n record = arcs_list.get_first_record()\n while record is not None:\n arc = record.element\n arcs.append(arc)\n record = record._next\n return arcs", "def cg(self):\n A = self.clAlphaT / self.clAlphaWF\n B = (1 - self.downwashGradW)\n C = (self.surfaceT * self.tlH) / (self.surfaceW * self.cMACW)\n sm = 0.05 * self.cMACW # static margin, approximated as 5% of aircraft MAC\n return self.ac + A * B * C * self.speedRatio**2 - sm", "def cole_coeff(self):\n return self.diseq_coeff(standardize=True)", "def genRate(self):\n\n # We need to compute normEsquared before we can compute the generation\n # rate\n normEsq = self.get_scalar_quantity('normEsquared')\n # Prefactor for generation rate. 
Note we gotta convert from m^3 to\n # cm^3, hence 1e6 factor\n fact = consts.epsilon_0 / (consts.hbar * 1e6)\n gvec = np.zeros_like(normEsq)\n # Main loop to compute generation in each layer\n freq = self.conf[('Simulation', 'params', 'frequency')]\n for name, layer in self.layers.items():\n self.log.debug('LAYER: %s', name)\n self.log.debug('LAYER T: %f', layer.thickness)\n self.log.debug('START: %f', layer.start)\n self.log.debug('END: %f', layer.end)\n # Use the layer object to get the nk matrix with correct material\n # geometry\n nmat, kmat = layer.get_nk_matrix(freq, self.X, self.Y)\n gvec[layer.get_slice(self.Z)] = fact * nmat * kmat * normEsq[layer.get_slice(self.Z)]\n # gvec[layer.get_slice()] = nmat * kmat * normEsq[layer.get_slice(self.Z)]\n self.extend_data('genRate', gvec)\n return gvec", "def covar_samp(self):\n if self.count <= 1:\n return None\n return self.Ck / (self.count - 1)", "def get_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x8000+i,0)/100 for i in range(4)])", "def get_gates(self):\n return self.gates", "def syn_rand(num_workers, gp, acq_optimiser, anc_data):\n return [asy_rand(gp, acq_optimiser, anc_data) for _ in range(num_workers)]", "def sample_cc(self, nsamples=1, weighted=True):\n weights = self.areas / np.sum(self.areas) if weighted else None\n for index in np.random.choice(a=len(self.geometries), size=nsamples, p=weights):\n yield self.geometries[index]", "def gc(sequence):\n sequence = sequence.upper()\n return (sequence.count('G') + sequence.count('C')) / float(len(sequence))", "def get_occr(self):\n return self._occr_array", "def covariates(self):\n return None", "def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0", "def _construct_reg_costs(self):\n param_reg_cost = sum([T.sum(p**2.0) for p in self.joint_params])\n return param_reg_cost", "def produce_cgchart(ytrue, ypred):\n\n yprobas = np.append((1-ypred).reshape(-1,1), ypred.reshape(-1,1), axis=1)\n # 0's and 1's\n print(yprobas.shape)\n areas = plot_cumulative_gain(ytrue, yprobas)", "def evaluation_cc(self, property='clustering-coeff'):\n\n if property == 'clustering-coeff':\n rw_cc = [np.mean(clustering_coef_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(clustering_coef_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'transitivity':\n rw_cc = [np.mean(transitivity_wu(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(transitivity_wu(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'coreness':\n rw_cc = [np.mean(core.core_periphery_dir(self.rw_data[t])) for t in range(0, self.T)]\n smth_cc = [np.mean(core.core_periphery_dir(self.smth_data[t])) for t in range(0, self.T)]\n elif property == 'assortativity':\n rw_cc = [np.mean(core.assortativity_wei(self.rw_data[t], 0)) for t in range(0, self.T)]\n smth_cc = [np.mean(core.assortativity_wei(self.smth_data[t], 0)) for t in range(0, self.T)]\n elif property == 'modularity':\n rw_cc, _ = get_number_of_components(self.rw_data)\n smth_cc, _ = get_number_of_components(self.smth_data)\n elif property == 'path_length':\n rw_cc = [charpath(rw)[0] for rw in self.rw_data]\n smth_cc = [charpath(sm)[0] for sm in self.smth_data]\n\n # rw_cc_ent = get_entropy_list(rw_cc)\n # smth_cc_ent = get_entropy_list(smth_cc)\n\n return rw_cc, smth_cc", "def 
gc_content(seq):\n return round( (seq.count('C') + seq.count('G')) / len(seq) * 100 , 6 )", "def __init__(self,Capas: list=[],n_fret: float=0,pext: float=0,gamma_h20: float=9.81):\r\n self.capas=Capas\r\n self.n_fret=n_fret\r\n self.pext = pext\r\n self.gamma_h20=gamma_h20\r\n pass", "def get_arcs(self):\n arcs = []\n for arcs_set in self._inc.values():\n for arc in arcs_set: arcs.append(arc)\n return arcs", "def calc_cohesion( g, sg0, sg1, max_csize ) :\n score = 0.0\n n0 = len( sg0 )\n n1 = len( sg1 )\n if (n0 + n1 <= max_csize) :\n boundary_edges = networkx.edge_boundary( g, sg0, sg1 )\n for e in boundary_edges :\n score += g[e[0]][e[1]][\"similarity\"]\n return score / max( n0, n1 )", "def qcs(self):\n return self.aggregation", "def cs(self):\n if hasattr(self, \"_cs_cache\"):\n return self._cs_cache\n return np.array([conf.cs for conf in self.configurations], dtype=int)", "def get_classes(self):\n return list(range(self.num_clss))", "def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh", "def ac(self):\n return np.array(self['ac'], dtype=np.float32) / 1000", "def ac(self):\n return self.acWF + self.acN", "def _generate_qubits(self):\n return cq.LineQubit.range(4)", "def get_cd_samples(self):\n \n if \"PCD\" in self.algorithm:\n \n input_vars = []\n \n given_vars = []\n \n else:\n \n input_vars = [self.minibatch_set]\n \n given_vars = {self.x_gibbs: self.train_inputs[self.minibatch_set,:]} \n \n get_samples = theano.function(inputs = input_vars,\n outputs = [self.p_xi_given_x_[-1], \n self.gibbs_samples[-1]\n ], \n givens = given_vars,\n #start the chain at the data distribution\n updates = self.gibbs_updates)\n \n return get_samples", "def add_computed_gas_concentrations(self):\n # Extract the z-coordinate and T, S, P profile\n zs = self.interp_ds.coords[self.ztsp[0]].values\n Ts = self.interp_ds[self.ztsp[1]].values\n Ss = self.interp_ds[self.ztsp[2]].values\n Ps = self.interp_ds[self.ztsp[3]].values\n \n # Create an air object\n air_names = ['nitrogen', 'oxygen', 'argon', 'carbon_dioxide']\n yk = np.array([0.78084, 0.20946, 0.009340, 0.00036])\n from tamoc import dbm\n air = dbm.FluidMixture(air_names)\n m = air.masses(yk)\n \n # Compute the concentrations adjusted for depth\n Cs = np.zeros((len(zs), len(air_names)))\n for i in range(len(zs)):\n Cs[i,:] = air.solubility(m, Ts[i], 101325., Ss[i])[0,:] * \\\n seawater.density(Ts[i], Ss[i], Ps[i]) / \\\n seawater.density(Ts[i], Ss[i], 101325.)\n \n # Make sure none of these gases are already in the measured profile\n for name in air_names:\n if name in self.interp_ds:\n air_names[air_names.index(name)] = 'computed_' + name\n \n # Add these data to the Profile object\n data = np.hstack((np.atleast_2d(zs).transpose(), Cs))\n names = [self.ztsp[0]] + air_names \n units = [self.ztsp_units[0]] + 4*['kg/m^3']\n self.append(data, names, units)\n \n # Rebuild the interpolator\n self._build_interpolator()", "def syn_bucb(num_workers, gp, acq_optimiser, anc_data):\n recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]\n for _ in range(1, num_workers):\n recommendations.append(_halluc_ucb(gp, acq_optimiser, recommendations, anc_data))\n return recommendations", "def concentration(self):\n return self._gev_bijector.concentration", "def Generate(self,the_scf):\n self.dip_ints = the_scf.mol.intor('cint1e_r_sph', comp=3) # component,ao,ao.\n charges = 
the_scf.mol.atom_charges()\n coords = the_scf.mol.atom_coords()\n self.nuc_dip = np.einsum('i,ix->x', charges, coords)\n return", "def generateCombos(vars,constants):\n # SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS\n assert len(vars) == 2 and len(constants) == 2\n combs = []\n for c1 in constants:\n for c2 in constants:\n combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))\n return combs", "def kitti_eval_coco_style(gt_annos, dt_annos, current_classes):\n class_to_name = {\n 0: 'Car',\n 1: 'Pedestrian',\n 2: 'Cyclist',\n 3: 'Van',\n 4: 'Person_sitting',\n }\n class_to_range = {\n 0: [0.5, 0.95, 10],\n 1: [0.25, 0.7, 10],\n 2: [0.25, 0.7, 10],\n 3: [0.5, 0.95, 10],\n 4: [0.25, 0.7, 10],\n }\n name_to_class = {v: n for n, v in class_to_name.items()}\n if not isinstance(current_classes, (list, tuple)):\n current_classes = [current_classes]\n current_classes_int = []\n for curcls in current_classes:\n if isinstance(curcls, str):\n current_classes_int.append(name_to_class[curcls])\n else:\n current_classes_int.append(curcls)\n current_classes = current_classes_int\n overlap_ranges = np.zeros([3, 3, len(current_classes)])\n for i, curcls in enumerate(current_classes):\n overlap_ranges[:, :, i] = np.array(class_to_range[curcls])[:,\n np.newaxis]\n result = ''\n # check whether alpha is valid\n compute_aos = False\n for anno in dt_annos:\n if anno['alpha'].shape[0] != 0:\n if anno['alpha'][0] != -10:\n compute_aos = True\n break\n mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(\n gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)\n for j, curcls in enumerate(current_classes):\n # mAP threshold array: [num_minoverlap, metric, class]\n # mAP result: [num_class, num_diff, num_minoverlap]\n o_range = np.array(class_to_range[curcls])[[0, 2, 1]]\n o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)\n result += print_str((f'{class_to_name[curcls]} '\n 'coco AP@{:.2f}:{:.2f}:{:.2f}:'.format(*o_range)))\n result += print_str((f'bbox AP:{mAPbbox[j, 0]:.2f}, '\n f'{mAPbbox[j, 1]:.2f}, '\n f'{mAPbbox[j, 2]:.2f}'))\n result += print_str((f'bev AP:{mAPbev[j, 0]:.2f}, '\n f'{mAPbev[j, 1]:.2f}, '\n f'{mAPbev[j, 2]:.2f}'))\n result += print_str((f'3d AP:{mAP3d[j, 0]:.2f}, '\n f'{mAP3d[j, 1]:.2f}, '\n f'{mAP3d[j, 2]:.2f}'))\n if compute_aos:\n result += print_str((f'aos AP:{mAPaos[j, 0]:.2f}, '\n f'{mAPaos[j, 1]:.2f}, '\n f'{mAPaos[j, 2]:.2f}'))\n return result", "def get_coco_dataset():\n ds = AttrDict()\n classes = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',\n 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',\n 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',\n 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n ]\n ds.classes = {i: name for i, name in enumerate(classes)}\n return ds", "def ComputeRegenerativeBraking(self):\r\n pass", "def _init_sgc(self, 
init_temp, symbols):\n self._sgc_obj = []\n for ground_state in self._ground_states:\n self._sgc_obj.append(\n SGCMonteCarlo(\n ground_state[\"atoms\"],\n init_temp,\n symbols=symbols))", "def algi(C):\n return np.array([ C[0,2], C[1,2], C[1,0] ])", "def test_grovers_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_algorithms.grovers_circuit(final_measure=True,\n allow_sampling=True)\n targets = ref_algorithms.grovers_counts(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def CIS(self):\n return self.get_class_average(self.CIS_class_level)", "def _gto_from_ccdata(self):\n\n gbasis = self.ccdata.gbasis\n lines = []\n\n for no, basis in enumerate(gbasis):\n lines.append(f\"{no + 1:3d} 0\")\n for prims in basis:\n lines.append(f\"{prims[0].lower():s} {len(prims[1]):5d} 1.00\")\n for prim in prims[1]:\n lines.append(f\"{prim[0]:15.9e} {prim[1]:15.9e}\")\n lines.append('')\n lines.append('')\n return lines", "def _rbSequenceInit(self):\n\n ## send all of this to sequence acq\n if not self.nbFrames:\n self.nbFrames = int(self.duration/self.cycleTime)+1 ## Determine number of frames. (+1) because int round at the lower int\n #nbGreenFrames = self.rbGreenRatio[0] #nb of green frames in each green sequence #NOT YET USED\n nbGreenSequence = float(self.nbFrames)/self.greenFrameInterval #Dividing nbFrames by the green frame interval with a float to have float division\n print('Nb of green frames : ', nbGreenSequence)\n nbGreenSequence = int(round(nbGreenSequence))\n print('Nb of green frames : ', nbGreenSequence)\n #if self.colorMode == SequenceAcquisition.rbColorModes[0]:\n colorSeq=[0,2] #R & B alternation by default\n if self.colorMode == SequenceAcquisition.rbColorModes[1]:\n colorSeq = [0] #Red only mode\n elif self.colorMode == SequenceAcquisition.rbColorModes[2]:\n colorSeq = [2] #Blue only mode\n\n self.ledList = colorSeq*int(round(float(self.nbFrames-nbGreenSequence)/len(colorSeq))) #Initiate a whole list of R-B alternance\n #list.insert(index, elem) -- inserts the element at the given index, shifting elements to the right\n greenSeqIdx = 0\n while greenSeqIdx <= self.nbFrames :\n self.ledList.insert(greenSeqIdx,1)\n greenSeqIdx+= self.greenFrameInterval\n #NB : no return needed because each ledList and nbFrames are instance attribute", "def calc_chromatic_coupling(self):\n raise NotImplementedError('Chromatic Coupling is not Implemented yet.')", "def get_gc_content(sequence):\n len_seq = len(sequence) - sum(alternative_bases_counter(sequence).values())\n sequence = sequence.upper()\n c = sequence.count('C')\n g = sequence.count('G')\n return round((c + g) / len_seq, 4)", "def batch(self, coeff_count=13, db=False):\n mfccs, _ = self.mfcc(coeff_count)\n if db:\n mfccs = utils.dbspec(mfccs)\n delta1, delta2 = self.delta_coeffs(mfccs)\n self._annotate(mfccs)\n\n mfccs_len = mfccs.shape[1]\n batch_x = np.concatenate((mfccs, delta1, delta2), axis=0).transpose()\n batch_y = np.array(self.annotated_samples)\n print(\"AudioClip--Generated Batch\")\n return (batch_x, batch_y)", "def Gen_Sim_Acc (lmax, HC, HS, Pos):\r\n print(\"Generating simulated acclerations, lmax =\", lmax, \"\")\r\n\r\n CS = conv.Make_Line_Coef(lmax, HC, HS)\r\n print(f\"Shape of the Coef array = {CS.shape}\")\r\n\r\n M_PotGrad = solv.Get_PotGradMatrix(lmax, Pos) # get M_PotGrad\r\n# print(\"shape of M=\", M_PotGrad.shape)\r\n\r\n Acc_line = 
M_PotGrad.dot(CS)\r\n# print(\"shape of Acc_line=\", Acc_line.shape)\r\n\r\n Acc_sim = conv.Make_Array(Acc_line, 3)\r\n# print(\"shape of Acc_sim=\", Acc_sim.shape)\r\n\r\n return Acc_sim", "def get_cole_cole_params(self, emg, sampling_frequency=1980):\n def get_cole_cole(emg):\n emg_dft = np.fft.fft(emg)\n emg_dft = emg_dft[:len(emg_dft) // 2]\n r_dft, i_dft = emg_dft.real, emg_dft.imag\n points = zip(r_dft, i_dft)\n xc, yc, r = make_circle(points)\n discr = np.sqrt(r ** 2 - yc ** 2) **0.5\n x1 = xc + discr\n x2 = xc - discr\n\n eps_inf = min(x1, x2)\n delta = abs(x1 - x2)\n tau = None\n cc_params = eps_inf, delta, tau\n return cc_params\n\n emg_split_list = np.array_split(emg, self.feature_length)\n cole_cole_signal = np.array([get_cole_cole(emg) for emg in emg_split_list])\n return cole_cole_signal", "def gc_content(sequence):\n gc = sequence.count('G') + sequence.count('C')\n atgc = sequence.count('A') + sequence.count('T') + sequence.count('G') + sequence.count('C')\n \n return (gc/atgc) * 100", "def popn_coal_rate(model, pop_id, n_samp, generation_time, steps=None):\n ddb = model.get_demography_debugger()\n if steps is None:\n end_time = ddb.epochs[-2].end_time + 10000\n steps = np.linspace(1, end_time, end_time+1)\n num_samples = [0 for _ in range(ddb.num_populations)]\n num_samples[pop_id] = n_samp\n coal_rate, P = ddb.coalescence_rate_trajectory(steps=steps,\n num_samples=num_samples,\n double_step_validation=False)\n steps = steps * generation_time\n return coal_rate, P, steps", "def cifar100_aug2():\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n transform_t = transforms.Compose([\n transforms.RandomCrop(32, 4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n transform_v = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n return cifar100_altaug(transform_t, transform_v)", "def get_sc(self):\n sc = []\n a = np.array([1, 0, 0], dtype=np.float32)\n b = np.array([0, 1, 0], dtype=np.float32)\n c = np.array([0, 0, 1], dtype=np.float32)\n # order z -> y -> x\n for i, j, k in itertools.product(*itertools.repeat(range(-(self.nsc//2),self.nsc//2+1), 3)):\n sc.append(i*a + j*b + k*c)\n sc = np.stack(sc, axis=0)\n #print(sc.shape)\n\n with tf.variable_scope(\"supercell\"):\n sc = tf.constant(sc, dtype=tf.float32)\n return sc", "def build_cycle_gan(image_shape : tuple) -> list:\n g_model_AtoB = define_generator(image_shape)\n # generator: B -> A\n g_model_BtoA = define_generator(image_shape)\n # discriminator: A -> [real/fake]\n d_model_A = define_discriminator(image_shape)\n # discriminator: B -> [real/fake]\n d_model_B = define_discriminator(image_shape)\n # composite: A -> B -> [real/fake, A]\n c_model_AtoB = define_composite_model(g_model_AtoB, d_model_B, g_model_BtoA, image_shape)\n # composite: B -> A -> [real/fake, B]\n c_model_BtoA = define_composite_model(g_model_BtoA, d_model_A, g_model_AtoB, image_shape)\n models = [d_model_A, d_model_B, g_model_AtoB, g_model_BtoA, c_model_AtoB, c_model_BtoA]\n return models", "def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in 
range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration", "def gliphs(self):\n return self._gliphs", "def part3c_2():\n xs = \"Werner & Co entered court today . Werner maintained that they were not guilty .\".split()\n N = 10000\n\n submission.computeGibbsProbabilities( englishCRF,\n submission.getCRFBlocks,\n submission.chooseGibbsCRF,\n xs, N )\n grader.requireIsTrue(True)", "def get_gc_content(sequence):\n # get the sequence length and \n # make all the sequence characters upper case\n seq_len, sequence = len(sequence), sequence.upper()\n # count all gs and cs\n c = sequence.count('C')\n g = sequence.count('G')\n # returns the gc content from a sequence\n # sum up the |Gs and Cs counts and divide \n # by the sequence length\n return round((c + g) / seq_len, 4)", "def get_pc_per_range(model, class_name):\n class_total = model.class_counts[class_name]\n if model.num_runs is not None:\n class_total = model.num_runs * class_total * .33\n\n true_positives, totals = model.range_metrics_10[class_name]\n purities = [] # Accuracy per range (true positive/total)\n comps = []\n TP_count = 0\n total_count = 0\n\n for index in reversed(range(len(true_positives))):\n cur_p = 0 # Current purity\n cur_c = 0 # Current completeness\n TP_count += true_positives[index]\n total_count += totals[index]\n if total_count != 0:\n # positive class samples / totals # with prob in range\n cur_p = TP_count / total_count\n if class_total != 0:\n cur_c = TP_count / class_total\n\n purities.append(cur_p)\n comps.append(cur_c)\n purities.reverse()\n comps.reverse()\n return purities, comps", "def num_carns(self):\n return self._num_carns", "def _get_gen_classes(self, bgc_like, gcf_as_cutoff=0.5):\n # assess if bgc or gcf\n is_bgc = isinstance(bgc_like, BGC)\n if is_bgc:\n # get parent gcf for bgc\n bgc_like_gcf = [\n gcf for gcf in self.npl.gcfs\n if bgc_like.bgc_id in [b.bgc_id for b in gcf.bgcs]\n ][0]\n # gather AS classes and convert to names in scoring dict\n as_classes = self.npl.class_matches.convert_as_classes(\n bgc_like.product_prediction.split(\".\"))\n bgc_like_classes_dict = {\n \"bigscape_class\": bgc_like_gcf.bigscape_class,\n # str - always one bigscape class right?\n \"as_classes\": as_classes\n } # list(str)\n else:\n as_classes = self.npl.class_matches.convert_as_classes(\n self.npl.class_matches.get_gcf_as_classes(\n bgc_like, gcf_as_cutoff))\n bgc_like_classes_dict = {\n \"bigscape_class\": bgc_like.bigscape_class,\n # str - always one bigscape class right?\n \"as_classes\": as_classes\n } # list(str)\n return bgc_like_classes_dict", "def Ab_seq(RNs):\n seq = []\n for res in range(cf.nkey):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def gen_reactions(self, model, options):\n Avogadro = model.parameters.get_one(id='Avogadro')\n c = model.compartments.get_one(id='c')\n\n # basic metabolic reactions\n for basic_reaction in options['basic']:\n\n # reaction\n reaction = model.reactions.get_or_create(submodel=model.submodels.get_one(id=basic_reaction['submodel']),\n id=basic_reaction['id'])\n reaction.name = basic_reaction['name']\n reaction.participants = []\n for participant in basic_reaction['participants']:\n 
reaction.participants.add(model.species_types.get_one(id=participant['id']).species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=participant['coefficient']))\n\n # rate law\n model.parameters.create(id='kcat_'+basic_reaction['id'],\n value=basic_reaction['rate_law']['k_cat']['value'],\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units(basic_reaction['rate_law']['k_cat']['units']))\n for km in basic_reaction['rate_law']['k_m']:\n model.parameters.create(id='km_{}_{}'.format(basic_reaction['id'], km['id']),\n value=km['value'],\n type=wc_ontology['WC:K_m'],\n units=unit_registry.parse_units('M'))\n reactants = [participant['id'] for participant in basic_reaction['participants'] if participant['coefficient']<0]\n if 'h' in reactants:\n reactants.remove('h')\n if 'h2o' in reactants:\n reactants.remove('h2o')\n rate_law_exp, errors = wc_lang.RateLawExpression.deserialize(\n '{}{}'.format('kcat_'+basic_reaction['id'], ' '.join(['* ({}[c] / (km_{}_{} * Avogadro * volume_c + {}[c]))'.format(reactant, basic_reaction['id'], reactant, reactant) for reactant in reactants])),\n self.get_rate_law_context(model))\n\n rate_law = model.rate_laws.create(direction=wc_lang.RateLawDirection.forward,\n type=None,\n expression=rate_law_exp,\n reaction=reaction,\n )\n rate_law.id = rate_law.gen_id()\n\n # rna\n rna_species_types = [species_types for species_types in model.species_types if species_types.type == wc_ontology['WC:RNA']]\n\n # rna transcription\n for km in options['rna']['transcription']['k_m']:\n model.parameters.create(id='km_{}_trans'.format(km['id']), value=km['value'], type=wc_ontology['WC:K_m'], units=unit_registry.parse_units('M'))\n\n for i, rna_species_type in enumerate(rna_species_types):\n reaction = model.reactions.get_or_create(submodel=model.submodels.get_one(id=options['rna']['submodel']), id='transcription_{}'.format(rna_species_type.id))\n reaction.name = 'transcription {}'.format(rna_species_type.name)\n reaction.participants = []\n # participants\n rna_str = rna_species_type.structure.value\n # lhs\n reaction.participants.add(model.species_types.get_one(id='atp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('A')))\n reaction.participants.add(model.species_types.get_one(id='gtp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('G')))\n reaction.participants.add(model.species_types.get_one(id='ctp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('C')))\n reaction.participants.add(model.species_types.get_one(id='utp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-rna_str.count('U')))\n reaction.participants.add(model.species_types.get_one(id='h2o').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-1))\n # rhs\n reaction.participants.add(rna_species_type.species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=1))\n reaction.participants.add(model.species_types.get_one(id='ppi').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=len(rna_str)))\n reaction.participants.add(model.species_types.get_one(id='h').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=1))\n # rate law\n model.parameters.create(\n id='k_trans_{}'.format(rna_species_type.id),\n value=math.log(2)/model.parameters.get_one(id='half_life_{}'.format(rna_species_type.id)).value * 8,\n 
type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units('s^-1 / M'))\n rate_law_str = 'k_trans_{}'.format(rna_species_type.id)\n if 'A' in rna_str:\n rate_law_str += ' * (atp[c] / (km_atp_trans * Avogadro * volume_c + atp[c]))'\n if 'G' in rna_str:\n rate_law_str += ' * (gtp[c] / (km_gtp_trans * Avogadro * volume_c + gtp[c]))'\n if 'C' in rna_str:\n rate_law_str += ' * (ctp[c] / (km_ctp_trans * Avogadro * volume_c + ctp[c]))'\n if 'U' in rna_str:\n rate_law_str += ' * (utp[c] / (km_utp_trans * Avogadro * volume_c + utp[c]))'\n rate_law_str += ' * rna_pol[c] / (Avogadro * volume_c)'\n\n reaction_rate_law_exp, errors = wc_lang.RateLawExpression.deserialize(\n rate_law_str,\n self.get_rate_law_context(model))\n reaction_rate_law = model.rate_laws.create(direction=wc_lang.RateLawDirection.forward,\n type=None,\n expression=reaction_rate_law_exp,\n reaction=reaction,\n )\n reaction_rate_law.id = reaction_rate_law.gen_id()\n\n # rna degradation\n for i, rna_species_type in enumerate(rna_species_types):\n reaction = model.reactions.get_or_create(submodel=model.submodels.get_one(id=options['rna']['submodel']), id='degradation_{}'.format(rna_species_type.id))\n reaction.name = 'transcription {}'.format(rna_species_type.name)\n reaction.participants = []\n # participants\n rna_str = rna_species_type.structure.value\n # lhs\n reaction.participants.add(rna_species_type.species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-1))\n reaction.participants.add(model.species_types.get_one(id='h2o').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=-(len(rna_str)-1)))\n # rhs\n reaction.participants.add(model.species_types.get_one(id='amp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('A')))\n reaction.participants.add(model.species_types.get_one(id='gmp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('G')))\n reaction.participants.add(model.species_types.get_one(id='cmp').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('C')))\n reaction.participants.add(model.species_types.get_one(id='ump').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=rna_str.count('U')))\n reaction.participants.add(model.species_types.get_one(id='h').species.get_one(compartment=c).species_coefficients.get_or_create(coefficient=len(rna_str)-1))\n # rate law\n model.parameters.create(\n id='k_deg_{}'.format(rna_species_type.id),\n value=math.log(2)/model.parameters.get_one(id='half_life_{}'.format(rna_species_type.id)).value,\n type=wc_ontology['WC:k_cat'],\n units=unit_registry.parse_units('s^-1 / M'))\n model.parameters.create(\n id='km_deg_{}'.format(rna_species_type.id),\n value=1 / Avogadro.value / c.init_volume.mean,\n type=wc_ontology['WC:K_m'],\n units=unit_registry.parse_units('M'))\n reaction_rate_law_exp, errors = wc_lang.RateLawExpression.deserialize(\n 'k_deg_{}'\n ' * {}[c] / (km_deg_{} * Avogadro * volume_c + {}[c])'\n ' * rna_se[c] / (Avogadro * volume_c)'.format(rna_species_type.id, rna_species_type.id, rna_species_type.id, rna_species_type.id),\n self.get_rate_law_context(model))\n reaction_rate_law = model.rate_laws.create(direction=wc_lang.RateLawDirection.forward,\n type=None,\n expression=reaction_rate_law_exp,\n reaction=reaction,\n )\n reaction_rate_law.id = reaction_rate_law.gen_id()" ]
[ "0.5574133", "0.53772056", "0.53357095", "0.5283083", "0.52788275", "0.5273213", "0.5222399", "0.51976293", "0.51760423", "0.5161018", "0.5147553", "0.5111785", "0.5092093", "0.50634205", "0.5053241", "0.5039575", "0.50389385", "0.501876", "0.50118065", "0.50114757", "0.5008792", "0.500848", "0.49983403", "0.49963635", "0.49921808", "0.49915943", "0.49884245", "0.49873528", "0.49805462", "0.49766207", "0.49653915", "0.49638075", "0.49599883", "0.49519107", "0.4943074", "0.49396834", "0.4933761", "0.4932756", "0.49233264", "0.49193457", "0.491547", "0.49110615", "0.49020573", "0.4901336", "0.48970154", "0.48955023", "0.48898646", "0.48878267", "0.48767716", "0.4871018", "0.48644978", "0.4859013", "0.48588237", "0.48534694", "0.4853362", "0.48385647", "0.483495", "0.48311907", "0.48216292", "0.48072797", "0.48060134", "0.4795882", "0.47956398", "0.47849724", "0.4784928", "0.47812524", "0.47712594", "0.47712475", "0.47703218", "0.4770054", "0.47687584", "0.47665364", "0.47530413", "0.4746192", "0.47445557", "0.47413749", "0.47392908", "0.47377405", "0.47322437", "0.47321934", "0.47264978", "0.47256407", "0.47133672", "0.47112465", "0.47110868", "0.47104782", "0.47091633", "0.4707532", "0.47025365", "0.46996287", "0.4698863", "0.46970475", "0.46952984", "0.46943426", "0.46920058", "0.46880198", "0.46840054", "0.4683063", "0.46824834", "0.46801248" ]
0.5017676
18
Read 2-dimensional real-valued features with associated class labels
def read_data(path, d=','):
    arr = numpy.genfromtxt(path, delimiter=d, dtype=None)
    length = len(arr)
    x = numpy.zeros(shape=(length, 2))
    t = numpy.zeros(length, dtype=int)
    for i, (x1, x2, tv) in enumerate(arr):
        x[i, 0] = x1
        x[i, 1] = x2
        t[i] = int(tv)
    return x, t
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_data(feature_file, label_file):", "def load_data(class_fnames):\n X = []\n y = []\n for label, fnames in enumerate(class_fnames):\n for fname in fnames:\n X.append(cv2.imread(fname))\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def read_data(self,filename):\n\n\t\tfid = open(filename,\"r\")\n\t\tdata = []\n\t\td = []\n\t\tfor line in fid.readlines():\n\t\t\td.append(line.strip())\n\t\tfor d1 in d:\n\t\t\tdata.append(d1.split(\",\")) # list of lists\n\t\tfid.close()\n\n\t\tself.featureNames = data[0] # first row as feature names\n\t\tself.targetName = self.featureNames[-1]\n\t\tself.featureNames = self.featureNames[:-1]\n\n\t\tdata = data[1:] # remove the first row\n\t\tself.classData = []\n\t\tfor d in range(len(data)):\n\t\t\tself.classData.append(data[d][-1]) # extract last column \n\t\t\tdata[d] = data[d][:-1]\t# remove the last column in data\n\n\t\t# extract unique values values for each feature\n\t\ttransposedData = np.transpose(np.copy(data))\n\t\tself.featureValues={}\n\t\tfor i in range(len(self.featureNames)):\n\t\t\tself.featureValues[self.featureNames[i]] = np.unique(transposedData[i])\n\t\tprint(self.featureValues)\n\n\t\treturn data,self.classData,self.featureNames", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def read_features_from_file(filename):\n\tf = np.loadtxt(filename)\n\treturn f[:,:4],f[:,4:] # feature locations, descriptors", "def _read_libffm_file(self, filename):\n\n X_true = np.zeros((self.num_rows, self.num_features))\n y_true = np.zeros((self.num_rows, 1))\n field_true = np.zeros((self.num_features, 1))\n with open(filename, 'r') as f:\n i = 0\n for line in f:\n tmp_row = line.replace('\\n', '').split(' ')\n\n # extract label\n y_true[i] = int(tmp_row[0])\n\n # extract data and fields\n for k in range(1, len(tmp_row)):\n if len(tmp_row[k]) > 0:\n tmp_str = tmp_row[k].split(':')\n j = int(tmp_str[1])\n field_true[j] = int(tmp_str[0])\n tmp_data = float(tmp_str[2])\n X_true[i, j] = tmp_data\n i = i + 1\n\n return X_true, y_true, field_true", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def read_data(data_path, filename,feature_number):\n\n with open(data_path + \"/\" + filename, 'r', encoding='utf-8-sig') as f: \n X = np.genfromtxt(f, delimiter=',')[:,0:feature_number]\n\n\n # Last column of datafile contains output labels\n Y = np.genfromtxt(data_path + \"/\" + filename,delimiter=\",\")[:,feature_number]\n Y = Y.reshape(X.shape[0])\n\n return X,Y", "def numpy_read_features(path):\n import numpy\n # read table as a structured array (each row is a tuple)\n feature_array = numpy.genfromtxt(path, delimiter='\\t', names=True, dtype=None)\n source = feature_array['source']\n target = feature_array['target']\n status = 
feature_array['status']\n feature_names = numpy.array(feature_array.dtype.names[3: ])\n features = feature_array[feature_names]\n # convert from structured array to normal ndarray\n features = features.view((numpy.float, len(features.dtype.names)))\n return source, target, status, features, feature_names", "def svm_read_feature(data_file_name, digit):\n\tprob_y = []\n\tprob_x = []\n\tfor line in open(data_file_name):\n\t\t#print line\n\t\tline = line.split(None, 1)\n\t\t#print line\n\t\t# In case an instance with all zero features\n\t\tif len(line) == 1: line += ['']\n\t\tlabel, features = line\n\t\t#parse prob_x\n\t\txi = {}\n\t\tind = 1\n\t\tfor e in features.split():\n\t\t\txi[ind] = float(e)\n\t\t\tind += 1\n\t\t#parse prob_y\n\t\tif int(float(label)) == digit:\n\t\t\tprob_y += [float(+1)]\n\t\telse:\n\t\t\tprob_y += [float(-1)]\n\t\tprob_x += [xi]\n\treturn (prob_y, prob_x)", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def read_label(filepath, read_scalars=False):\n label_array = np.loadtxt(filepath, dtype=np.int, skiprows=2, usecols=[0])\n if read_scalars:\n scalar_array = np.loadtxt(filepath, skiprows=2, usecols=[-1])\n return label_array, scalar_array\n return label_array", "def extract_labels(f, one_hot=False, num_classes=10):\n\tprint('Extracting', f.name)\n\twith gzip.GzipFile(fileobj=f) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %\n\t\t\t\t\t\t\t\t\t\t\t (magic, f.name))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels, num_classes)\n\t\treturn labels", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def extract_labels(f, one_hot=False, num_classes=10):\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def __init__(self, features, labels, bigdl_type=\"float\"):\n self.feature = features[0]\n self.features = features\n self.label = labels[0]\n self.bigdl_type = bigdl_type\n self.labels = labels", "def read_feature_labels(output):\n path = os.path.join(output, 'features.list')\n if not os.path.exists(path):\n logging.warning(\"Cannot read feature labels. 
Path/File does not exist.\")\n return None\n else:\n with open(path, 'r') as in_file:\n feature_labels = in_file.readlines()\n feature_labels = [feature_label.strip() for feature_label in feature_labels]\n\n return np.asarray(feature_labels)", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def extract_labels(f, one_hot=False, num_classes=10):\n print('Extracting', f.name)\n with gzip.GzipFile(fileobj=f) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError('Invalid magic number %d in MNIST label file: %s' %\n (magic, f.name))\n num_items = _read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return dense_to_one_hot(labels, num_classes)\n return labels", "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data", "def feature_reader(path):\n features = json.load(open(path))\n index_1 = [int(k) for k,v in features.items() for fet in v]\n index_2 = [int(fet) for k,v in features.items() for fet in v]\n values = [1.0]*len(index_1) \n nodes = [int(k) for k,v in features.items()]\n node_count = max(nodes)+1\n feature_count = max(index_2)+1\n features = sparse.coo_matrix((values,(index_1,index_2)), shape=(node_count, feature_count),dtype=np.float32)\n out_features = dict()\n out_features[\"indices\"] = torch.LongTensor(np.concatenate([features.row.reshape(-1,1), features.col.reshape(-1,1)],axis=1).T)\n out_features[\"values\"] = torch.FloatTensor(features.data)\n out_features[\"dimensions\"] = features.shape\n return out_features", "def load_features(inputfile, load_bin=False, save_path=None):\n if load_bin:\n return bin2matrix(inputfile)\n X, y = [], []\n pf = FileOfPaths(inputfile)\n pb = progressbar.ProgressBar(pf.numberFiles())\n for n, _ in enumerate(pf):\n y.append(pf.getY())\n X.append(pf.getFeatures())\n pb.update()\n X = np.array(X).astype(float)\n y = np.array(y).astype(int)\n if save_path:\n ym = np.array([y])\n Xy = np.append(X, ym.T, axis=1)\n np.save(save_path, Xy)\n return X, y", "def load_features(file):\n data = np.load(file, allow_pickle=True)\n return data", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', 
filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def load_training_data(fname):\n all_data = load_csv(fname, 'excel-tab')\n\n labels = [rec[2] == 'OFF' for rec in all_data]\n data = [convert_to_reals(clean_text(rec[1])) for rec in all_data]\n max_features = max([len(rec) for rec in data])\n\n # Pad the data\n for rec in data:\n rec.extend([0.0] * (max_features - len(rec)))\n\n return labels, data, max_features", "def parser(serialized_example):\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"image\": tf.FixedLenFeature([], tf.string),\n \"label\": tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features[\"image\"], tf.uint8)\n image.set_shape([CHANNELS * HEIGHT * WIDTH])\n # Reshape from [depth * height * width] to [depth, height, width].\n image = tf.cast(\n tf.transpose(tf.reshape(image, [CHANNELS, HEIGHT, WIDTH]), [1, 2, 0]),\n tf.float32) * (2. / 255) - 1\n\n label = tf.cast(features['label'], tf.int32)\n\n random_noise = tf.random_normal([noise_dim])\n features = {\n 'real_images': image,\n 'random_noise': random_noise}\n\n return features, label", "def load_features_predictors():\n pwd = \"./data/\"\n if __name__ == \"__main__\":\n pwd = \".\" + pwd\n else:\n pass\n\n fn1 = os.path.join(pwd, \"features.npy\")\n fn2 = os.path.join(pwd, \"predictors.npy\")\n\n X = np.load(fn1)\n y = np.load(fn2)\n return X, y", "def mapper(line): \n feats = line.strip().split(\",\") \n # labels must be at the beginning for LRSGD\n label = feats[len(feats) - 1] \n feats = feats[: len(feats) - 1]\n feats.insert(0,label)\n features = [ float(feature) for feature in feats ] # need floats\n return np.array(features)", "def load_rf_data(filename):\n A = np.loadtxt(filename, dtype=\"float32\", delimiter=\",\")\n\n X = A[:, :10]\n y = A[:, -1]\n\n return X, y", "def from_labeled_point(rdd: RDD, categorical: bool = False, nb_classes: int = None):\n features = np.asarray(\n rdd.map(lambda lp: from_vector(lp.features)).collect())\n labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')\n if categorical:\n if not nb_classes:\n nb_classes = np.max(labels) + 1\n temp = np.zeros((len(labels), nb_classes))\n for i, label in enumerate(labels):\n temp[i, label] = 1.\n labels = temp\n return features, labels", "def read_file ( filename ):\r\n\t# lecture de l'en-tete\r\n\tinfile = open ( filename, \"r\" ) \r\n\tnb_classes, nb_features = [ int( x ) for x in infile.readline().split() ]\r\n\r\n\t# creation de la structure de donnees pour sauver les images :\r\n\t# c'est un tableau de listes (1 par classe)\r\n\tdata = np.empty ( 10, dtype=object ) \r\n\tfiller = np.frompyfunc(lambda x: list(), 1, 1)\r\n\tfiller( data, data )\r\n\r\n\t# lecture des images du fichier et tri, classe par classe\r\n\tfor ligne in infile:\r\n\t\tchamps = ligne.split ()\r\n\t\tif len ( champs ) == nb_features + 1:\r\n\t\t\tclasse = int ( champs.pop ( 0 ) )\r\n\t\t\tdata[classe].append ( map ( lambda x: float(x), champs ) ) \r\n\tinfile.close ()\r\n\r\n\t# transformation des list en array\r\n\toutput = np.empty ( 10, dtype=object )\r\n\tfiller2 = np.frompyfunc(lambda x: np.asarray (x), 1, 1)\r\n\tfiller2 ( data, output )\r\n\r\n\treturn output", "def read_2d_analysis_data(f):\n \n data = np.transpose(np.loadtxt(f, dtype=np.float64))\n x 
= data[0]\n y = data[1]\n\n return x, y", "def read_data(feats_file, labels_file, size=None):\n feats = np.loadtxt(feats_file)\n labels = np.loadtxt(labels_file, ndmin=2)\n if size:\n feats = feats[:size, :]\n labels = labels[:size, :]\n return np.concatenate((feats, labels), axis=1)", "def read_features(self):\r\n def unpack_keypoint(data):\r\n try:\r\n kpts = data['keypoints']\r\n desc = data['descriptors']\r\n keypoints = [cv.KeyPoint(x, y, _size, _angle, _response, int(_octave), int(_class_id))\r\n for x, y, _size, _angle, _response, _octave, _class_id in list(kpts)]\r\n return keypoints, np.array(desc)\r\n except(IndexError):\r\n return np.array([]), np.array([])\r\n try:\r\n data = np.load(self.features_path + self.id + \".npz\")\r\n self.keypoints, self.descriptors = unpack_keypoint(data)\r\n logging.info(f\"Existing features for {self.name} found in features directory.\")\r\n except FileNotFoundError:\r\n logging.info(f\"Features for {self.name} not found in {self.features_path}.\")", "def _parse_el_example(array_feats, array_feat_types, quant_feats):\n out_example = []\n d_keys = sorted(array_feats.keys())\n for k in d_keys:\n n_feat = quant_feats[k]\n point_feat = tf.decode_raw(array_feats[k], array_feat_types[k])\n point_feat = tf.reshape(point_feat, [quant_feats[k]])\n out_example.append(point_feat)\n return tuple(out_example)", "def convert_full_features_to_input_features(raw_features):\n data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))\n data_features = data_features.transform(lambda *example: (\n example[0], # example_id\n example[7], # inputs_id\n example[9], # segment_ids\n example[2], # valid_length,\n example[8], # p_mask\n example[10], # start_position,\n example[11], # end_position\n example[14])) # is_impossible\n return data_features", "def load_data(filename):\n assert os.path.exists(filename)==True\n dat = scipy.io.loadmat(filename)\n inputs = dat['inputs']\n #print len(inputs)\n targets = dat['targets']\n #print len(targets)\n assert len(inputs)==len(targets)\n\n global alldata\n global indim \n global outdim\n\n indim = len(inputs[0])\n outdim = 1\n #print indim\n alldata = ClassificationDataSet(indim, outdim, nb_classes = 8)\n alldata.setField('input',inputs)\n alldata.setField('target',targets)\n\n assert len(alldata['input'])==len(alldata['target'])\n print type(alldata)", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def read_temp(temp):\n type_dict = {\"string\": str, \"unknown\": str, \"numeric\": float}\n with open(temp, 'r') as topen:\n feature_lines = topen.readlines()\n feature_labels = []\n feature_types = []\n for i, row in enumerate(feature_lines):\n if row.startswith(\"@attribute\"):\n flabel, ftype = row[11:-1].split(' ')\n feature_labels.append(flabel)\n feature_types.append(type_dict[ftype])\n 
elif row.startswith(\"@data\"):\n feature_values = feature_lines[i+1].split(\",\")\n if len(feature_values) < len(feature_labels):\n feature_values = feature_lines[i+2].split(\",\")\n for i, item in enumerate(feature_values):\n try:\n feature_values[i] = (feature_types[i](item))\n except:\n feature_values[i] = item\n return(dict(zip(feature_labels, feature_values)))", "def load_csv(fname = data_indoor):\n \n reader = csv.reader(open(fname, 'r'))\n \n # Blank list\n data = []\n \n # Don't read the zeroth element of each row (image name), convert to float.\n for row in reader:\n data.append(map(float, row[1:]))\n \n # Convert list to array \n d = np.array(data)\n \n # Seperate labels from features\n Y = d[:,0]\n X = d[:,1:]\n \n return X,Y", "def _dtype_feature(ndarray):\n assert isinstance(ndarray, np.ndarray)\n dtype_ = ndarray.dtype\n if dtype_ == np.float64 or dtype_ == np.float32:\n return tf.train.Feature(float_list=tf.train.FloatList(value=ndarray))\n elif dtype_ == np.int64:\n return tf.train.Feature(int64_list=tf.train.Int64List(value=ndarray))\n else: \n raise ValueError(\"The input should be numpy ndarray. \\\n Instead got {}\".format(ndarray.dtype))", "def decode_data(features, reader_settings):\n if features.dtype == tf.string:\n return tf.decode_raw(\n features,\n reader_settings)\n else:\n return tf.cast(\n features,\n reader_settings)", "def read_features_from_file(filename):\n f = loadtxt(filename)\n return f[:, :4], f[:, 4:] # feature locations, descriptors", "def extract_features(self, inputs):\n pass", "def feature_extraction(images, save_to='dataset.csv'):\n num_images = len(images)\n logging.info(f\"Extracting features from {num_images} images...\")\n x = np.zeros((num_images, 7))\n y = np.zeros(num_images, dtype=np.int8)\n\n for i, image in enumerate(images):\n logging.info(f\"Processing Image {i+1}/{num_images}...\")\n y[i] = 0 if image.name.startswith('cyl') \\\n else 1 if image.name.startswith('inter') \\\n else 2 if image.name.startswith('let') \\\n else 3 if image.name.startswith('mod') \\\n else 4 if image.name.startswith('para') \\\n else 5 if image.name.startswith('super') \\\n else 6 if image.name.startswith('svar') else -1\n \n # Get number of object pixels in segmented color channels, which become features 0-3\n for color in [0,1,2,4]: # 3 is the color index for RGB so we skip that and use 4 (grayscale)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n if len(uniques) > 2:\n image = image.otsu(color)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n x[i,color if color is not 4 else 3] = counts[0]\n\n x[i,4] = np.std(image.getHistogram(4))\n\n x[i,5] = np.argmax(image.getHistogram(4))\n\n x[i,6] = np.argmin(image.getHistogram(4))\n\n # Save new dataset to file\n np.savetxt(save_to, np.concatenate([x,np.atleast_2d(y).T], axis=1), delimiter=',', fmt='%s')\n\n return x, y", "def _get_classification_data(self, real, synthetic, real_label, synthetic_label):\n split_index = int(self.train_test_split * len(real))\n X_train = synthetic[:split_index]\n y_train = synthetic_label[:split_index]\n X_test = real[split_index:]\n y_test = real_label[split_index:]\n return X_train, y_train, X_test, y_test", "def load_data_set(filename):\n\n input_file = open(filename)\n\n num_features = len(input_file.readline().split('\\t')) - 1\n input_file.seek(0)\n data_mat = []\n label_mat = []\n\n for line in input_file.readlines():\n line_arr = []\n curr_line = line.strip().split('\\t')\n for i in range(num_features):\n 
line_arr.append(float(curr_line[i]))\n data_mat.append(line_arr)\n label_mat.append(float(curr_line[-1]))\n\n return data_mat, label_mat", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "def load_pts_features(path):\n\n #\n # Your code here\n #\n\n pts = [np.empty((123, 2)), np.empty((123, 2))]\n feats = [np.empty((123, 128)), np.empty((123, 128))]\n\n return pts, feats", "def _process_features(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature'))\n logger.info(\"building labels for features\")\n\n line_counter = 0\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (feature_id, dbxref_id, organism_id, name, uniquename,\n residues, seqlen, md5checksum, type_id, is_analysis,\n timeaccessioned, timelastmodified) = line\n\n feature_key = feature_id\n if re.search(r'[\\|\\s\\[\\]\\{\\}\\\\<\\>]', uniquename):\n # some uniquenames have pipes or other nasty chars!\n # for example: FB||||FBrf0133242|Hugh-u1\n feature_id = self._makeInternalIdentifier(\n 'feature', feature_key)\n else:\n feature_id = 'FlyBase:'+uniquename\n self.idhash['feature'][feature_key] = feature_id\n self.feature_types[feature_key] = type_id\n self.label_hash[feature_id] = name\n\n if feature_key not in self.feature_to_organism_hash:\n self.feature_to_organism_hash[feature_key] = set()\n self.feature_to_organism_hash[feature_key].add(organism_id)\n\n # HACK - FBgn are genes, and therefore classes,\n # all else be individuals\n is_gene = False\n if re.search(r'(FBgn|FBog)', feature_id):\n self.idhash['gene'][feature_key] = feature_id\n is_gene = True\n elif re.search(r'FBa[lb]', feature_id):\n self.idhash['allele'][feature_key] = feature_id\n elif re.search(r'FBt[ip]', feature_id):\n self.idhash['feature'][feature_key] = feature_id\n\n if self.testMode and \\\n int(feature_key) not in self.test_keys['gene'] + \\\n self.test_keys['allele'] + self.test_keys['feature']:\n continue\n\n # now do something with it!\n # switch on type_id\n if name.strip() == '':\n name = uniquename\n\n type_key = type_id\n type_id = self.idhash['cvterm'][type_key]\n\n # skip some features by type\n types_to_skip = [\n 'SO:0000316', # CDS\n 'SO:0000696', # oligos\n 'SO:0000358', # polypeptide\n 'SO:0000234', # transcripts\n ]\n\n type_keys_to_skip = [\n 596, # pcr_product\n 57096, # mature peptide\n 57097, # signal_peptide\n 57270, # repeat masker\n 58210, # alignment\n 59643, # cDNA_clone\n 60006, # uncharacterized_change_in_nucleotide_sequence\n 61351, # oligo\n 61467, # polypeptide_domain\n 257, # exon\n 286, # intron\n ]\n\n organisms_to_skip = [\n 2 # computational result\n ]\n\n if type_id in types_to_skip \\\n or int(type_key) in type_keys_to_skip\\\n or int(organism_id) in organisms_to_skip:\n 
continue\n\n line_counter += 1\n\n if int(type_key) == 604: # RNAi_reagent\n # TODO add other reagents?\n self.idhash['reagent'][feature_key] = feature_id\n\n # deal with the taxonomy\n # only get taxa for features that are actually used in our set\n tax_internal_id = self._makeInternalIdentifier(\n 'organism', organism_id)\n if organism_id not in self.checked_organisms:\n # will get the NCBITax if necessary\n tax_id = self._get_organism_id(organism_id)\n self.checked_organisms.add(organism_id)\n else:\n tax_id = self.idhash['organism'][organism_id]\n\n tax_label = self.label_hash.get(tax_id)\n if not re.search(r'FBog', feature_id) \\\n and re.search(r'Drosophila', tax_label):\n # make only fly things leaders\n model.makeLeader(feature_id)\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if is_gene:\n model.addClassToGraph(\n feature_id, name, type_id)\n g.addTriple(\n feature_id, model.object_properties['in_taxon'],\n tax_id)\n else:\n if re.search('FBa[lb]', feature_id):\n type_id = Genotype.genoparts['allele']\n model.addIndividualToGraph(feature_id, name, type_id)\n\n # stop adding what we do not appreciate\n # if is_obsolete == 't':\n # if is_gene:\n # model.addDeprecatedClass(feature_id)\n # else:\n # model.addDeprecatedIndividual(feature_id)\n # self.deprecated_features.add(feature_key)\n\n model.addClassToGraph(tax_id)\n if tax_id != tax_internal_id:\n model.addEquivalentClass(tax_id, tax_internal_id)\n\n model.addComment(\n feature_id,\n self._makeInternalIdentifier('feature', feature_key))\n\n # TODO save checked_organisms fbid to ncbitax mapping to\n # a local file to speed up subsequent searches\n\n return", "def load_features(self, features):\n pass\n # self.features = features", "def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels", "def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row", "def load_image(filename):\n im = Image.open(filename) # .convert('L')\n width, height = im.size\n pixels = list(im.getdata())\n features = [pixels[i * width:(i + 1) * width] for i in range(height)]\n features = np.asarray(im, dtype=np.float32)\n features /= 255.0\n return features", "def load_as_one_hot(self):\n\n labels = [] \n examples = [] \n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n # load examples and labels\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_feat_list = read_cuis(file_path)\n examples.append(' '.join(file_feat_list))\n \n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n\n examples = self.token2int.texts_to_matrix(examples, mode='binary')\n\n return examples, labels", "def 
load_simple_features(self, simple_features):\n self.simple_features = pd.DataFrame(simple_features).T\n self.simple_features.fillna(False, inplace=True)\n self.simple_features = self.simple_features.astype(bool)\n\n # Aggregate features descriptions\n self.simple_features_description = {}\n for binary in simple_features:\n for token in simple_features[binary]:\n if token not in self.simple_features_description:\n self.simple_features_description[token] = \\\n simple_features[binary][token]", "def _extract_feature(element):\n features = tf.parse_single_example(\n element,\n # Defaults are not specified since both keys are required.\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'label/x': tf.FixedLenFeature([], tf.int64),\n 'label/y': tf.FixedLenFeature([], tf.int64)\n })\n return features", "def extract_feat(self, imgs):\n pass", "def extract_feat(self, imgs):\n pass", "def features_from_CNN(self):\n\n dataloader = self.datasetManager.get_dataloader()\n print(\"\\nFeatures obtention with CNN\")\n print(\"-\"*15)\n for i, batch in tqdm.tqdm(enumerate(dataloader)):\n img = self.to_device(batch[0])\n img_name = batch[2][0]\n \n temp = re.findall(r'\\d+', img_name)\n res = list(map(int, temp))\n X = res[-2]\n Y = res[-1]\n \n savepath = os.path.join(self.output_dir, 'data%i'%X)\n create_folder(savepath)\n \n out_CNN = self.network(img) \n \n torch.save(out_CNN, os.path.join(savepath,'features_tensor%i.pt'%Y))", "def reformat(x, y, img_size, num_ch, num_class):\n dataset = x.reshape(\n (-1, img_size, img_size, num_ch)).astype(np.float32)\n labels = (np.arange(num_class) == y[:, None]).astype(np.float32)\n return dataset, labels", "def test_features_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.features, atom.features)]", "def extract_data(filename):\n with open(filename, 'rb') as f:\n reader=f.readlines()\n train_data_label = [[int(x) for x in line.split() if x.isdigit()] for line in reader] \n # sorted by label\n train_data_label = sorted(train_data_label, key=lambda x: x[-1])\n train_data_label = np.array(train_data_label) \n return train_data_label", "def extract_features(raw_data):\n width = len(raw_data[0])\n num_features = len(raw_data) * width\n features = np.zeros((num_features, 3), dtype=bool)\n for row, line in enumerate(raw_data):\n for col, char in enumerate(line):\n if char == ' ':\n features[col + row * width][0] = True\n elif char == '+':\n features[col + row * width][1] = True\n elif char == '#':\n features[col + row * width][2] = True\n return features", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return self._dense_to_one_hot(labels)\n return labels", "def read_node_features_file(nodes_features_file):\n\n node_features = dgl.data.utils.load_tensors(nodes_features_file, False)\n return node_features", "def data_loader(edges,features,y):\n\n\n edge_index = torch.tensor(edges, dtype=torch.long)\n edge_index = edge_index.t().contiguous()\n x = torch.tensor(features.todense(), dtype=torch.float)\n\n y = torch.tensor(y)\n\n data = Data(x=x, edge_index=edge_index, y = y)\n\n return data", "def read_train_data(ids, 
features=[], convert_nan=True): \n return read_data(ids, train_feat_fnames, train_gs_fnames,\n features=features, convert_nan=convert_nan)", "def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in points:\n arr.append(input_feature_map[:,y,x,:])\n return tf.stack(arr, axis=1, name=\"extracted_features\"), len(points)", "def genFeatures(dimension, name2features, file_girls, file_boys):\n \n # Load in the data\n Xgirls = name2features(file_girls, B=dimension)\n Xboys = name2features(file_boys, B=dimension)\n X = np.concatenate([Xgirls, Xboys])\n \n # Generate Labels\n Y = np.concatenate([-np.ones(len(Xgirls)), np.ones(len(Xboys))])\n \n # shuffle data into random order\n ii = np.random.permutation([i for i in range(len(Y))])\n \n return X[ii, :], Y[ii]", "def read_vectorized_features(data_dir, subset=None, feature_version=2):\n if subset is not None and subset not in [\"train\", \"test\"]:\n return None\n\n extractor = PEFeatureExtractor(feature_version)\n ndim = extractor.dim\n X_train = None\n y_train = None\n X_test = None\n y_test = None\n\n if subset is None or subset == \"train\":\n X_train_path = os.path.join(data_dir, \"X_train.dat\")\n y_train_path = os.path.join(data_dir, \"y_train.dat\")\n y_train = np.memmap(y_train_path, dtype=np.float32, mode=\"r\")\n N = y_train.shape[0]\n X_train = np.memmap(X_train_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"train\":\n return X_train, y_train\n\n if subset is None or subset == \"test\":\n X_test_path = os.path.join(data_dir, \"X_test.dat\")\n y_test_path = os.path.join(data_dir, \"y_test.dat\")\n y_test = np.memmap(y_test_path, dtype=np.float32, mode=\"r\")\n N = y_test.shape[0]\n X_test = np.memmap(X_test_path, dtype=np.float32, mode=\"r\", shape=(N, ndim))\n if subset == \"test\":\n return X_test, y_test\n\n return X_train, y_train, X_test, y_test", "def load_features(feature_path):\n if not os.path.exists(os.path.join(feature_path, f\"0_features.npy\")): \n raise ValueError(f\"The provided location {feature_path} does not contain any representation files\")\n\n ds_list, chunk_id = [], 0\n while os.path.exists(os.path.join(feature_path, f\"{chunk_id}_features.npy\")): \n features = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_features.npy\"))).float()\n labels = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_labels.npy\"))).long()\n ds_list.append(ch.utils.data.TensorDataset(features, labels))\n chunk_id += 1\n\n print(f\"==> loaded {chunk_id} files of representations...\")\n return ch.utils.data.ConcatDataset(ds_list)", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def features(self):\n return self.shape[2]", "def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb')as f:\r\n datadict = p.load(f)\r\n \r\n X = datadict['data']\r\n Y = datadict['labels']\r\n \r\n print X.shape\r\n X = X.reshape(X.shape[0], SHAPE[0], SHAPE[1], SHAPE[2])\r\n Y = np.array(Y)\r\n return X, Y", "def load_CIFAR_batch(filename):\r\n with open(filename, 'rb') as f:\r\n data_dict = cPickle.load(f)\r\n ims = data_dict['data']\r\n coarse_labels = np.array(data_dict['coarse_labels'])\r\n fine_labels = 
np.array(data_dict['fine_labels'])\r\n return ims, coarse_labels, fine_labels", "def get_name_to_features(self):\n name_to_features = {\n 'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),\n 'label_ids': tf.io.FixedLenFeature([], tf.int64),\n }\n return name_to_features", "def labeledfeatures(eqdata, featurefunc, labelfunc):\n _size = len(eqdata.index)\n _labels, _skipatend = labelfunc(eqdata)\n _features, _skipatstart = featurefunc(eqdata.iloc[:(_size - _skipatend), :])\n return _features, _labels.iloc[_skipatstart:, :]", "def read_labels(labels_path):\n data = []\n with open(labels_path, 'r') as f:\n for line in f:\n line = line.split()\n sample = (line[0], int(line[1]))\n data.append(sample)\n \n dtype = [('video', '<U50'), ('label', int)]\n X = np.array(data, dtype=dtype)\n X = np.sort(X, order='video')\n return X", "def parse_feature_importances(filepath):\r\n lines = open(filepath, 'U').readlines()\r\n feature_IDs = []\r\n scores = []\r\n for line in lines[1:]:\r\n words = line.strip().split('\\t')\r\n feature_IDs.append(words[0].strip())\r\n scores.append(float(words[1].strip()))\r\n return array(feature_IDs), array(scores)", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels", "def image_to_feature_vector(raw_tensor):\n result = []\n for tensor in raw_tensor:\n result.append(tensor.flatten())\n return result", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def _parser(serialized_example):\n\n features = tf.compat.v1.parse_single_example(\n serialized_example,\n features={\n 'img_raw': tf.compat.v1.FixedLenFeature([], tf.string),\n 'label': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'category': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'elevation': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'azimuth': tf.compat.v1.FixedLenFeature([], tf.int64),\n 'lighting': tf.compat.v1.FixedLenFeature([], tf.int64),\n })\n\n img = tf.compat.v1.decode_raw(features['img_raw'], tf.float64)\n img = tf.reshape(img, [96, 96, 1])\n img = tf.cast(img, tf.float32) # * (1. 
/ 255) # left unnormalized\n\n lab = tf.cast(features['label'], tf.int32)\n cat = tf.cast(features['category'], tf.int32)\n elv = tf.cast(features['elevation'], tf.int32)\n azi = tf.cast(features['azimuth'], tf.int32)\n lit = tf.cast(features['lighting'], tf.int32)\n\n return img, lab, cat, elv, azi, lit", "def load_csv(data_file_path, class_index=-1):\n\n handle = open(data_file_path, 'r')\n contents = handle.read()\n handle.close()\n rows = contents.split('\\n')\n out = np.array([[float(i) for i in r.split(',')] for r in rows if r])\n\n if class_index == -1:\n classes = map(int, out[:, class_index])\n features = out[:, :class_index]\n return features, classes\n\n elif class_index == 0:\n classes = map(int, out[:, class_index])\n features = out[:, 1:]\n return features, classes\n\n else:\n return out", "def extract_features(data_dir,mode='train'):\n files = get_files(data_dir)\n t0 = time.time()\n features = list()\n labels = list()\n for f in files:\n freq = get_frequencies(f)\n if mode=='train':\n sents = corpus_reader(f)\n labels.extend(d2l(sents,f,freq))\n elif mode=='decode':\n sents = corpus_reader(f,tag='pos')\n else:\n print('Invalid mode!')\n break\n features.extend(d2f(sents,f,freq)) \n dt = time.time() - t0\n print('Total feature extraction time: %d seconds' % dt)\n return features,labels", "def _generate_elements(example, label):\n\n class_label = None\n parsed = tf.train.Example.FromString(example.numpy())\n if parsed.features.feature[label].int64_list.value:\n val = parsed.features.feature[label].int64_list.value\n if len(val) > 0:\n class_label = val[0]\n else:\n val = parsed.features.feature[label].bytes_list.value\n if len(val) > 0:\n class_label = val[0].decode()\n return (class_label, parsed)", "def _parse(serialized_example):\n\n feature_map = {\n 'dayofweek': tf.io.FixedLenFeature([], tf.int64),\n 'dropofflat': tf.io.FixedLenFeature([], tf.float32),\n 'dropofflon': tf.io.FixedLenFeature([], tf.float32),\n 'fare_amount': tf.io.FixedLenFeature([], tf.float32),\n 'hourofday': tf.io.FixedLenFeature([], tf.int64),\n 'passengers': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplat': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplon': tf.io.FixedLenFeature([], tf.float32)\n }\n\n # Parse the serialized data into a dictionary.\n parsed_example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=feature_map)\n\n features = add_engineered(parsed_example)\n label = features.pop(\"fare_amount\")\n\n return features, label", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y", "def load_data():\n (trainx, trainy), (valx, valy), (testx, testy) = pickle.load(gzip.open(\"data/mnist_one_hot.pkl.gz\"),\n encoding=\"latin1\")\n trainy = np.argmax(trainy, axis=1)\n valy = np.argmax(valy, axis=1)\n testy = np.argmax(testy, axis=1)\n trainx = trainx * 2 - 1\n valx = valx * 2 - 1\n testx = testx * 2 - 1\n return (trainx.reshape(-1, 1, 28, 28), trainy), (valx.reshape(-1, 1, 28, 28), valy), (testx.reshape(-1, 1, 28, 
28),\n testy)", "def read_htk_user_feat(name='filename'):\n f = open(name, 'rb')\n hdr = f.read(12)\n num_frames, _, samp_size, parm_kind = struct.unpack(\">IIHH\", hdr)\n if parm_kind != 9:\n raise RuntimeError(\n 'feature reading code only validated for USER '\n 'feature type for this lab. There is other publicly available '\n 'code for general purpose HTK feature file I/O\\n')\n\n num_dim = samp_size // 4\n\n feat = np.zeros([num_dim, num_frames], 'f')\n for t in range(num_frames):\n feat[:, t] = np.array(\n struct.unpack('>' + ('f' * num_dim), f.read(samp_size)),\n dtype=float)\n\n return feat" ]
[ "0.6834548", "0.66014194", "0.6516929", "0.64742565", "0.6434231", "0.6379081", "0.63573194", "0.63504636", "0.6342509", "0.62751335", "0.6247352", "0.62230104", "0.62069976", "0.61604685", "0.6158316", "0.6132908", "0.6128687", "0.61250186", "0.61247563", "0.6107242", "0.61066765", "0.61062026", "0.6096995", "0.6089647", "0.60666376", "0.60339326", "0.6001705", "0.60010546", "0.599424", "0.5993754", "0.59708", "0.59650457", "0.5964438", "0.5961148", "0.5960548", "0.59154165", "0.5913071", "0.59118694", "0.59109867", "0.5904307", "0.5899108", "0.58968043", "0.5890172", "0.58780766", "0.58732206", "0.5860085", "0.5843903", "0.5816645", "0.58112216", "0.5799385", "0.5796767", "0.5787572", "0.57806516", "0.57569236", "0.5753116", "0.5753116", "0.57444674", "0.5736066", "0.5730782", "0.57285994", "0.57205284", "0.56981117", "0.56959593", "0.56913155", "0.56801", "0.56791437", "0.56791437", "0.56747615", "0.5663085", "0.5657612", "0.5655172", "0.5655042", "0.5654495", "0.5651661", "0.5644065", "0.5641315", "0.5641117", "0.56399447", "0.56212", "0.56190604", "0.5618374", "0.5609987", "0.5609509", "0.56055236", "0.5602745", "0.55932766", "0.5587234", "0.5586892", "0.5585861", "0.5584461", "0.5576936", "0.55745167", "0.5572991", "0.5568168", "0.55673134", "0.5567255", "0.5563629", "0.55631465", "0.55629057", "0.555367", "0.5552515" ]
0.0
-1
Plot the data as a scatter plot
def plot_data(x, t, new_figure=True, save_path=None):
    # Plot the binary data
    ma = ['o', 's', 'v']
    fc = ['r', 'g', 'b']  # np.array([0, 0, 0]), np.array([1, 1, 1])]
    tv = numpy.unique(t.flatten())  # an array of the unique class labels
    if new_figure:
        plt.figure()
    for i in range(tv.shape[0]):
        pos = (t == tv[i]).nonzero()  # returns a boolean vector mask for selecting just the instances of class tv[i]
        plt.scatter(numpy.asarray(x[pos, 0]), numpy.asarray(x[pos, 1]), marker=ma[i], facecolor=fc[i])
    plt.xlabel('$x_1$')
    plt.ylabel('$x_2$')
    if save_path is not None:
        plt.savefig(save_path, fmt='png')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_scatter_points(self):\n self.plot(1)", "def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt", "def plot_scatter(data):\n minimum = data[data.columns[0]]\n distance = data[data.columns[1]]\n\n # Forms the scatterplot\n plt.scatter(minimum, distance)\n\n # Adds a title and axis names\n plt.title('Minimum vs Total distance', fontweight='bold', fontsize='large')\n plt.xlabel('Minimun Bound', fontsize='large')\n plt.gca().invert_xaxis()\n plt.ylabel('Total Distance', fontsize='large')\n plt.grid(True)\n\n # Actually shows the scatterplot\n plt.show()", "def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()", "def plot_scatter(x, y):\n\tplt.scatter(x, y)", "def plot_data(x: np.ndarray, y: np.ndarray) -> None:\n\n _, ax = plt.subplots()\n scatter = ax.scatter(x[:, 0], x[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n legend1 = ax.legend(*scatter.legend_elements(),\n loc=\"lower right\", title=\"Classes\")\n ax.add_artist(legend1)\n plt.xlim((min(x[:, 0]) - 0.1, max(x[:, 0]) + 0.1))\n plt.ylim((min(x[:, 1]) - 0.1, max(x[:, 1]) + 0.1))", "def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')", "def plot_data(self, dataset, plt):\n cluster_markers = ['*', '+', 'o']\n cluster_color = ['b', 'g', 'r']\n for i in range(dataset.shape[0]):\n plt.scatter(*zip(*dataset[i]), marker=cluster_markers[i], c=cluster_color[i])\n\n return plt", "def plot_scatter(data):\n city_x,city_y = get_city_base()\n fig = plt.figure(figsize = FIGURE_SIZE)\n plt.scatter(data['longitude'],data['latitude'], color = CRIME_POINTS_COLOR, s = SCATTER_SIZE_OF_CRIME_POINTS)\n plt.scatter(city_x,city_y, color = CITY_MAP_COLOR, s = SCATTER_SIZE_OF_CHICAGO_CITY, zorder = CITY_MAP_ORDER)", "def plot_data(self):", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()", "def make_scatter():\n x = np.linspace(4, 8, 6)\n y = np.sin(x)\n plt.plot(x, y, 'o', color='black');\n plt.show()", "def plot_data(data, fig):\n\n if data.shape[1] > 3:\n print(\"Warning: data dimension is larger than 3, dim is %s\" % (data.shape[1]))\n\n ax = fig.add_subplot(111, projection='3d')\n # ax.scatter(data[:, 0], data[:, 1], data[:, 2], marker='.', s=0.5)\n return ax", "def scattered():\r\n c = 'A'\r\n i = 'FLR '\r\n data = chart_data(i, '2018-09-01', 12*5, c).set_index('date').sort_index()\r\n # print(data)\r\n data.plot(kind='scatter', x='Perc.idv', y='Perc.ids') # ,c='Centre')\r\n # plt.xticks(range(len(data)),data.index.tolist(),rotation=20)\r\n # plt.axhline(y=100, color='r', linestyle='-', label='Individual target')\r\n # plt.axhline(y=75, color='b', linestyle='-', label='Industry target')\r\n plt.title(centres[c] + ' ' + indic)\r\n plt.savefig('pic/' + c + indic + '.jpg')", "def coords_plot(self):\n self.load_coords()\n x = []\n y = []\n px = [] \n for item in self.coords:\n if item[1] >52.10 and item[1] <52.4 and item[2]>20.8 and item [2] <21.4:\n x.append(item[1])\n y.append(item[2])\n px.append(item[3])\n plt.scatter(x,y,c=px,s=150,alpha=0.3)\n plt.show()", "def plot_scatter_points_lines(self):\n self.plot(2)", "def PlotData(data, true_labels):\n\tcolors = ['red' if l == 0 else 'blue' for l in true_labels]\n\tfig = 
plt.figure()\n\tplt.scatter(data[:, 0], data[:, 1], c=colors)\n\tplt.show()\n\treturn", "def scatter_it(x, y, x_label, y_label):\n\n fig, ax1 = plt.subplots()\n plt.scatter(x, y)\n ax1.set_xlabel(x_label)\n ax1.set_ylabel(y_label)\n plt.show()", "def scatter(xarr, yarr, xlbl=None, ylbl=None, pw=600, ph=400):\n p = figure(plot_width=pw, plot_height=ph)\n # Model\n p.circle(xarr, yarr, color='black')#, legend='data')\n # Label\n if xlbl is not None:\n p.xaxis.axis_label = xlbl\n if ylbl is not None:\n p.yaxis.axis_label = ylbl\n # Show\n show(p)", "def scatterplot(x, y):\n plt.figure(figsize=(14, 8), dpi=80)\n plt.scatter(x[:, 1], y, s=30, c='r', marker='x', linewidths=1)\n plt.grid(True)\n plt.xlim(4, 24)\n plt.ylabel('Profit ($10k)')\n plt.xlabel('Population (10k)')\n plt.show()\n plt.close()", "def plot_scatter(\n xdata: np.ndarray,\n ydata: np.ndarray,\n ax=None,\n labelsize: int = 14,\n grid: bool = True,\n **kwargs,\n):\n if ax is None:\n ax = get_non_gui_ax()\n\n # Default plot options\n plot_opts = kwargs.copy()\n if \"c\" not in plot_opts:\n plot_opts[\"c\"] = \"grey\"\n if \"marker\" not in plot_opts:\n plot_opts[\"marker\"] = \"x\"\n if \"alpha\" not in plot_opts:\n plot_opts[\"alpha\"] = 0.8\n\n # Plot data\n ax.scatter(xdata, unp.nominal_values(ydata), **plot_opts)\n\n # Formatting\n ax.tick_params(labelsize=labelsize)\n ax.grid(grid)\n return ax", "def plot_data(x_plot, X_train, X_test, y_train, y_test, low, high):\n s = 15\n plt.plot(x_plot, ground_truth(x_plot), alpha=0.5, label='ground truth')\n plt.scatter(X_train, y_train, s=s, alpha=0.2)\n plt.scatter(X_test, y_test, s=s, alpha=0.2, color='red')\n plt.xlim((low, high))\n plt.ylabel('y')\n plt.xlabel('x')\n plt.legend(loc='upper left')\n plt.show()", "def _scatter_example_3(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"datetime\")\n ch.plot.scatter(\n data_frame=data,\n x_column=\"date\",\n y_column=\"unit_price\",\n size_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.set_title(\"Scatterplot\")\n ch.set_subtitle(\"Optional 'color_column' argument for grouping by color.\")\n ch.show(_OUTPUT_FORMAT)", "def scatterPlot2():\n N = 100\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n\n plt.scatter(x, y, c=colors, alpha=0.5)\n plt.show()", "def scatter_plot(self):\n sns.set_style('whitegrid')\n \n fig, ax = plt.subplots()\n cmap = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)\n \n \n plt.title('Benchmark and Trial Samples', fontsize=16)\n \n ax.xaxis.set_tick_params(labelsize=16, direction='inout', length=6, width=1, color='gray')\n ax.yaxis.set_tick_params(labelsize=16, direction='inout', length=6, width=1, color='gray')\n \n ax.scatter(self.x_benchmark[:,0], self.x_benchmark[:,1], c='magenta',\n alpha=0.5, marker='x',label='B sample')\n ax.scatter(self.x_trial[:,0],self.x_trial[:,1], c='blue',\n alpha=0.2, marker='s',label='T sample')\n \n plt.grid(True)\n plt.legend(loc='upper left', fontsize=14)\n # plt.show()\n plt.savefig(\"pyplot.png\")", "def show_scatter(self):\n plt.scatter(self.a1[:, 0], self.a1[:, 1], c=\"red\", alpha=0.5, s=10)\n plt.scatter(self.a2[:, 0], self.a2[:, 1], c=\"blue\", alpha=0.5, s=10)\n plt.scatter(0, 0, marker=\"D\", c=\"black\", alpha=0.8)\n plt.scatter(2, 2, marker=\"D\", c=\"black\", alpha=0.8)\n plt.show()", "def scatter_plot(x, y):\n mpl_fig = plt.figure()\n plt.scatter(x, y)\n return get_div_from_data(mpl_fig)", "def ScatterPlot(data, labels, gamma):\n\n fig, ax = plt.subplots(figsize=(16, 8))\n for i, label in 
enumerate(labels):\n plt.scatter(data[i, 0], data[i, 1], label=label,\n color=['red' if label < 0 else 'green'])\n plt.axvline(gamma / 2, linestyle='--', color='indigo', alpha=0.3)\n plt.axvline(-gamma / 2, linestyle='--', color='indigo', alpha=0.3)\n plt.show()", "def scatterplot():\r\n #get the data for the plots\r\n reddata = np.array([[1,1],[1,3],[4,2]])\r\n bluedata = np.array([[0,1],[0,5],[1,2],[2,3],[3,4]])\r\n yellowdata = np.array([[1,4],[2,2],[3,5],[6,2]])\r\n #convert the data to a pd DataFrame\r\n df = pd.DataFrame(reddata, columns=[\"x\",\"y\"])\r\n df1 = pd.DataFrame(bluedata, columns=[\"x\",\"y\"])\r\n df2 = pd.DataFrame(yellowdata, columns=[\"x\",\"y\"])\r\n #create the plot\r\n ax = df.plot.scatter(x=\"x\",y=\"y\",label=\"Red Group\",color=\"Red\",title=\"Scatter Plot in Three Colors\",xlim=(-1,7),ylim=(0,6))\r\n ax1 = df1.plot.scatter(x=\"x\",y=\"y\",label=\"Blue Group\",color=\"Blue\",ax=ax)\r\n ax2 = df2.plot.scatter(x=\"x\",y=\"y\",label=\"Yellow Group\",color=\"Yellow\",ax=ax)\r\n #get the figure from the axes and save it\r\n fig = ax.get_figure()\r\n fig.savefig(\"my_scatter_plot.png\")", "def scatter_plot(x_vals, y_vals, x_variable):\n if (x_variable == 'm'):\n x_variable = 'Mole Fraction A'\n elif (x_variable == 'p'):\n x_variable = 'Pressure (kPa)'\n elif (x_variable == 't'):\n x_variable = 'Temperature (K)'\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(x_vals, y_vals)\n plt.xlabel(x_variable)\n plt.ylabel('Electrical Conductivity')\n\n return fig", "def scatter(filename, data, lines=[]):\n import matplotlib.pyplot as plot\n plot.figure(random.randint(0, 10000000))\n plot.scatter(data[0], data[1], 20, 'b', 'o')\n plot.title(filename.split('.')[0])\n for line in lines:\n plot.plot([line[0], line[2]], [line[1], line[3]], '-')\n plot.savefig(filename)", "def scatter_plot(group1, group2):\n plt.scatter(group1, group2)\n plt.show()", "def add_positions_to_plot(self, **kwargs):\n logger.debug('Adding data positions to plot')\n plt.scatter(self.x, self.y, **kwargs)", "def plot_data(data, title):\n plt.title(title)\n plt.plot(range(len(data)), data[:, 0], 'r-', label='x')", "def plot_raw_data(dataset):\n num_attributes = list(dataset.drop(\"Sex\", axis=1))\n for att in num_attributes:\n if att != \"Rings\":\n dataset.plot(kind=\"scatter\", x=\"Rings\", y = att)\n plt.show()\n\n # dataset[\"Rings\"].hist()\n # plt.show()", "def plot_data(data, labels=None, markers = ['o', 's']):\n if labels is None:\n plt.scatter(data[:, 0], data[:, 1], c='b', s = 80, marker = markers[0])\n\n else:\n classes = np.sort(np.unique(labels))\n n_classes = classes.shape[0]\n color_blind_list = sns.color_palette(\"colorblind\", n_classes)\n sns.set_palette(color_blind_list)\n\n for i, l in enumerate(classes):\n plt.scatter(data[labels == l, 0],\n data[labels == l, 1],\n c=color_blind_list[i],\n s=80,\n marker=markers[i])", "def draw_scatter_plot(x: pd.Series, y: pd.Series, x_label: str, y_label: str):\n\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.scatter(x, y)\n plt.title(\"Scatter plot of '%s' and '%s'\" % (x_label, y_label))\n\n lr_model = linear_regression(x, y)\n plt.plot(x, lr_model.predict(np.array(x).reshape(-1, 1)), color='red')\n\n plt.show()", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def afficher_XY(X, Y):\n plt.scatter(X,Y, s = size)\n plt.show()", "def scatterplot(loc: List[CrimeStatistics]) -> None: \n # return None #stub\n #template based on visualization\n \n x = enrollment_list(loc)\n y = 
crime_list(loc)\n \n \n pyplot.scatter(x,y)\n pyplot.xlabel(\"Enrollment\")\n pyplot.ylabel(\"Total crime per campus\")\n pyplot.title(\"correlation between enrollment and crimes committed\")\n \n \n \n pyplot.show()\n print(linregress(x,y))\n \n \n return None", "def make_2d_scatter_plot(self, xdata, ydata, xlabel=None, xunits=None,\n ylabel=None, yunits=None, title=None,\n subplotnum=None, num_rows=None,\n plot_cor=True, set_range=True):\n import matplotlib.pyplot as plt\n plt.rcParams['text.usetex'] = True\n\n if not set_range:\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n\n plt.scatter(xdata, ydata)\n\n # Adjust ranges unless told otherwise\n if set_range:\n if isinstance(xdata, list):\n hrange = max(xdata) - min(xdata)\n if hrange != 0.0:\n plt.xlim(min(xdata)-0.1*hrange,\n max(xdata)+0.1*hrange)\n elif isinstance(xdata, np.ndarray):\n hrange = xdata.max() - xdata.min()\n if hrange != 0.0:\n plt.xlim(xdata.min()-0.1*hrange,\n xdata.max()+0.1*hrange)\n if isinstance(ydata, list):\n vrange = max(ydata) - min(ydata)\n if vrange != 0.0:\n plt.ylim(min(ydata)-0.1*vrange,\n max(ydata)+0.3*vrange)\n elif isinstance(ydata, np.ndarray):\n vrange = ydata.max() - ydata.min()\n if vrange != 0.0:\n plt.ylim(ydata.min()-0.1*vrange,\n ydata.max()+0.3*vrange)\n else:\n plt.xlim(xlim)\n plt.ylim(ylim)\n if plot_cor:\n # Calculate correlation and annotate\n rho, pval = self.get_correlation_coefficient(\n xdata=xdata,\n ydata=ydata,\n xsystkey=xlabel,\n ysystkey=ylabel\n )\n if (len(set(xdata)) != 1) and (len(set(ydata)) != 1):\n if subplotnum is not None:\n if num_rows is None:\n raise ValueError(\n \"Need to know the number of rows in \"\n \"order to correctly place the correlation \"\n \"annotation on the subplot\"\n )\n row = int((subplotnum-1)/4)\n xtext = 0.25*0.25+((subplotnum-1)%4)*0.25\n ytext = 0.88-(1.0/num_rows)*0.9*row\n plt.figtext(\n xtext,\n ytext,\n 'Correlation = %.2f'%rho,\n fontsize='large'\n )\n else:\n plt.figtext(\n 0.15,\n 0.80,\n 'Correlation = %.2f'%rho,\n fontsize=16\n )\n\n # Set labels, if required\n if xlabel is not None:\n nice_xlabel = self.make_label(xlabel, xunits)\n plt.xlabel(nice_xlabel, fontsize=16)\n if ylabel is not None:\n nice_ylabel = self.make_label(ylabel, yunits)\n plt.ylabel(nice_ylabel, fontsize=16)\n if subplotnum is None and (title is not None):\n plt.title(title, fontsize=16)", "def plot_transformed_data(transformed_data):\n plt.figure(1)\n plt.title('Transformed data')\n plt.xlabel('Eigenvector 1')\n plt.ylabel('Eigenvector 2')\n plt.plot(*transformed_data.T, 'o')", "def plotXY(data, x, y):\n fig, ax = plt.subplots()\n m, s = np.mean(data), np.std(data)\n im = ax.imshow(data, interpolation='nearest', cmap='gray',\n vmin=m-s, vmax=m+s, origin='lower')\n \n for (i, j) in zip(x, y):\n c = Circle(xy=(i, j), radius=3)\n c.set_facecolor('none')\n c.set_edgecolor('red')\n ax.add_artist(c)\n \n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n plt.close(fig)", "def plot_scatter(dataframe, colx, coly, xlabel='', \n ylabel='', \n xlim=[0,15], ylim=[0,15], density=True):\n\n if not density : \n plt.scatter(dataframe[colx].values, dataframe[coly].values)\n else:\n xvals = dataframe[colx].values\n yvals = dataframe[coly].values\n xy = np.vstack([xvals, yvals])\n z = gaussian_kde(xy)(xy)\n plt.scatter(xvals, yvals, c=z, s=10, edgecolor='')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.plot(np.linspace(xlim[0], xlim[1], 100), \n np.linspace(ylim[0], ylim[1], 100), \n color='black')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.show();", "def 
plotScatter(verts, data, coords=(1,2), comp=2):\n z = data[:,:,comp].flatten()\n x = verts[:,coords[0]]\n y = verts[:,coords[1]]\n\n # NOTE: either scatter or pcolor should work\n plt.figure()\n compDict = {0:'X',1:'Y',2:'Z'}\n #plt.gca().set_aspect('equal')\n plt.scatter(x, y, c=z, s=80, cmap=plt.cm.bwr)\n plt.title( compDict[comp] + ' Displacement' )\n plt.xlabel(compDict[coords[0]] + ' Distance [m]')\n plt.ylabel(compDict[coords[1]] + ' Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')", "def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")", "def plot_data(self, classA, classB):\n plt.scatter(classA[:,0], classA[:,1], color='cyan', alpha=0.7, s=7)\n plt.scatter(classB[:,0], classB[:,1], color='purple', alpha=0.7, s=7)\n plt.axis('tight')\n plt.show()", "def plot_scatter(x_data, y_data, xlabel=None, ylabel=None, annotate=True, ax=None):\n\n ax = plt.subplots()[1] if not ax else ax\n\n ax.scatter(x_data, y_data, alpha=0.2)\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n if annotate:\n r_val = spearmanr(x_data, y_data, nan_policy='omit')[0]\n plt.text(0.65, 0.11, 'r={:1.2f}'.format(r_val),\n size=20, transform=ax.transAxes);\n\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))", "def plot_dataset(X, classes):\n data = pd.DataFrame(X, columns=['x', 'y'])\n data['dataset'] = classes\n sns.lmplot('x', 'y', data=data, hue='dataset', fit_reg=False, size=10,\n palette=sns.color_palette(\"Set3\", 10),\n scatter_kws={\"s\": 75})", "def plot(self):\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=Axes3D.name)\n\n # TODO Use numpy to rotate esp_points matrix for faster variable access.\n ax.scatter(\n xs=[i[0][0] for i in self.esp_points],\n ys=[i[0][1] for i in self.esp_points],\n zs=[i[0][2] for i in self.esp_points],\n c=[i[1] for i in self.esp_points],\n marker='o',\n s=2,\n alpha=0.5\n )\n\n ax.scatter(\n xs=[i[0][0] for i in self.atom_points],\n ys=[i[0][1] for i in self.atom_points],\n zs=[i[0][2] for i in self.atom_points],\n c=[i[1] for i in self.atom_points],\n marker='X',\n s=100\n )\n\n plt.show()", "def make_plot(x,y):", "def plot_scatter(fdata, chart=None, *, sample_points=None, derivative=0,\n fig=None, axes=None,\n n_rows=None, n_cols=None, n_points=None, domain_range=None,\n sample_labels=None, label_colors=None, label_names=None,\n **kwargs):\n\n if sample_points is None:\n # This can only be done for FDataGrid\n sample_points = fdata.sample_points\n evaluated_points = fdata.data_matrix\n else:\n evaluated_points = fdata(sample_points, grid=True)\n\n fig, axes = _get_figure_and_axes(chart, fig, axes)\n fig, axes = _set_figure_layout_for_fdata(fdata, fig, axes, n_rows, n_cols)\n\n if domain_range is None:\n domain_range = fdata.domain_range\n else:\n domain_range = _list_of_arrays(domain_range)\n\n sample_colors, patches = _get_color_info(\n fdata, sample_labels, label_names, label_colors, kwargs)\n\n if fdata.dim_domain == 1:\n\n color_dict = {}\n\n for i in range(fdata.dim_codomain):\n for j in range(fdata.n_samples):\n\n if sample_colors is not None:\n color_dict[\"color\"] = sample_colors[j]\n\n axes[i].scatter(sample_points[0],\n evaluated_points[j, ..., i].T,\n **color_dict, **kwargs)\n\n else:\n\n X = fdata.sample_points[0]\n Y = fdata.sample_points[1]\n X, Y = np.meshgrid(X, Y)\n\n color_dict = {}\n\n for i in 
range(fdata.dim_codomain):\n for j in range(fdata.n_samples):\n\n if sample_colors is not None:\n color_dict[\"color\"] = sample_colors[j]\n\n axes[i].scatter(X, Y,\n evaluated_points[j, ..., i].T,\n **color_dict, **kwargs)\n\n _set_labels(fdata, fig, axes, patches)\n\n return fig", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def scatterPlot1():\n n = 1024\n X = np.random.normal(0,1,n)\n Y = np.random.normal(0,1,n)\n T = np.arctan2(Y,X)\n\n plt.axes([0.025,0.025,0.95,0.95])\n plt.scatter(X,Y, s=75, c=T, alpha=.5)\n\n plt.xlim(-1.5,1.5), plt.xticks([])\n plt.ylim(-1.5,1.5), plt.yticks([])\n # savefig('../figures/scatter_ex.png',dpi=48)\n plt.show()", "def visualize_data(data):\n\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=2)\n\n # Fit and transform x to visualise inside a 2D feature space\n x_vis = pca.fit_transform(data[data.columns[:-1]])\n y = data['Tumor'].as_matrix()\n\n # Plot the original data\n # Plot the two classes\n palette = sns.color_palette()\n\n plt.scatter(x_vis[y == 0, 0], x_vis[y == 0, 1], label=\"Normal\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[0], linewidth=0.15)\n plt.scatter(x_vis[y == 1, 0], x_vis[y == 1, 1], label=\"Tumor\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[2], linewidth=0.15)\n\n plt.legend()\n plt.show()", "def scatter(x, colors):\n \n # We choose a color palette with seaborn.\n palette = np.array(sns.color_palette(\"hls\", 2))\n\n # We create a scatter plot.\n f = plt.figure(figsize=(10, 8))\n ax = plt.subplot(aspect='equal')\n sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40,\n c=palette[colors.astype(np.int)])\n \n ax.axis('off') # the axis will not be shown\n ax.axis('tight') # makes sure all data is shown\n \n # set title\n plt.title(\"Featurespace Visualization Titanic\", fontsize=25)\n \n # legend with color patches\n survived_patch = mpatches.Patch(color=palette[1], label='Survived')\n died_patch = mpatches.Patch(color=palette[0], label='Died')\n plt.legend(handles=[survived_patch, died_patch], fontsize=20, loc=1)\n\n return f, ax, sc", "def scatter_plot(x_all, labels):\n \n color = labels.iloc[:,1].apply(lambda x: \"green\" if x == 0 else \"red\" if x == 1 else \"blue\")\n \n plt.figure(figsize = (6,4))\n plt.scatter(x_all.iloc[:,1], x_all.iloc[:,2], c = color)\n plt.xlabel('Title Polarity', fontsize = 15)\n plt.ylabel('Content Polarity', fontsize = 15)", "def scatterPlot2DBig(data, title, classes):\n fig = plt.figure(figsize=(15, 15))\n colormap = np.array([\"g\", \"b\"])\n\n if classes is not None:\n plt.scatter(data[:, 0], data[:, 1], c=colormap[classes])\n else:\n plt.scatter(data[:, 0], data[:, 1])\n plt.title(title, fontsize=18)\n plt.show()", "def scatter_plot(self, speed=0.001):\n FPARTX, FPARTY, FPARTST = self.get_particles_props('x', 'y', 'state')\n plt.clf()\n plt.scatter(FPARTX, FPARTY, FPARTST*5 + 0.01, c=FPARTST, cmap=\"jet\")\n plt.clim(0, 1)\n plt.colorbar()\n plt.pause(speed)", "def _timeseries_scatter_plot_panel(self, data, axes, project, y_values):\n timesteps = np.linspace(0, 1, len(data[0]))\n if project == \"cmip6\":\n cb_colors = plt.cm.Reds(np.linspace(0, 1, len(data[1])))\n if project == \"cmip5\":\n cb_colors = plt.cm.Blues(np.linspace(0, 1, len(data[1])))\n cb_colors[:, -1] = timesteps\n\n axes.scatter(\n data[0],\n data[1],\n facecolors=\"none\",\n linewidths=0.8,\n s=70,\n color=cb_colors,\n label=self.formatter(project.upper()),\n )\n base_colors = {\"cmip5\": \"#2161A6\", 
\"cmip6\": \"#BB3437\"}\n # plot regression\n axes.plot(data[0], y_values, color=base_colors[project])\n return base_colors[project]", "def scatterPlot2DMiddle(data, title, classes):\n fig = plt.figure(figsize=(8, 8))\n colormap = np.array([\"g\", \"b\"])\n if classes is not None:\n plt.scatter(data[:, 0], data[:, 1], c=colormap[classes], s=0.2)\n else:\n plt.scatter(data[:, 0], data[:, 1], s=1)\n plt.title(title, fontsize=18)\n plt.show()", "def scatterplot(self, x = \"Predictor\", y = \"Response\", color = None, jitter = False, jitter_sd = .1,\n marg_x = None, marg_y = None, trendline = None, opacity = 1, template = \"ggplot2\",\n has_title = True, title = None):\n x_clean, df_clean = clean_varname(self._df, var = x)\n y_clean, df_clean = clean_varname(df_clean, var = y)\n\n if jitter:\n df_clean[x_clean] = df_clean[x_clean] + np.random.normal(0, jitter_sd, size=len(df))\n df_clean[y_clean] = df_clean[y_clean] + np.random.normal(0, jitter_sd, size=len(df))\n\n if color:\n color_clean, df_clean = clean_varname(df_clean, var = color)\n else:\n color_clean = color \n\n if has_title:\n if not title:\n title = f\"Scatter Plot of {x_clean} and {y_clean}\"\n \n fig = px.scatter(df_clean, x=x_clean, y=y_clean, color=color_clean, title = title,\n marginal_x = marg_x, marginal_y = marg_y, trendline = trendline, template = template, opacity = opacity)\n return fig", "def scatter_plot(x_train, y_train, x_test, y_test, class1, class2):\n train_c0 = x_train[y_train == 0, :]\n train_c1 = x_train[y_train == 1, :]\n test_c0 = x_test[y_test == 0, :]\n test_c1 = x_test[y_test == 1, :]\n fig, a = plt.subplots(1, 2)\n fig.set_size_inches(11, 5)\n a[0].scatter(train_c0[:, 0], train_c0[:, 1], color='green', label=class1)\n a[0].scatter(train_c1[:, 0], train_c1[:, 1], color='red', label=class2)\n a[0].legend()\n a[0].set_title('Train Set')\n a[1].scatter(test_c0[:, 0], test_c0[:, 1], color='green', label=class1)\n a[1].scatter(test_c1[:, 0], test_c1[:, 1], color='red', label=class2)\n a[1].legend()\n a[1].set_title('Test Set')\n plt.show()", "def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()", "def scatterplot(X,Y,xlabel,ylabel,filename,outdir,name_mod):\n\tplt.clf()\n\tplt.scatter(X,Y)\n\tplt.xlabel(xlabel)\n\tplt.ylabel(ylabel)\n\tplt.savefig(outdir+filename + name_mod + \".png\")\n\tnp.savetxt(outdir+ xlabel + name_mod + \".npy\",x)\n\tnp.savetxt(outdir+ ylabel + name_mod + \".npy\",y)\n\treturn None", "def exercise_4(self):\n student_data = self.student_data\n # Change the legend order in the scatter plot\n sns.scatterplot(x=\"absences\", y=\"G3\", \n data=student_data, \n hue=\"location\",\n hue_order = [\"Rural\"\n ,\"Urban\"])\n\n # Show plot\n plt.show()", "def vis_points(data,f1,f2):\n if np.isnan(data).any():\n return\n \n plt.scatter(data[:,f1], data[:,f2], alpha=0.2, c='b')\n plt.xlim(lims)\n plt.ylim(lims)", "def plot(self) -> Figure:\n ax = self.setup_plot()\n ax.scatter(\n self.individual_doses,\n self.responses,\n label=\"Data\",\n **plotting.DATASET_INDIVIDUAL_FORMAT,\n )\n ax.legend(**plotting.LEGEND_OPTS)\n return ax.get_figure()", "def chaco_scatter(dataview, x_name, y_name, x_label=None, y_label=None,\n color=None):\n\n plot = Plot(dataview)\n plot.plot((x_name, y_name), type=\"scatter\", marker='dot', color=color)\n\n if x_label is None:\n x_label = x_name\n if y_label is None:\n y_label = y_name\n x_axis = PlotAxis(mapper=plot.x_mapper, 
orientation='bottom', title=x_label)\n y_axis = PlotAxis(mapper=plot.y_mapper, orientation='left', title=y_label)\n plot.underlays.append(x_axis)\n plot.underlays.append(y_axis)\n return plot", "def plot_scatter_and_linreg(df, col='b'):\n lr = LinearRegression()\n lr.fit(df['x'].reshape(-1, 1), df['y'])\n df.plot(kind='scatter', x='x', y='y', c=col, s=50)\n x_pred = np.linspace(df['x'].min(), df['x'].max(), 10)\n y_pred = lr.predict(x_pred.reshape(-1, 1))\n plt.plot(x_pred, y_pred, ls=':', c=col)\n\n plt.title(df.name)", "def plot_data(X, y):\n\n x_dim = X.shape[1]\n\n # Ignore 1 dimensional data\n if x_dim == 1:\n print(\"plot_data not gonna bother with 1 dimensional data\")\n return\n\n # For 2 dimensional data, just plot it\n if x_dim == 2:\n plt.scatter(X[:,0], X[:,1], c=y)\n plt.show()\n return\n\n # For at least 4 dimensions, do PCA\n if x_dim >= 4:\n pca = PCA(n_components=3)\n pca.fit(X)\n plot_x = pca.transform(X)\n else:\n plot_x = X\n\n # Assumes y is either 1 or 0\n pos_idxs = np.where(y == 1)[0]\n neg_idxs = np.where(y == 0)[0]\n\n # Plot the now 3 dimensional data\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n Xs = plot_x[neg_idxs, :]\n ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='orange')\n Xs = plot_x[pos_idxs, :]\n ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='purple')\n\n # Label plot\n if x_dim >= 4:\n ax.set_title(\"PCA of Generated Data\")\n ax.set_xlabel(\"1st Principal Component\")\n ax.set_ylabel(\"2nd Principal Component\")\n ax.set_zlabel(\"3rd Principal Component\")\n\n else:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n\n # Display!\n plt.show()", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def simple_scatter():\n\n # Make two datasets specifying scatter graph\n dataset_a = DataSet(random_2d_a,plot='scatter')\n dataset_b = DataSet(random_2d_b,plot='scatter')\n\n # Make plot object and add data sets\n plot = Plot()\n plot.add_dataset(dataset_a)\n plot.add_dataset(dataset_b)\n\n # Plot graph and display\n plot.plot()\n plot.save(name='./figures/2d_simple_scatter',fmt='png')\n plot.display()", "def plot_dataset(features, labels, nb_classes: int) -> None:\n sns.scatterplot(x=features[:, 0], y=features[:, 1], hue=labels, markers=True)\n plt.title(f'Data from {nb_classes} classes')\n save_plot('mock_dataset')", "def Scatter_graph(COVID_data_scatter, Display_annotations_mask = False):\r\n COVID_data_scatter_names = COVID_data_scatter.pop('0Date')['Country'] # Extract names of columns plotted\r\n \r\n X_axis, Y_axis, Z_axis = [], [], [] # Separate the axes in COVID_data_scatter in order to find the minimum and maximum along each axis\r\n for Date_item in COVID_data_scatter.values():\r\n for Country_item in Date_item.values():\r\n for Axis_inc in range(3):\r\n [X_axis, Y_axis, Z_axis][Axis_inc].append(Country_item[Axis_inc])\r\n \r\n Min_list, Max_list = [], [] # Limits of the plotting area\r\n Graph_window_margin = 2 # Since the graph is in log scale, the plotting area can't be extended using New max = Factor * (Max - Min) so I just went with multiplying the maximum and dividing the minimum by a factor of 2\r\n for Axis_inc in range(2):\r\n Min_list.append(min([X_axis, Y_axis][Axis_inc]) / Graph_window_margin)\r\n Max_list.append(max([X_axis, Y_axis][Axis_inc]) * 
Graph_window_margin)\r\n \r\n cmap = cm.jet # Colormap for the 3rd axis\r\n cmap = colors.LinearSegmentedColormap.from_list('jet_truncated', cmap(np.linspace(0.2, 0.95, 100)))\r\n \r\n Z_axis_cleaned = list(filter(lambda Item: Item != None, Z_axis)) # Positivity rate to color converter\r\n norm = colors.Normalize(vmin = 0, vmax = max(Z_axis_cleaned), clip = True)\r\n mapper = cm.ScalarMappable(norm = norm, cmap = cmap)\r\n \r\n plt.close() # Initialise plotting area. A simple \"plt.clf()\" doesn't work to erase everything and prompts glitches after the 2nd execution of the code, forcing us to close the figure and reopen it\r\n fig = plt.figure(\"Scatter graph of COVID data\")\r\n fig.set_size_inches(tuple(1/fig.dpi * np.array([1920, 1080])))\r\n ax = fig.gca()\r\n \r\n manager = plt.get_current_fig_manager() # Adapt the matplotlib window to the screen\r\n manager.window.showMaximized()\r\n \r\n Data_frames = zip(COVID_data_scatter.keys(), COVID_data_scatter.values()) # Transform the first level of dictionnary into a list because we need to have access to the keys of that first level during the creation of the animation frames\r\n Animation_frames = [] # List where all the matplotlib objects for the animation will be stored\r\n for Frame in Data_frames:\r\n Date = Frame[0]\r\n \r\n Points_to_display, Positivity_rate_list, Points_colors = [], [], []\r\n \r\n Countries_displayed = list(Frame[1].keys())\r\n \r\n for Country in Countries_displayed: # For each country...\r\n Country_coords = Frame[1][Country][:2]\r\n Positivity_rate = Frame[1][Country][2]\r\n \r\n Points_to_display.append(Country_coords)\r\n \r\n if Positivity_rate != None: # If there is a positivity rate for that country, it is plotted with the color it corresponds to on the colormap\r\n Positivity_rate_list.append(Positivity_rate)\r\n Points_colors.append(mapper.to_rgba(Positivity_rate))\r\n else: # Otherwise, it appears in #ABB7B7 gray and a \"-1\" is appended to the list of positivity rates. That way, these points will be in last after the sorting in descending order in a few lines\r\n Positivity_rate_list.append(-1)\r\n Points_colors.append((0.6627, 0.6627, 0.6627, 1))\r\n \r\n All_points_info = list(zip(Countries_displayed, Points_to_display, Positivity_rate_list, Points_colors)) # Group everything, sort the points based on the positivity rate and then seperate everything to get the same objects as before but sorted\r\n All_points_info.sort(key = lambda x: x[2])\r\n All_points_info = list(zip(*All_points_info))\r\n \r\n Countries_displayed = list(All_points_info[0])\r\n Points_to_display = list(All_points_info[1])\r\n Positivity_rate_list = list(All_points_info[2])\r\n Points_colors = list(All_points_info[3])\r\n \r\n X_list_frame, Y_list_frame = zip(*Points_to_display) # Separate X and Y axes and plot the points\r\n scatter = ax.scatter(X_list_frame, Y_list_frame, c = Points_colors, s = np.pi * (Marker_ray*72/fig.dpi)**2, linewidth = 0.5, edgecolors = 'black') # Marker ray is the radius of the circle in pixels but s is the area of the circle in points. We have to convert the pixels in points (1 inch = 72 points = Screen dpi) then apply area = pi * radius²\r\n \r\n # Note: ax.scatter plots the points one by one so the last elements of the lists will be above the firsts. Since the X and Y axes are sorted in ascending order of positivity rate, the last points (high positivity rates) will be on top. 
This is on purpose because these are the most interesting ones\r\n \r\n Text_date = ax.text(0.02, 0.97, Date, transform = ax.transAxes, fontsize = Date_fontsize, verticalalignment = 'top', horizontalalignment = 'left', bbox = dict(boxstyle = 'round', facecolor = 'white', alpha = 0.9, pad = 0.3)) # Display the date\r\n \r\n fig.tight_layout() # Annotations_frame() requires the use of lines regarding the size of the plotting area. For them to work properly, we have to virtually draw the elements, which is why we use fig.tight_layout() in the middle of the creation of the animation frames\r\n \r\n Countries_to_annotate, Annotations_mask = Annotations_frame(Points_to_display[::-1], Countries_displayed[::-1], (Min_list[0], Max_list[0], Min_list[1], Max_list[1])) # Decide which countries to annotate and which not to\r\n \r\n Annotation_list = []\r\n for Country, Country_coords in zip(Countries_to_annotate.keys(), Countries_to_annotate.values()): # Annotate countries\r\n Annotation_list.append(ax.annotate(Country, Country_coords, textcoords = 'offset pixels', xytext=(0, Marker_ray + Annotation_fontsize/72*fig.dpi*0.5 + Annotation_offset), ha='center', va='center', fontsize = Annotation_fontsize))\r\n \r\n if Display_annotations_mask: # If something goes wrong during an edit, the user can still display the annotations outline\r\n ax_tw_x = ax.twinx() # Duplicate axis. Compulsory because the graph is in logscale and an image cannot be properly displayed in logscale\r\n ax2 = ax_tw_x.twiny()\r\n \r\n mapper_mask = cm.ScalarMappable(norm = colors.Normalize(vmin = 0, vmax = 1, clip = True), cmap = cm.gray) # Convert array of bools into array of colors then display the image\r\n Annotations_mask_im = mapper_mask.to_rgba(np.rot90(np.invert(Annotations_mask) + np.zeros(Annotations_mask.shape)), alpha = 0.3)\r\n Annotations_mask_ax = ax2.imshow(Annotations_mask_im, extent = [Min_list[0], Max_list[0], Min_list[1], Max_list[1]], aspect = 'auto')\r\n \r\n ax_tw_x.axis('off') # Not display axes of the image\r\n ax2.axis('off')\r\n \r\n Animation_frames.append([scatter, Text_date, Annotations_mask_ax] + Annotation_list)\r\n \r\n else: Animation_frames.append([scatter, Text_date] + Annotation_list)\r\n \r\n ax.set_title(\"COVID-19 pandemic - %s vs. %s\" % tuple(COVID_data_scatter_names[:2][::-1]), fontsize = Title_fontsize, pad = Title_pad)\r\n \r\n ax.set_xlabel(COVID_data_scatter_names[0], fontsize = Axis_label_fontsize)\r\n ax.set_ylabel(COVID_data_scatter_names[1], fontsize = Axis_label_fontsize)\r\n \r\n ax.set_xlim(Min_list[0], Max_list[0])\r\n ax.set_ylim(Min_list[1], Max_list[1])\r\n \r\n ax.set_xscale('log')\r\n ax.set_yscale('log')\r\n \r\n ax.grid(linestyle = '--', linewidth = 1.5, which = 'major')\r\n ax.grid(linestyle = '--', linewidth = 0.5, which = 'minor')\r\n \r\n ax.set_axisbelow(True)\r\n \r\n ax.tick_params(axis='x', labelsize = Axis_tick_fontsize)\r\n ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)\r\n \r\n Formatter_list = [] # Display axes graduations as multiples of 10 (rather than 10^n) and find how many decimal places to display\r\n for Axis_inc in range(2):\r\n if Min_list[Axis_inc] < 1: Min_axis_log = int(np.abs(np.floor(np.log10(Min_list[Axis_inc])))) - 1\r\n else: Min_axis_log = 0\r\n \r\n Formatter_list.append('%.' + str(Min_axis_log) + 'f')\r\n \r\n ax.xaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[0]))\r\n ax.yaxis.set_major_formatter(ticker.FormatStrFormatter(Formatter_list[1]))\r\n \r\n Divider = make_axes_locatable(ax) # Display 3rd axis (colors). 
Using make_axes_locatable() allows for better tight_layout results\r\n cax = Divider.append_axes('right', size = '2%', pad = 0.3)\r\n cbar = fig.colorbar(mapper, cax = cax)\r\n\r\n cbar.ax.set_ylabel(COVID_data_scatter_names[2], fontsize = Axis_label_fontsize, labelpad=Axis_label_pad)\r\n cbar.ax.tick_params(axis='y', labelsize = Axis_tick_fontsize)\r\n cbar.ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax = 1, decimals = 0)) # Set axis graduations as percentage with no decimal places\r\n \r\n ani = animation.ArtistAnimation(fig, Animation_frames, blit = True, interval = Animation_interval)\r\n \r\n fig.tight_layout()\r\n fig.show()\r\n \r\n return ani, COVID_data_scatter_names", "def create_scatter_plot(x,y,df,kmeans, X_scaled, scaler, name):\n \n plt.figure(figsize=(10, 6))\n sns.scatterplot(x = x, y = y, data = df, hue = name)\n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n centroids.plot.scatter(y=y, x= x, ax=plt.gca(), alpha=.30, s=500, c='black')\n plt.legend(bbox_to_anchor=(1.2,.8))", "def _plot_dict_scatter(d):\n xvals, yvals = _dict2lists(d)\n pylab.scatter(xvals, yvals)", "def ploter(self):\n if len(self.dataset[self.first_title]) != 2:\n print('plot is only avilable for two features')\n return\n x_axis = []\n y_axis = []\n for title in self.dataset:\n x_axis.append(self.dataset[title][0])\n y_axis.append(self.dataset[title][1])\n plt.plot(x_axis, y_axis, 'o')\n plt.show()", "def display(self):\n scatter_plot(self.points, self.hull_points, self.color, self.title)", "def proteinScatterPlot(yDataDict, xData, xMin=0, xMax=None, yMin=-0.1, yMax=10,\r\n title=None, xLabel=None, yLabel=None, colors=None, \r\n figSize=(10,10), markerSize=10, legend=False, alpha=1.0, marker='o',\r\n linestyle=None, xTicks=None, legendLoc='upper left', legendCols=2, axes=None): \r\n if xMax is None:\r\n xMax = max(xData)\r\n if colors is None:\r\n colors = [pylab.cm.jet(float(i)/float(len(yDataDict))) for i in range(len(yDataDict))]\r\n if axes is None:\r\n scat = pylab.figure(figsize=figSize)\r\n scatAx = scat.add_subplot(111)\r\n else:\r\n scatAx=axes\r\n for i,p in enumerate(qMS.sort_nicely(yDataDict.keys())):\r\n \r\n if not (linestyle is None):\r\n scatAx.plot(xData, yDataDict[p], c=colors[i], linestyle=linestyle, label=p, marker=marker, markersize=markerSize, alpha=alpha)\r\n else:\r\n scatAx.plot(xData, yDataDict[p], c=colors[i], markersize=markerSize, marker=marker, label=p, alpha=alpha)\r\n scatAx.set_title(title, multialignment='center')\r\n scatAx.set_xlabel(xLabel)\r\n scatAx.set_ylabel(yLabel)\r\n scatAx.set_xlim([xMin,xMax])\r\n scatAx.set_ylim([yMin,yMax])\r\n if xTicks is None:\r\n scatAx.set_xticks([0,xMax/4,xMax/4*2,xMax/4*3,xMax])\r\n else:\r\n scatAx.set_xticks(xTicks)\r\n scatAx.set_yticks([0,yMax/4,yMax/4*2,yMax/4*3,yMax])\r\n if legend:\r\n pylab.legend(loc=legendLoc, ncol=legendCols, scatterpoints=1)\r\n scatAx.yaxis.tick_left()\r\n scatAx.xaxis.tick_bottom()\r\n pylab.tight_layout()\r\n \r\n return scatAx", "def plot(self):\n fig = go.Figure()\n for traj in self.data:\n fig.add_trace(\n go.Scatter(\n x=traj.age,\n y=traj.AF\n )\n )\n fig.update_layout(title=self.id)\n return fig", "def scatterResults(X, Xcolour,offset,x_angles,x_angle_location,y_angles,y_angle_location):\n #ind=np.argpartition(X, 4, axis=0)[:4]\n ind=np.argmin(X,axis=0)\n xind = []\n yind = []\n for i in range(len(ind)):\n xind.append(x_angle_location[ind[i]])\n yind.append(y_angle_location[ind[i]])\n plt.scatter(xind,yind, alpha=0.01, 
color=Xcolour)\n plt.xticks(np.array(range(0,len(x_angles))),x_angles)\n pylab.xlim(-1,len(x_angles)+1)\n plt.yticks(np.array(range(0,len(y_angles))),y_angles)\n pylab.ylim(-1,len(y_angles)+1) \n locs, labels = plt.xticks()\n plt.setp(labels, rotation=90)\n plt.xlabel(\"x-angles\")\n plt.ylabel(\"y-angles\")\n plt.grid(b=True, which='major')", "def scatter_plot(points, values=None, discrete=False, fname=\"scatter_plot.pdf\",\n axis_labels=None, valrange=None, show_legend=True, color_map=cm.gray, \n plot_title=None, plot_suptitle=None):\n figure()\n x, y = zip(*points) # unzip points\n # axis\n maxval, minval = valrange if valrange else (max(x+y) * 1.1 , min(x+y) * 1.1)\n axis([minval, maxval, minval, maxval])\n # axis labels\n if axis_labels:\n xlabel(axis_labels[0])\n ylabel(axis_labels[1]) \n # plot\n if values:\n if discrete: # points belongs to classes\n m = ['s', 'o', '^', '>', 'v', '<', 'd', 'p', 'h', '8', '+', 'x']\n c = ['red', 'blue', 'green', 'cyan', 'magenta', 'yellow', 'grey']\n clnames = sorted(set(values)) # class names\n for cidx, cn in enumerate(clnames):\n pts = [points[idx] for idx, el in enumerate(values) if el == cn]\n x, y = zip(*pts) # unzip\n scatter(x, y, marker=m[cidx % len(m)], c=c[cidx % len(c)], label=str(cn))\n # legend\n if show_legend: \n legend(loc='best')\n else: # continuous values -> using a colormap\n scatter(x, y, c=values, cmap=color_map)\n else: # no values attached\n scatter(x, y)\n if not plot_title is None:\n title(plot_title)\n if not plot_suptitle is None:\n suptitle(plot_suptitle)\n # save\n savefig(fname)", "def lda_scatter(X,Y, dim3=True):\n # Fit data\n lda = LDA()\n lda.fit(X, Y)\n X_r2 = lda.transform(X) \n\n # 3-D plot\n if dim3:\n fig = pylab.figure()\n ax = Axes3D(fig)\n ax.scatter3D(X_r2[:,0],X_r2[:,1],X_r2[:,2], c=Y)\n \n #2-D plot\n else:\n plt.scatter(X_r2[:,0], X_r2[:,1], c= Y )", "def plot_scatter(df):\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values\": fig})\n\n # Poor Results\n df = df.query(\"mae > 2\")\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values [mae > 2]\": fig})", "def plot_axis(ax, data, color):\n x_indices = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n ax.scatter(x_indices, data, color=color, s=4)\n ax.plot(x_indices, data, color=color, linewidth=2)\n ax.set_yticks([]) # same for y ticks\n ax.set_ylim([0, 1])", "def create_scatter_plot(x_data_dict,\r\n y_data_dict,\r\n x_label=\"X\",\r\n y_label=\"Y\",\r\n title=\"Scatter Plot\",\r\n log_x=False,\r\n log_y=False,\r\n marks=[]):\r\n\r\n # copy the dict so we don't change it\r\n x_data = copy.copy(x_data_dict)\r\n y_data = copy.copy(y_data_dict)\r\n\r\n # take the log of the data as appropriate\r\n if log_x:\r\n for key in x_data.keys():\r\n if not x_data[key] == 0:\r\n x_data[key] = np.log(x_data[key])\r\n\r\n if log_y:\r\n for key in y_data.keys():\r\n if not y_data[key] == 0:\r\n y_data[key] = np.log(y_data[key])\r\n\r\n # create a list of countries common to both data sets\r\n countries = set([key for key in x_data.keys() if key in y_data] + [key for key in y_data.keys() if key in x_data])\r\n\r\n # create lists of X, Y, marked_X, and marked_Y data\r\n X = [x_data[key] for key in countries if key not in marks]\r\n Y = [y_data[key] for key in countries if key not in marks]\r\n marked_X = [x_data[key] for key in marks]\r\n marked_Y = [y_data[key] for key in marks]\r\n\r\n # 
plot the scatter plot\r\n plt.style.use('Solarize_Light2')\r\n fig, ax = plt.subplots()\r\n plt.scatter(X, Y)\r\n\r\n # plot and annotate the points of interest\r\n if len(marks) > 0:\r\n plt.scatter(marked_X, marked_Y, marker=\"D\")\r\n for mark in marks:\r\n mark_X = x_data[mark]\r\n mark_Y = y_data[mark]\r\n mark_Y += max(max(Y), max(marked_Y))/60\r\n ax.annotate(mark,\r\n (mark_X, mark_Y),\r\n bbox=dict(boxstyle='round', facecolor='white', alpha=0.5))\r\n\r\n # add titles\r\n plt.xlabel(x_label)\r\n plt.ylabel(y_label)\r\n plt.title(title)\r\n\r\n # display the plot\r\n plt.show()", "def plotly_scatter_plot_chart():\n df = read_dataset(Path('..', '..', 'iris.csv'))\n\n model_data = cluster_iris_dataset_again()\n df['clusters'] = model_data['clusters']\n\n fig = px.scatter(df, x=\"sepal_width\", y=\"sepal_length\", color=\"clusters\")\n\n return fig", "def plot(data, labels):\n plt.subplots_adjust(bottom=0.1)\n plt.scatter(\n data[0, :], data[1, :], marker='o',\n cmap=plt.get_cmap('Spectral')\n )\n\n for label, x, y in zip(labels, data[0, :], data[1, :]):\n plt.annotate(\n label,\n xy=(x, y), xytext=(-20, 20),\n textcoords='offset points', ha='right', va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))\n\n plt.show()", "def plot(self, *args, **kwargs):\n pass", "def setup_plot(self):\n x, y = next(self.stream).T\n c = None\n s = 100\n self.scat = self.ax.scatter(x, y, c=c, s=s, \n vmin=0, vmax=1,\n cmap=\"jet\", edgecolor=\"k\")\n self.ax.axis([0, 16.5, 0, 5])\n self.ax.set_ylabel('wouldbe x')\n self.ax.set_xlabel('wouldbe y')\n # return the updated artist to FuncAnimation\n # It expects a sequence of artists, thus the trailing comma.\n return self.scat,", "def scatter_plot(self, species_id_xaxis, fit_id_xaxis, species_id_yaxis,\n fit_id_yaxis, lin_fit_opt=1, species_id_zaxis=None, \n fit_id_zaxis=None, start=None, stop=None, ax=None, \n **kwargs):\n res_x = self.get_results(species_id_xaxis, fit_id_xaxis, start, stop)\n res_y = self.get_results(species_id_yaxis, fit_id_yaxis, start, stop)\n \n res_z = None\n if species_id_zaxis is not None and fit_id_zaxis is not None:\n res_z = self.get_results(\\\n species_id_zaxis, fit_id_zaxis, start, stop)\n if ax is None:\n fig, ax = subplots(1, 1)\n else:\n fig = ax.figure.canvas.figure\n if res_z is not None:\n sc = ax.scatter(res_x.values, res_y.values, 15, res_z.values,\\\n marker = 'o', edgecolor = 'none', **kwargs)\n cb = fig.colorbar(sc, ax = ax, shrink = 0.9, **kwargs) \n cb.set_label(species_id_zaxis + \" \" + fit_id_zaxis, **kwargs)\n else:\n ax.plot(res_x.values, res_y.values, \" b*\", label = \"Data\",\\\n **kwargs)\n ax.set_xlabel(species_id_xaxis + \" \" + fit_id_xaxis)\n ax.set_ylabel(species_id_yaxis + \" \" + fit_id_yaxis)\n ax.grid()\n if lin_fit_opt:\n self.linear_regression(res_x.values, res_y.values, ax = ax)\n ax.legend(loc = 'best', fancybox = True, framealpha = 0.5, fontsize = 12)\n return ax", "def scatter(args):\n prism.scatter.run(\n input_fp=args.input,\n output_fp=args.output,\n width=args.width,\n height=args.height,\n scale=args.scale,\n font_family=args.font_family,\n )", "def scatter(series):\n\n if len(series) != 2:\n raise ValueError(\"\"\"Can only plot 2 series in a scatter plot.\n Received {0}\"\"\".format(str(len(series))))\n\n if len(series[0]) != 2:\n raise ValueError(\"\"\"First element of series array of length {0}.\n It should be of a tuple of length 2\"\"\".format(str(len(series[0]))))\n\n if len(series[1]) != 2:\n 
raise ValueError(\"\"\"Second element of series array of length {0}.\n It should be of a tuple of length 2\"\"\".format(str(len(series[1]))))\n\n matplotlib.pyplot.scatter(series[0][1], series[1][1], marker='.')\n matplotlib.pyplot.xlabel(series[0][0])\n matplotlib.pyplot.ylabel(series[1][0])\n matplotlib.pyplot.show()", "def scatter(self, points):\n assert points.shape[0] == 3\n self.points = points\n self.remove_scatter_plot()\n x, y, z = zip(*points.T)\n self.base_fig.add_trace(go.Scatter3d(\n x=x, y=y, z=z,\n mode='markers',\n marker=dict(\n size=2,\n color=np.zeros(points.shape[1]),\n colorscale=\"RdBu\",\n colorbar=dict(\n title=\"Samples\",\n x=-0.2\n ),\n )\n ))", "def scatterplot(\n template,\n tm: str,\n x_data: list,\n y_data: list,\n t: str,\n x_l: str,\n y_l: str,\n color: str = \"b\",\n marker: str = \"o\"\n ) -> None:\n\n # Determine the maximum x-axis value of the graph\n x_max = math.ceil(max(x_data))\n\n # Open a figure\n plot.figure()\n\n # Plot the scatter plot\n plot.rcParams.update({\"figure.figsize\":(7, 5), \"figure.dpi\":100})\n plot.scatter(x_data, y_data, c = color, marker = marker)\n plot.gca().set(title = t, ylabel = y_l, xlabel = x_l)\n plot.xlim(0, x_max)\n\n # # Show the plot\n # plot.show()\n\n # Save the figure\n save_plot(template, t, tm)\n\n return", "def plot_2D(df):\n import matplotlib.pyplot as plt\n fig = plt.figure(figsize=(8,6))\n fig.clf()\n #Get the current Axes instance on the current figure matching the given \n #keyword args, or create one.\n ax = fig.gca()\n df.plot(kind = 'scatter', x = 'x', y = 'y', ax = ax, alpha = 0.5)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('X vs. Y')\n return 'Done'", "def generate_scatter(data, x, y, data_format=\"wide\", selected_points=[], **kwargs):\n if data_format == \"wide\":\n fig = go.Figure()\n\n # Plot only if 2 variables are chosen, show an empty plot otherwise\n if len(y) > 0 and len(x) > 0:\n \n # This is done to provide a fillvalue for the zip_longest funtion\n if len(x) > len(y):\n previous = y[0]\n else:\n previous = x[0]\n\n\n \"\"\"colors = data[\"value\"][data[\"variable\"] == \"SARS-Cov-2 exam result\"].to_numpy()\n colors = np.where(colors==\"positive\", \"red\", colors)\n colors = np.where(colors==\"negative\", \"blue\", colors)\"\"\"\n\n # Loop through the pairs of attributes and add traces to the graph\n # zip_longest makes sure the number of pairs correspond to the lenght of the lognest of two argumens\n # The shorter argument is paired with the previous argument\n for attribute_x, attribute_y in zip_longest(x, y, fillvalue=previous):\n fig.add_trace(go.Scatter(\n x=data[\"value\"][data[\"variable\"] == attribute_x],\n y=data[\"value\"][data[\"variable\"] == attribute_y],\n name=attribute_x + \"-\" + attribute_y,\n mode='markers',\n )\n )\n fig.update_layout(legend=dict(\n orientation=\"h\",\n yanchor=\"bottom\",\n y=1.02,\n xanchor=\"right\",\n x=1\n ))\n return fig\n\n elif data_format == \"long\":\n fig = px.scatter(data, x=x, y=y, **kwargs)\n # Highlight the selected point with yellow\n if selected_points:\n fig.update_traces(selectedpoints=selected_points, selected={'marker': { 'color': 'yellow' }})\n return fig", "def plot_scatter(x_variable, y_variable, df, x_title, y_title):\n slope, intercept, r_value, p_value, std_err = stats.linregress(\n df[x_variable], df[y_variable])\n plot = sns.regplot(x=x_variable, y=y_variable, data=df, line_kws={'label':\n \"y={0:.3f}x+{1:.3f}\".format(slope, intercept)})\n plot.set(xlabel=x_title, ylabel=y_title + ' (K)')\n title = y_title + 
' Versus ' + x_title\n plot.set_title(title)\n plot.legend()\n plot.figure.savefig(title.replace(' ', '_'))\n plt.figure()", "def dot_plot(data) :\n if data.dtype != 'int' :\n raise ValueError('Data is not all integers.')\n else :\n x = np.arange(np.min(data), np.max(data) + 1)\n y = np.bincount(data)[np.min(data):]\n \n plot_x = []\n plot_y = []\n \n for a, b in zip(x, y) :\n count = b\n while b >= 1 :\n plot_x.append(a)\n plot_y.append(b)\n b -= 1\n if b == 1 :\n plot_x.append(a)\n plot_y.append(1)\n break\n \n plt.figure(figsize = (12, 5))\n plt.plot(plot_x, plot_y, linestyle = 'None', marker = 'o', markerfacecolor = 'white', markeredgecolor = 'firebrick')\n plt.xticks([int(x_x) for x_x in np.arange(np.min(x), np.max(x) + 1)])\n if np.max(y) > 25 :\n plt.yticks([int(y_y) for y_y in np.arange(1, np.max(y) + 5) if y_y % 5 == 0])\n else :\n plt.yticks([int(y_y) for y_y in np.arange(1, np.max(y) + 1)])\n plt.show()" ]
[ "0.7719039", "0.767001", "0.7650398", "0.75902605", "0.75371957", "0.75242335", "0.7382766", "0.73495", "0.7263594", "0.72284347", "0.7221781", "0.72211146", "0.7214683", "0.72076035", "0.71131575", "0.7094507", "0.7062563", "0.7052775", "0.70064485", "0.70044965", "0.6990876", "0.69709325", "0.6929317", "0.69268787", "0.69209695", "0.6874614", "0.68673384", "0.686475", "0.684981", "0.6818148", "0.68173474", "0.6810051", "0.6807947", "0.68066835", "0.6794783", "0.6769252", "0.67542636", "0.67487705", "0.6725545", "0.6708644", "0.6700579", "0.66860634", "0.6679364", "0.66595644", "0.6645949", "0.66185033", "0.660977", "0.6591136", "0.6575681", "0.6559661", "0.65337056", "0.65175277", "0.6515443", "0.6513505", "0.65004253", "0.64883757", "0.6481861", "0.64776343", "0.6459555", "0.6452147", "0.64481926", "0.64453936", "0.64368755", "0.6436553", "0.64316523", "0.6429679", "0.642869", "0.641646", "0.641597", "0.638031", "0.6371311", "0.636547", "0.63652325", "0.6355605", "0.63502336", "0.63471746", "0.6332115", "0.6325782", "0.6316388", "0.63132435", "0.63011605", "0.6300351", "0.6292527", "0.629083", "0.6276667", "0.62754923", "0.62737095", "0.6271842", "0.6267679", "0.6249291", "0.6236792", "0.6235643", "0.6226694", "0.6221463", "0.6210948", "0.62103736", "0.6189439", "0.6188808", "0.618678", "0.61854875" ]
0.6269806
88
K-Nearest Neighbors classifier. Return the most frequent class among the k nearest points.
def knn(p, k, x, t):
    # Number of instances in data set
    N = x.shape[0]
    Euclidean_Distance = numpy.square(x - p)     # Euclidean distance
    dis = numpy.sum(Euclidean_Distance, axis=1)  # sum of the euclidean distance
    inds = numpy.argsort(dis)[:k]                # sort the indices of the distance array
    tgt_cat = Counter([t[i] for i in inds])      # count the times of equivalent target labels
    top_class = max(tgt_cat, key=tgt_cat.get)    # top class among the k nearest points
    #top_class = 0
    return top_class
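A minimal usage sketch for the knn document above, assuming numpy and collections.Counter are imported as the snippet's calls imply; the toy arrays, labels, and query point below are illustrative only and not taken from the dataset:

import numpy
from collections import Counter

# three labelled training points in 2-D and one query point p
x = numpy.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0]])
t = ['a', 'a', 'b']
p = numpy.array([0.05, 0.1])

print(knn(p, 2, x, t))  # -> 'a', the majority label among the 2 nearest points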
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def knn_classify_point(point, data, k, distance_metric):\n k_closest_points = get_k_closest_points(point, data, k, distance_metric)\n classification_counts = {}\n for item in k_closest_points:\n classification_type = item.classification\n if classification_type not in classification_counts:\n classification_counts[classification_type] = 0\n else:\n classification_counts[classification_type] += 1\n classification_counts = sorted(classification_counts, key = classification_counts.get)\n return classification_counts[-1]", "def knn_classify(k, labeled_points, new_point):\n by_distance = sorted(labeled_points,\n key=lambda point, _: la.distance(point, new_point))\n\n #find the labels for the k clsest\n k_nearest_labels = [label for _, label in by_distance[:k]]\n #and ket them vote\n return majority_vote(k_nearest_labels)", "def k_nn(frame, newPoint, colClass, k): \n counts = []\n \n # find all distances wrt the newPoint\n dist = find_distances(frame, newPoint)\n\n # find the nearest k points, extract their labels and save them in a list\n labels = [label for distance,label in dist[:k]] \n \n # for each class label, count how many occurrencies have been found\n for label in frame[colClass].unique():\n # save the number of occurrencies in a list of tuples (number, label)\n counts.append((labels.count(label), label)) \n \n # sort the list in descending order, and use the first label of the tuples'\n # list to make the prediction \n counts.sort(reverse=True)\n prediction = counts[0][1] \n \n return prediction", "def majority_voting(distances, labels, k):\n nearest_index = np.argsort(distances)\n k_neighbor_labels = []\n for i in range(k):\n index = nearest_index[i]\n label = labels[index]\n k_neighbor_labels.append(label)\n major_class = np.argmax(np.bincount(k_neighbor_labels))\n return major_class", "def knn_classification(x_test, df_training, attrib_column, k):\n return majority_vote(k_nearest_neighbors(x_test, df_training,k),df,attrib_column)", "def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)", "def predict_knn(data, example, k):\n # Use distance to find most similar examples, sort\n distTuples = list()\n for d in data:\n distTuples.append((d, example.distance(d)))\n distTuples.sort(key=lambda x: x[1])\n\n # Find most common labels\n labels = list()\n for i in range(k):\n newLabel = True\n for j in range(len(labels)):\n if labels[j][0] == distTuples[i][0].label:\n labels[j] = (labels[j][0], labels[j][1] + 1)\n newLabel = False\n if newLabel:\n labels.append((distTuples[i][0].label, 1))\n return max(labels, key=lambda x: x[1])[0]", "def knn(x, x_train, y_train, k=1):\n y_pred = np.zeros(len(x), dtype=np.int8)\n for i, sample in enumerate(x):\n # Calculate distance from this sample to every training sample\n dist = [np.linalg.norm(sample-train) for train in x_train]\n\n # Find the k nearest training samples\n k_nearest_labels = []\n for j in range(k):\n closest = np.argmin(dist)\n k_nearest_labels.append(y_train[closest])\n dist.pop(closest)\n\n # This sample's label the one the appears most frequently in\n # the k nearest, or the first nearest if all appear equally\n labels, counts = np.unique(k_nearest_labels, return_counts=True)\n y_pred[i] = labels[np.argmax(counts)]\n return y_pred", "def KNN(x_train, x_test, y_train, k=3):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n y_pred = knn.predict(x_test)\n return y_pred", "def knn(train_data, train_labels, test_data, 
test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc", "def find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n best_k = 0\n best_score = 0.0\n for k in range(min_k, max_k+1, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n preds = knn.predict(X_test)\n f1 = f1_score(y_test, preds)\n if f1 > best_score:\n best_k = k\n best_score = f1\n print(\"Best Value for k: {}\".format(best_k))\n print(\"F1-Score: {}\".format(best_score))", "def knn(k, Xtrain, Ytrain, Xtest):\n d = euclidean_distances(Xtest, Xtrain, squared=True)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtest.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)", "def predictClass(training_data, test_row, k):\n\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n \n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n\n v = [value for value in counts.values()]\n\n #Pick a class on random if ties occur\n prediction = choice([key for key in counts if counts[key] == max(v)])\n\n return prediction", "def find_best_k(x_train, y_train, ks):\n params = {'n_neighbors': ks}\n knn = neighbors.KNeighborsRegressor()\n model = GridSearchCV(knn, params, cv=5)\n model.fit(x_train, y_train)\n best_k = model.best_params_\n return best_k", "def _predict(self, x):\n # Compute the distance between x and each data point in X_train\n distances = [self._get_distance(x, x_train) for x_train in self.X_train]\n # Get the labels of the k nearest samples to x based on the distances\n k_nearest_indices = np.argsort(distances)[:self.k]\n k_nearest_labels = [self.y_train[idx] for idx in k_nearest_indices]\n # Determine the most common of the k nearest labels\n most_common_class = Counter(k_nearest_labels).most_common(1)[0][0]\n\n return most_common_class", "def classify(k, sorted_labels):\n k_neighbors = sorted_labels[:k]\n men_occurencies = np.count_nonzero(k_neighbors == 'M')\n women_occurencies = np.count_nonzero(k_neighbors == 'W')\n\n return 'M' if men_occurencies > women_occurencies else 'W'", "def K_Nearest_Neighbours_Model(train_features, train_labels, k_value=5, algorithm_auto=\"auto\"):\n # create an instance of the KNN SciKit learn class\n model = KNeighborsClassifier(n_neighbors=k_value, algorithm=algorithm_auto)\n # fit the model to the training data and labels\n model.fit(train_features, train_labels.values.ravel())\n # return the .fit() model\n return model", "def getKNNClassifier():\n codebook = loadCodebook()\n \n args.nVisualWords = codebook.shape[0]\n \n # find nearest neighbor in the codebook\n knn = cv2.KNearest()\n # construct kd-tree with labels from 0 - (nCodewords-1)\n knn.train(codebook,np.arange(args.nVisualWords))\n \n return knn", "def classify_with_knn(train_data, train_labels, test_data, test_labels, k=3, metric='minkowski'):\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.metrics import f1_score, roc_auc_score\n\n neigh = KNeighborsClassifier(n_neighbors=k, metric=metric)\n neigh.fit(train_data, train_labels)\n accuracy = neigh.score(test_data, test_labels)\n pred_labels = neigh.predict(test_data)\n F1 = f1_score(test_labels, pred_labels)\n AUC = roc_auc_score(test_labels, 
pred_labels)\n\n return accuracy, F1, AUC", "def topk_accuracy(scores, labels, ks, selected_class=None):\n if selected_class is not None:\n idx = labels == selected_class\n scores = scores[idx]\n labels = labels[idx]\n rankings = scores.argsort()[:, ::-1]\n # trim to max k to avoid extra computation\n maxk = np.max(ks)\n\n # compute true positives in the top-maxk predictions\n tp = rankings[:, :maxk] == labels.reshape(-1, 1)\n\n # trim to selected ks and compute accuracies\n return [tp[:, :k].max(1).mean() for k in ks]", "def knn_predict(p, points, outcomes, k):\n\tind = find_nearest_neighbors(p, points, k)\n\treturn majority_vote(outcomes[ind])", "def classify(self, document, k, distance_type=\"sqeuclidean\"):\n if k == 0:\n raise ValueError(\"Must enter positive value for k parameter.\")\n \n # If only one neighbor, do more optimal calculation\n if k == 1:\n return self.__classify_nearest_neighbor(document, distance_type)\n \n # List of distance - class tuples\n nearest_neighbors = list()\n \n for index in self.vectors.shape[0]:\n vector = self.vectors[index, :].data.tolist()\n distance = self.distance(document, vector, distance_type)\n n = len(nearest_neighbors)\n \n if n < k:\n nearest_neighbors = sorted(nearest_neighbors.append((distance, self.classes[index])))\n else: \n for i in range(n):\n if distance < nearest_neighbors[i][0]:\n j = n - 1\n while j > i:\n nearest_neighbors[j] = nearest_neighbors[j - 1]\n j -= 1\n nearest_neighbors[i] = (distance, self.classes[index])\n break\n \n occurrences = dict()\n for neighbor in nearest_neighbors:\n if neighbor[1] not in occurrences.keys():\n occurrences[neighbor[1]] = 1\n else:\n occurrences[neighbor[1]] += 1\n \n class_count = [(ocurrence, class_) for class_, ocurrence in occurrences]\n return class_count[max(class_count)[1]]", "def kNN_train(self, x_train, y_train, x_test, k = 5, processing = None, distMethod = \"Manhattan\"):\n y_test = list()\n\n if processing == \"Scalar\":\n # print(\"Preprocessing = Scalar\")\n stdScalar = preprocessing.StandardScaler().fit(x_train)\n x_train = stdScalar.transform(x_train)\n x_test = stdScalar.transform(x_test)\n\n elif processing == \"MinMax\":\n\n # print(\"Preprocessing = MinMax\")\n mmScalar = preprocessing.MinMaxScaler()\n x_train = mmScalar.fit_transform(x_train)\n x_test = mmScalar.fit_transform(x_test)\n\n elif processing == \"None\":\n self.true = True\n # print(\"No Preprocessing\")\n\n else:\n print(\"wrong processing\")\n exit()\n\n for i in range(0, len(x_test)):\n y_test_temp = list()\n zeroCount = 0\n oneCount = 0\n\n # find distance of a instance in test test to all instances in training set\n for j in range(0, len(x_train)):\n if distMethod == \"Manhattan\":\n y_test_temp.append(self.manhattan(x_train[j], x_test[i]))\n elif distMethod == \"Euclidean\":\n y_test_temp.append(self.euclidean(x_train[j], x_test[i]))\n else:\n print \"something wrong with distance calculation\"\n exit()\n\n # take indices of k nearest points\n # print y_test_temp\n temp = np.asarray(y_test_temp).argsort()[:k]\n # check class of each of k nearest points\n for tmp in temp:\n if y_train[tmp] == 0:\n zeroCount += 1\n elif y_train[tmp] == 1:\n oneCount += 1\n else:\n print(\"something wrong in counting\")\n\n # classify\n if zeroCount >= oneCount:\n y_test.append(int(0))\n elif oneCount > zeroCount:\n y_test.append(int(1))\n else:\n print(\"somethign wrong\")\n\n # print y_test\n return y_test", "def predict_labels(self, distances, k=1):\n\n num_test = distances.shape[0]\n Y_pred = np.zeros((num_test,))\n\n \n 
for i in range(num_test):\n # extracting k-nearest-neighbors for each test-point\n kNN_idxs = np.argsort(distances[i,:])[0:k]\n \n # voting among the k-nearest-neighbors\n kNN_labels = {}\n # print(type(kNN_labels))\n\n for j in range(k):\n m_label = self.Y_train[kNN_idxs[j]]\n if m_label in kNN_labels.keys():\n # print(type(kNN_labels))\n kNN_labels[m_label] += 1 # increment count\n else:\n # print(m_label,'....', type(kNN_labels))\n kNN_labels[m_label] = 1 # initial count when the label occurs\n \n # counting the winning label\n\n winning_label = kNN_labels.keys()[0] # initialization\n \n for label in kNN_labels.keys():\n if kNN_labels[label] > kNN_labels[winning_label]:\n winning_label = label\n elif kNN_labels[label] == kNN_labels[winning_label]:\n # tie breaker\n if label < winning_label:\n winning_label = label\n \n\n Y_pred[i] = winning_label # storing winning label for each test-point\n \n return Y_pred", "def classify(self, point=None, k=1, dist=None, prbout=0):\n if not point:\n return []\n\n neighbors = self.kdtree.search_knn(point, k, dist)\n prb = self.decision(neighbors)\n # print prb\n if prbout == 0:\n return prb[0][0]\n elif prbout == 1:\n return prb", "def run_knn(\n features: List[List[float]],\n labels: List[Optional[bool]],\n k: int = 1,\n) -> List[bool]:\n # Filter out the features that are already clustered\n features_l, labels_l = zip(*[(f, l) for f, l in zip(features, labels) if isinstance(l, bool)])\n\n # Fit a nearest neighbour algorithm\n neighbours = KNeighborsClassifier(\n n_neighbors=k,\n ).fit(features_l, labels_l)\n\n # Predict all the features' labels\n return neighbours.predict(features) # type: ignore", "def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions", "def get_neighbors(training_set, \r\n labels, \r\n test_instance, \r\n k, \r\n distance=distance):\r\n distances = []\r\n for index in range(len(training_set)):\r\n dist = distance(test_instance, training_set[index])\r\n distances.append((training_set[index], dist, labels[index]))\r\n distances.sort(key=lambda x: x[1])\r\n neighbors = distances[:k]\r\n return 
neighbors", "def knn(k, train_data, train_labels, valid_data):\n dist = l2_distance(valid_data.T, train_data.T)\n nearest = np.argsort(dist, axis=1)[:, :k]\n\n train_labels = train_labels.reshape(-1)\n valid_labels = train_labels[nearest]\n\n # Note this only works for binary labels:\n valid_labels = (np.mean(valid_labels, axis=1) >= 0.5).astype(np.int)\n valid_labels = valid_labels.reshape(-1, 1)\n\n return valid_labels", "def knnSame(k, Xtrain, Ytrain):\n d = euclidean_distances(Xtrain, squared=True)\n np.fill_diagonal(d, np.inf)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtrain.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)", "def KNN_classification(sample, k, df_dataset, drop_age):\n if drop_age:\n inputs = df_dataset.drop(['age', 'gender'], axis=1).values\n else:\n inputs = df_dataset.drop(['gender'], axis=1).values\n\n labels = df_dataset[\"gender\"].values\n\n # get the cartesian distance from each data point\n cart_distance = cartesian_distance(sample, inputs)\n\n # create a 2D array with the 1st column being the above distances and the second corresponding label\n labeled_cart = np.vstack((cart_distance, labels))\n\n # sort in an ascending manner the above 2D array based on the distances\n sorted_cart = labeled_cart.T[labeled_cart.T[:, 0].argsort()]\n sorted_labels = sorted_cart.T[1]\n\n return classify(k, sorted_labels)", "def predict(self, testFeatures): \r\n\r\n if(not self._fitCalled):\r\n print('The fit method has not been called yet')\r\n return None\r\n\r\n l,d = testFeatures.shape\r\n n,d = self.data.shape \r\n\r\n \"\"\" Fill and return this in your implementation. \"\"\"\r\n predictions = np.empty(shape=(l,), dtype=self.labels.dtype)\r\n\r\n \"\"\" Implement kNN prediction here. 
\"\"\"\r\n\r\n for i in range(0, l):\r\n distances = []\r\n for j in range(0, n):\r\n distances.append((np.sqrt(np.sum((testFeatures[i]-self.data[j])**2)), self.labels[j]))\r\n distances.sort()\r\n kNearestLabels = [x[1] for x in distances][0:self.k]\r\n most_common, num_most_common = Counter(kNearestLabels).most_common(1)[0]\r\n predictions[i] = most_common\r\n return predictions", "def get_k_best(data_dict, features_list, k):\r\n data = featureFormat(data_dict, features_list)\r\n labels, features = targetFeatureSplit(data)\r\n k_best = SelectKBest(k=k)\r\n k_best.fit(features, labels)\r\n scores = k_best.scores_\r\n print(scores)\r\n unsorted_pairs = zip(features_list[1:], scores)\r\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\r\n k_best_features = dict(sorted_pairs[:k])\r\n print (\"{0} best features: {1}\\n\".format(k, k_best_features.keys(), scores))\r\n return k_best_features", "def k_nearest_neighbors(x_test, df_training, k):\n\n return np.argpartition(distance_to_each_training_point(x_test,\n df_training), k-1)[:,0:k]", "def knnForOne(x_training_data, y_training_data, single_x_test_data, n_neighbors):\n nearest_neighbors = {}\n length = len(x_training_data)\n\n for i in range(length):\n X2 = x_training_data[i,:] # get current row of known data\n Y2 = y_training_data[i] # get current label of known data\n distance = getDistance(single_x_test_data, X2) # compare test to known data\n\n if len(nearest_neighbors) < n_neighbors: # reach capacity of nearest neighbors\n nearest_neighbors[distance] = Y2\n else: # kick out largest distance\n\n # Assumes no two distances are exactly the same, or data point will be overwritten in dictionary\n largest_distance = max(nearest_neighbors)\n if distance < largest_distance:\n del nearest_neighbors[largest_distance]\n nearest_neighbors[distance] = Y2\n\n # nearest_neighbors is a dictionary with the n nearest neighbors \n # as values and their distances from single_x_test_data as keys\n \n counts = {}\n for key in nearest_neighbors: # initialize counts dictionary\n counts[nearest_neighbors[key]] = 0\n\n for key in nearest_neighbors: # count labels within the nearest neighbors\n counts[nearest_neighbors[key]] += 1\n\n max_value = max(counts.values()) # find most frequent label within the nearest neighbors\n for key in counts:\n if counts[key] == max_value:\n return key", "def knn(X, Y, seed):\n model = neighbors.KNeighborsClassifier(algorithm='auto')\n param_grid = {'n_neighbors': [1, 5, 9, 13, 17, 21, 25, 29, 33, 37], 'weights': ['uniform', 'distance']}\n \n # Grid search on the parameters, to find the best score.\n k = 3\n split = StratifiedShuffleSplit(n_splits=k, random_state=seed)\n search = GridSearchCV(model, param_grid, cv=split, scoring=\"f1\")\n search.fit(X,Y)\n\n score = search.best_score_\n model = search.best_estimator_\n print(\"score={}\\nModel: {}\".format(score, model))\n \n return model", "def get_k_best(data_dict, features_list, k):\n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data)\n\n k_best = SelectKBest(k=k)\n k_best.fit(features, labels)\n scores = k_best.scores_\n print(scores)\n unsorted_pairs = zip(features_list[1:], scores)\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\n k_best_features = dict(sorted_pairs[:k])\n print (\"{0} best features: {1}\\n\".format(k, k_best_features.keys(), scores))\n return k_best_features", "def train_knn(training_data):\n return knnclassifier(training_data, keys, 3)", "def k_neighbors(self, 
unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def choose_k ( \n feature_matrix,\n target_array,\n model_call,\n param_grid,\n scoring_func = accuracy,\n cv = KFoldStratifiedCV ( number_of_folds = 3 ),\n):\n grid_search_cv = GridSearchCV (\n model_callable = model_call,\n param_grid = param_grid,\n scoring_func = scoring_func,\n cv_object = cv,\n )\n \n # Get the last sorted value and take k from that values\n return sorted ( list ( grid_search_cv.get_cv_scores ( feature_matrix, target_array ) ), key = lambda x: x [ 1 ] ) [ -1 ][ 0 ][ \"k\" ]\n # End choose_k()", "def get_neighbours(self, x, k):\n k = min(k, self.n)\n nearest = {}\n for i in range(k):\n nearest[i] = self.euclidean_distance(x, self.train_x[i])\n for i in range(k, self.n):\n dist = self.euclidean_distance(x, self.train_x[i])\n if dist < max(nearest.values()):\n nearest.pop(max(nearest, key=nearest.get))\n nearest[i] = dist\n return nearest", "def kNN(self, u_eval, v_compares):\n max_sim = [] # [(tag, sim) ... ]\n\n for v_comp in v_compares:\n cosine_sim = self.cosine_sim(u_eval, v_comp)\n\n if cosine_sim > MIN_COS_SINE:\n # add vector tag and cos sim: (tag, sim)\n max_sim.append((self.get_vector(v_comp).tag, cosine_sim))\n\n # sort cosine similarity\n # [('SPORT', 0.2), ('ART', 0.60), ('ART', 0.13)]\n # [('ART', 0.13), ('SPORT', 0.2), ('ART', 0.60)]\n max_sim.sort(key=lambda tag_nb: tag_nb[1]) \n\n # return the k-nearest neighbor only\n # [('ART', 0.13), ('SPORT', 0.2), ('ART', 0.60)]\n # if K_ITEM = 2 \n # [('SPORT', 0.2), ('ART', 0.60)]\n return max_sim[-K_ITEM:]", "def knn_prediction(X, y, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def get_k_neighbors(self, point):\n nn = []\n nnl = []\n for p,l in zip(self.train_features,self.train_labels):\n d = self.distance_function(p,point)\n dl_pair = (d,l)\n nn.append(dl_pair)\n nn = sorted(nn, key = lambda x: x[0])\n for i in range(0,self.k):\n nnl.append(nn[i][1])\n return nnl\n raise NotImplementedError", "def run_knn(k, train_data, train_labels, valid_data):\n\n dist = l2_distance(valid_data.T, train_data.T)\n nearest = np.argsort(dist, axis=1)[:,:k]\n\n train_labels = train_labels.reshape(-1)\n valid_labels = train_labels[nearest]\n\n # note this only works for binary labels\n valid_labels = (np.mean(valid_labels, axis=1) >= 0.5).astype(np.int)\n valid_labels = valid_labels.reshape(-1,1)\n\n return valid_labels", "def predict(self, X, k):\n # Sanity check\n if self.__trained == False:\n return 'Model not trained yet'\n \n # Reshape data to 2D array if possible\n if len(X.shape) == 1:\n X = X[None, :]\n \n # Compute the distances between all given points and all training points\n distances = np.sum((X[:, None, :] - self.__X[None, :, :]) ** 2, axis = -1)\n \n # Find the K nearest neighbors\n k_nearest_neighbors = np.argsort(distances, axis=-1)[:,:k]\n\n # Run over all columns and find the most common neighbor in each, then return his label\n n_observations = X.shape[0]\n idx = [np.argmax(np.bincount(k_nearest_neighbors[i])) for i in range(n_observations)]\n return self.__y[idx]", "def p_y_x_knn(y, k):\n number_of_classes = 4\n resized = np.delete(y, range(k, y.shape[1]), axis=1)\n summed_with_zero = np.vstack(np.apply_along_axis(np.bincount, axis=1, arr=resized, minlength=number_of_classes + 1))\n summed = 
np.delete(summed_with_zero, 0, axis=1)\n return summed / k", "def get_k_best(data_dict, feature_list, num_features):\n data = featureFormat(data_dict, feature_list)\n target, features = targetFeatureSplit(data)\n\n clf = SelectKBest(k = num_features)\n clf = clf.fit(features, target)\n feature_weights = {}\n for idx, feature in enumerate(clf.scores_):\n feature_weights[feature_list[1:][idx]] = feature\n best_features = sorted(feature_weights.items(), key = lambda k: k[1], reverse = True)[:num_features]\n new_features = []\n for k, v in best_features:\n new_features.append(k)\n return new_features", "def fit_and_predict_KNN(X_train, Y_train, X_test, K):\n \n # Import the package\n from sklearn.neighbors import KNeighborsClassifier\n\n ### YOUR SOLUTION STARTS HERE###\n #referenced to sklearn documentation\n # fit the model (for KNN this is just storing the training data and labels) \n clf = KNeighborsClassifier(n_neighbors=K).fit(X_train, Y_train)\n # Predict\n predicted_KNN = clf.predict(X_test)\n return predicted_KNN", "def get_k_best(data_dict, features_list, k):\n \n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data) \n\n k_best = SelectKBest(k=k).fit(features, labels)\n scores = k_best.scores_\n pairs = zip(scores, features_list[1:])\n pairs.sort(reverse = True)\n pairs_sorted = [(v2,v1) for v1,v2 in pairs]\n k_best_features = dict(pairs_sorted[:k])\n pprint(pairs_sorted)\n return k_best_features", "def get_k_best(data_dict, features_list, k):\n\n data = featureFormat(data_dict, features_list)\n labels, features = targetFeatureSplit(data)\n\n k_best = SelectKBest(k='all')\n k_best.fit(features, labels)\n scores = k_best.scores_\n unsorted_pairs = zip(features_list[1:], scores)\n sorted_pairs = list(reversed(sorted(unsorted_pairs, key=lambda x: x[1])))\n k_best_features = dict(sorted_pairs[:k])\n return k_best_features", "def kNN(k, testTfidf, trainTfidf, test_file, train_label, weight=True):\r\n test_y = [] \r\n \r\n # iterate through all lines in the test reviews and classify them\r\n for index, line in enumerate(test_file):\r\n # cosine similarity\r\n cos_similarity = linear_kernel(testTfidf[index:index+1], trainTfidf).flatten()\r\n \r\n if weight == True:\r\n # get the indices of nearest neighbors based on k parameter \r\n neighbor_indices = cos_similarity.argsort()[:-k:-1]\r\n # similarities\r\n similarities = cos_similarity[neighbor_indices]\r\n # get a list of labels from the neighbors and sum the list\r\n labels_list = train_label[neighbor_indices].tolist()\r\n\r\n # make cosine similarity value negative or positive based on\r\n # its label and sum the cosine similarities\r\n my_list = [] \r\n for s, l in zip(similarities, labels_list):\r\n if l == -1:\r\n my_list.append(-s)\r\n else:\r\n my_list.append(s) \r\n \r\n label_sum = sum(my_list)\r\n #classify based on label_sum\r\n if label_sum > 0:\r\n test_y.append(\"+1\")\r\n else:\r\n test_y.append(-1)\r\n\r\n else:\r\n # get the indices of nearest neighbors based on k parameter \r\n neighbor_indices = cos_similarity.argsort()[:-k:-1]\r\n # get a list of labels from the neighbors and sum the list\r\n labels_list = train_label[neighbor_indices].tolist()\r\n label_sum = sum(labels_list)\r\n\r\n # classify based on label_sum\r\n if label_sum > 0:\r\n test_y.append(\"+1\")\r\n else:\r\n test_y.append(-1)\r\n \r\n print(index)\r\n \r\n return pd.DataFrame(test_y)", "def build_classifier(self, n_neighbours, data_index):\n knn = KNeighborsClassifier(n_neighbors=n_neighbours)\n 
BayesianKneighborClassifier.update_current_data(self, data_index)\n X_train, X_test, y_train, y_test = BayesianKneighborClassifier.split_test_and_train_data\\\n (self, 0.3, data_index)\n knn.fit(X_train, y_train)\n y_predicted = knn.predict(X_test)\n print(\"KNN classifier built. Accuracy score: {} using K={} neighbours in view: {}\".format(\n metrics.accuracy_score(y_test, y_predicted), n_neighbours,\n BayesianKneighborClassifier.views[data_index]))\n return knn", "def predict(self, game_features, game_features_matrix, key, n_recommendations=10):\n model_knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20, n_jobs=-1)\n model_knn.fit(game_features_matrix)\n idx = game_features.index.get_loc(key)\n distances, indices = model_knn.kneighbors(\n game_features_matrix[idx],\n n_neighbors=n_recommendations + 1)\n raw_recommends = \\\n sorted(\n list(\n zip(\n indices.squeeze().tolist(),\n distances.squeeze().tolist()\n )\n ),\n key=lambda x: x[1]\n )[:0:-1]\n return raw_recommends", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n closest_y = self.y_train[np.argsort(dists[i])][0:k]\n closest_y = closest_y.astype(int)\n y_pred[i] = np.bincount(closest_y).argmax()\n return y_pred", "def kNN(x, y, s):\n usx = np.array(x)\n usy = np.array(y)\n\n # split data into train and validation set\n x_train, x_test, y_train, y_test = train_test_split(usx, usy, test_size=s)\n clf = neighbors.KNeighborsClassifier(algorithm='kd_tree')\n clf.fit(x_train, y_train)\n y_predict = clf.predict(x_test)\n\n # select only the probabilities of being fraud\n y_pred_prob = clf.predict_proba(x_test)[:, 1]\n return y_predict, y_test, y_pred_prob", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def distance_score(x_embeddings_test, x_embeddings_train, y_true_train, K=50):\n num_samples = x_embeddings_test.shape[0]\n num_classes = y_true_train.shape[1]\n y_test_confidence = []\n for i in range(num_samples):\n sample_embedding = x_embeddings_test[i]\n distances = np.square(sample_embedding - x_embeddings_train).sum(axis=-1)\n K_nn = np.argsort(distances)[:K]\n K_nn_distances = np.exp(-np.sqrt(distances[K_nn]))\n K_nn_labels = y_true_train[K_nn, :]\n\n class_indicators = np.eye(num_classes)\n classes_masks = np.matmul(class_indicators, np.transpose(K_nn_labels))\n\n # foreach class we mask away the samples in Knn that belong to other classes\n class_samples_distances = classes_masks * 
np.expand_dims(K_nn_distances, axis=0) # this gives num_classes X K (100 X 50 matrix)\n sum_distances = np.sum(K_nn_distances)\n D_x = np.sum(class_samples_distances, axis=-1)/sum_distances\n\n y_test_confidence.append(D_x)\n\n return np.vstack(y_test_confidence)", "def classify(self, data):\n\n \"*** YOUR CODE HERE ***\"\n # should compute (validationData[i] - trainingData[j])^2\n result = np.zeros(data.shape[0])\n for i in range(data.shape[0]):\n distances = np.linalg.norm(self.trainingData - data[i], axis=1)\n nearest = np.argsort(distances)[:self.num_neighbors]\n nearest_tags = [self.trainingLabels[j] for j in nearest]\n result[i] = max(nearest_tags, key=lambda x: nearest_tags.count(x))\n return result", "def top_k_accuracy(y_true : np.ndarray, probs: np.ndarray, k: int) -> float:\r\n \r\n # Top k sorted preds\r\n sorted_probs = probs.argsort()[:,-k:]\r\n\r\n # Does the truth intersect with any of the top k predictions?\r\n matches = np.max(sorted_probs == y_true.reshape(-1, 1), axis=1)\r\n return matches.mean()", "def choose_k ( \n X,\n y,\n model_call,\n param_grid,\n scoring_func = accuracy,\n cv = KFoldStratifiedCV ( number_of_folds = 3 ),\n):\n grid_search_cv = GridSearchCV (\n model_callable = model_call,\n param_grid = param_grid,\n scoring_func = scoring_func,\n cv_object = cv,\n )\n \n # Get the last sorted value and take k from that values\n return sorted ( list ( grid_search_cv.get_cv_scores ( X, y ) ), key = lambda x: x [ 1 ] ) [ -1 ][ 0 ][ \"k\" ]\n # End choose_k()", "def selectFeatures(k_features=5, *args):\n X, y = args\n skb = SelectKBest(k=k_features)\n return skb.fit_transform(X, y)", "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def knn(X, k=1):\n from ..utils.fast_distance import euclidean_distance\n\n if np.size(X) == X.shape[0]:\n X = np.reshape(X, (np.size(X), 1))\n try:\n k = int(k)\n except:\n \"k cannot be cast to an int\"\n if np.isnan(k):\n raise ValueError('k is nan')\n if np.isinf(k):\n raise ValueError('k is inf')\n k = min(k, X.shape[0] - 1)\n\n # create the distance matrix\n dist = euclidean_distance(X)\n sorted_dist = dist.copy()\n sorted_dist.sort(0)\n\n # neighbour system\n bool_knn = dist < sorted_dist[k + 1]\n bool_knn += bool_knn.T\n # xor diagonal\n bool_knn ^= np.diag(np.diag(bool_knn))\n dist *= (bool_knn > 0)\n return wgraph_from_adjacency(dist)", "def predict(self, X, k=None):\n \n if not hasattr(self, 'n_neighbors'):\n self.fit(X)\n \n if k is None:\n k = self.n_neighbors\n else:\n k = check_n_neighbors(k, X.shape[0])\n \n distances, _ = self.nbrs.kneighbors(X, n_neighbors=k+1)\n #distances = distances[:, 1:]\n distances[distances[:, 0] == 0., :-1] = distances[distances[:, 0] == 0., 1:]\n distances = distances[:, :-1]\n \n return distances.mean(axis=1)", "def predict_with_tree(self, X, ball_tree):\n predicted_y = np.zeros(X.shape[0])\n for i in range(X.shape[0]):\n neighbours = []\n ball_tree.knn_search(X[i], self.k_nearest, neighbours)\n votes = {}\n for neighbour in neighbours:\n if neighbour.classification in votes:\n votes[neighbour.classification] += 1\n else:\n votes[neighbour.classification] = 1\n predicted_y[i] = max(votes, key=votes.get)\n return predicted_y", "def find_best_k(data, anots, neibhours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neibhours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > 
best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def classify(self, features):\n\n # TODO: finish this.\n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for i in range(feat_shape[0]):\n vote = np.zeros((self.num_trees))\n for j in range(self.num_trees):\n #print self.trees[j].classify(feat)\n vote[j] = self.trees[j].classify(features[i,self.attr_track[j]].reshape(1,-1))[0]\n counts = np.bincount(vote.astype(int))\n class_labels.append(np.argmax(counts))\n return class_labels", "def estimate_class(self, observation: np.ndarray) -> int:\n neighbor_classes, distances = self.get_neighbor_classes(observation)\n weights = 1 / np.square(distances)\n classes = np.unique(neighbor_classes)\n class_weight = [sum(weights[neighbor_classes == neighbor_class]) for neighbor_class in classes]\n return classes[np.argmax(class_weight)]", "def fit(self, data, labels, labels_pred):\n self.n_samples, dim = data.shape\n self.labels_unique = np.unique(labels)\n self.n_classes = len(self.labels_unique)\n if self.n_neighbors is None:\n # Set number of nearest neighbors based on the maximum number of samples per class and the neighborhood\n # constant\n num = 0\n for c in self.labels_unique:\n ind = np.where(labels == c)[0]\n if ind.shape[0] > num:\n num = ind.shape[0]\n\n self.n_neighbors = int(np.ceil(num ** self.neighborhood_constant))\n\n logger.info(\"Number of samples: {:d}. Data dimension = {:d}.\".format(self.n_samples, dim))\n logger.info(\"Number of classes: {:d}.\".format(self.n_classes))\n logger.info(\"Number of neighbors (k): {:d}.\".format(self.n_neighbors))\n logger.info(\"Fraction of outliers (alpha): {:.4f}.\".format(self.alpha))\n if self.model_dim_reduction:\n data = transform_data_from_model(data, self.model_dim_reduction)\n dim = data.shape[1]\n logger.info(\"Applying dimension reduction to the data. 
Projected dimension = {:d}.\".format(dim))\n\n # Distance from each sample in `data` to the `1 - alpha` level sets corresponding to each class\n distance_level_sets = np.zeros((self.n_samples, self.n_classes))\n self.index_knn = dict()\n self.epsilon = dict()\n indices_sub = dict()\n for j, c in enumerate(self.labels_unique):\n logger.info(\"Processing data from class '{}':\".format(c))\n logger.info(\"Building a KNN index for all the samples from class '{}'.\".format(c))\n indices_sub[c] = np.where(labels == c)[0]\n data_sub = data[indices_sub[c], :]\n self.index_knn[c] = KNNIndex(\n data_sub, n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distances to the k nearest neighbors of each sample\n _, nn_distances = self.index_knn[c].query_self(k=self.n_neighbors)\n # Radius or distance to the k-th nearest neighbor for each sample\n radius_arr = nn_distances[:, self.n_neighbors - 1]\n\n # Smallest radius `epsilon` such that only `alpha` fraction of the samples from class `c` have radius\n # greater than `epsilon`\n if self.alpha > 0.:\n self.epsilon[c] = np.percentile(radius_arr, 100 * (1 - self.alpha), interpolation='midpoint')\n\n # Exclude the outliers and build a KNN index with the remaining samples\n mask_incl = radius_arr <= self.epsilon[c]\n mask_excl = np.logical_not(mask_incl)\n num_excl = mask_excl[mask_excl].shape[0]\n else:\n # Slightly larger value than the largest radius\n self.epsilon[c] = 1.0001 * np.max(radius_arr)\n\n # All samples are included in the density level set\n mask_incl = np.ones(indices_sub[c].shape[0], dtype=np.bool)\n mask_excl = np.logical_not(mask_incl)\n num_excl = 0\n\n if num_excl:\n logger.info(\"Excluding {:d} samples with radius larger than {:.6f} and building a KNN index with \"\n \"the remaining samples.\".format(num_excl, self.epsilon[c]))\n self.index_knn[c] = KNNIndex(\n data_sub[mask_incl, :], n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distance to the nearest neighbor of each sample that is part of the KNN index\n _, dist_temp = self.index_knn[c].query_self(k=1)\n ind = indices_sub[c][mask_incl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n\n # Distance to the nearest neighbor of each sample that is not a part of the KNN index (outliers)\n _, dist_temp = self.index_knn[c].query(data_sub[mask_excl, :], k=1)\n ind = indices_sub[c][mask_excl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n else:\n # No need to rebuild the KNN index because no samples are excluded.\n # Distance to the nearest neighbor of each sample\n distance_level_sets[indices_sub[c], j] = nn_distances[:, 0]\n\n logger.info(\"Calculating the trust score for the estimation data.\")\n for c in self.labels_unique:\n # Compute the distance from each sample from class `c` to the level sets from the remaining classes\n data_sub = data[indices_sub[c], :]\n for j, c_hat in enumerate(self.labels_unique):\n if c_hat == c:\n continue\n\n _, dist_temp = self.index_knn[c_hat].query(data_sub, k=1)\n distance_level_sets[indices_sub[c], j] = dist_temp[:, 0]\n\n self.scores_estim = self._score_helper(distance_level_sets, labels_pred)\n return self", "def search_highest_k_neighbor(self, k):\n max_score = 0\n target_node = None\n 
from_idx = None\n to_idx = None\n for i in range(k):\n node = self.graph.nodes[random.randrange(len(self.graph.nodes))]\n cluster_idx = self.search_cluster_by_node(node)\n if len(self.result[cluster_idx].get_nodes()) == 1:\n end_i = len(self.result)\n else:\n end_i = len(self.result) + 1\n\n random_cluster_idx = random.randrange(end_i)\n if random_cluster_idx != cluster_idx:\n tried_score = self.try_replace_node(node, cluster_idx, random_cluster_idx)\n if max_score < tried_score:\n max_score = tried_score\n target_node = node\n from_idx = cluster_idx\n to_idx = random_cluster_idx\n\n return max_score, target_node, from_idx, to_idx", "def predict(self, test):\n test_data = np.asarray(test)\n assert self.x is not None and self.y is not None, \"You must train the classifier before testing\"\n results = []\n for i in range(test_data.shape[0]):\n m = self.x - test_data[i]\n # dist holds the Euclidean distance to every training point\n dist = np.sum(m*m, 1)\n # this call uses a quickselect algo to find k-smallest\n ind = np.argpartition(dist, self.k)[:self.k]\n # take the class present the most among the k closest\n out = int(scipy.stats.mode(self.y[ind], axis=None)[0])\n results.append(out)\n return results", "def knn(X_tr_sc, n_neighbors, radius): \n\n neigh = NearestNeighbors(n_neighbors, radius,metric='euclidean')\n model = neigh.fit(X_tr_sc) \n \n return model", "def knn_entropy(X: np.ndarray, k: int = 5, algorithm=\"brute\", n_jobs=1, **kwargs):\n X = check_array(X, ensure_2d=True)\n\n # initialize KNN estimator\n n_samples, d_dimensions = X.shape\n\n # volume of unit ball in d^n\n vol = (np.pi ** (0.5 * d_dimensions)) / gamma(0.5 * d_dimensions + 1)\n\n # 1. Calculate the K-nearest neighbors\n\n clf_knn = NearestNeighbors(**kwargs)\n\n clf_knn.fit(X)\n\n distances, _ = clf_knn.kneighbors(X)\n\n # return distance to kth nearest neighbor\n distances = distances[:, -1]\n\n # add error margin to avoid zeros\n distances += np.finfo(X.dtype).eps\n\n # estimation\n return (\n d_dimensions * np.mean(np.log(distances))\n + np.log(vol)\n + psi(n_samples)\n - psi(k)\n )", "def nearest_neighbours_generalisation_accuracy(X, y, n_neighbors=1):\n kf = KFold(n_splits=10)\n kf.get_n_splits(X)\n scores = []\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n clf = neighbors.KNeighborsClassifier(n_neighbors)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n scores.append(accuracy_score(y_test, y_pred))\n return np.average(scores)", "def find_k_most_uncertain(nn, sess, X, batch_size=None, k=2,\n pool_size=None):\n\n # Initialization\n results = []\n i = -1\n\n if batch_size is None:\n _batch_size = X.shape[0]\n else:\n _batch_size = batch_size\n\n # Create the pool\n order = np.arange(X.shape[0])\n\n if pool_size is not None:\n order = np.random.choice(X.shape[0], \n min(X.shape[0], pool_size),\n replace=False)\n X_pool = X[order, :].copy()\n else:\n X_pool = X.copy()\n\n # Loop over the batches\n for i in range(X_pool.shape[0]/_batch_size):\n\n feed_dict = {}\n feed_dict[nn.input_tensor] = X_pool[i * _batch_size:\n (i+1) * _batch_size]\n\n # Predict the batch\n pred = sess.run(nn.prediction, feed_dict=feed_dict)\n\n # The most uncertain is the closest to 0.5\n pred = np.abs(0.5 - pred).reshape(-1)\n\n # Sort it by uncertainty\n batch_order = np.argsort(pred)\n pred = pred[batch_order]\n\n # Create associated indices\n to_zip = order[range(i * _batch_size, i * _batch_size + pred.shape[0])]\n to_zip = 
to_zip[batch_order]\n\n results = kmin(results, zip(pred, to_zip), k)\n\n # Last uncomplete batch\n feed_dict = {}\n feed_dict[nn.input_tensor] = X_pool[(i+1) * _batch_size:]\n\n # Predict the last batch\n pred = sess.run(nn.prediction, feed_dict=feed_dict)\n\n # Sort it by uncertainty\n pred = np.abs(0.5 - pred).reshape(-1)\n batch_order = np.argsort(pred)\n pred = pred[batch_order]\n\n # Create associated indices\n to_zip = order[(i + 1) * _batch_size:]\n to_zip = to_zip[batch_order]\n\n results = kmin(results, zip(pred, to_zip), k)\n\n return [i[1] for i in results]", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n # A list of length k storing the labels of the k nearest neighbors to\n # the ith test point.\n closest_y = []\n indices = np.argsort(dists[i])\n indices = indices[range(k)]\n closest_y = self.y_train[indices]\n counts = np.bincount(closest_y)\n y_pred[i] = np.argmax(counts)\n\n return y_pred", "def classify(som, data):\n winmap = som.labels_map(X_train, y_train)\n default_class = np.sum(list(winmap.values())).most_common()[0][0]\n result = []\n for d in data:\n win_position = som.winner(d)\n if win_position in winmap:\n result.append(winmap[win_position].most_common()[0][0])\n else:\n result.append(default_class)\n return result", "def top_k_metric(self, y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=self.ACCURACY_K)", "def top_k_1_5(output: torch.Tensor, target: torch.Tensor, topk=(1, 5)) -> List[torch.FloatTensor]:\n with torch.no_grad():\n # ---- get the topk most likely labels according to your model\n # get the largest k \\in [n_classes] (i.e. the number of most likely probabilities we will use)\n # max number labels we will consider in the right choices for out model\n maxk = max(topk)\n batch_size = target.size(0)\n\n # get top maxk indicies that correspond to the most likely probability scores\n # (note _ means we don't care about the actual top maxk scores just their corresponding indicies/labels)\n # _, [B, n_classes] -> [B, maxk]\n _, y_pred = output.topk(k=maxk, dim=1)\n # [B, maxk] -> [maxk, B] Expects input to be <= 2-D tensor and transposes dimensions 0 and 1.\n y_pred = y_pred.t()\n\n # - get the credit for each example if the models predictions is in maxk values (main crux of code)\n # for any example, the model will get credit if it's prediction matches the ground truth\n # for each example we compare if the model's best prediction matches the truth. If yes we get an entry of 1.\n # if the k'th top answer of the model matches the truth we get 1.\n # Note: this for any example in batch we can only ever get 1 match (so we never overestimate accuracy <1)\n # [B] -> [B, 1] -> [maxk, B]\n target_reshaped = target.view(1, -1).expand_as(y_pred)\n # compare every topk's model prediction with the ground truth & give credit if any matches the ground truth\n # [maxk, B] were for each example we know which topk prediction matched truth\n correct = (y_pred == target_reshaped)\n # original: correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n # -- get topk accuracy\n list_topk_accs = [] # idx is topk1, topk2, ... 
etc\n for k in topk:\n # get tensor of which topk answer was right\n ind_which_topk_matched_truth = correct[:k] # [maxk, B] -> [k, B]\n # flatten it to help compute if we got it correct for each example in batch\n # [k, B] -> [kB]\n flattened_indicator_which_topk_matched_truth = ind_which_topk_matched_truth.reshape(\n -1).float()\n # get if we got it right for any of our top k prediction for each example in batch\n tot_correct_topk = flattened_indicator_which_topk_matched_truth.float().sum(\n dim=0, keepdim=True) # [kB] -> [1]\n # compute topk accuracy - the accuracy of the mode's ability to get it right within it's top k guesses/preds\n topk_acc = tot_correct_topk / batch_size # topk accuracy for entire batch\n list_topk_accs.append(topk_acc)\n # list of topk accuracies for entire batch [topk1, topk2, ... etc]\n return list_topk_accs", "def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n indices = np.argsort(dists[i])[:k]\n closest_y = self.y_train[indices]\n y_pred_i = mode(closest_y)[0]\n y_pred[i] = y_pred_i\n return y_pred", "def classify_treeNN(self, query_name):\n # 1) Find set of closest neighbors & their class names\n # ie. leaves with at most neighborhood_max_edges edges between itself \n # and the query node\n neighborhood_classes = self.getNeighborhoodClasses(query_name)\n print \"neighborhood \" , neighborhood_classes\n\n # 2) Find aggregate similarity score for each class\n # Use minimum operator for distance measure & maximum for similarity measure\n # EQ 6.1 in Chapt 6, Busa-Fekete et al\n R = {}\n for c,ids in neighborhood_classes.iteritems():\n sim_score = min([nx.shortest_path_length(self.tree, source=query_name, \n target=i, weight='length') for i in ids])\n if DEBUG: print \"\\tCLASS / SIM_SCORE: \", c, sim_score\n R[sim_score] = c # distance measure\n\n min_score = min(R.keys())\n if DEBUG: print \"MIN_SCORE: \", min_score\n\n return R[min_score] #class of minimum distance score", "def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError", "def knn_manage(k):\n\n xtrain, xtest, label_train, label_test = get_data()\n pred = knn_classify(xtrain, xtest, label_train, k)\n conf_mat, accuracy, misclassified = confusion_matrix_accuracy(pred, label_test)\n print accuracy\n print conf_mat", "def classify(self, features):\n \n class_labels = []\n # TODO: finish this.\n features = np.array(features)\n feat_shape = features.shape\n for indx in range(feat_shape[0]):\n# print list(features[indx,:]), features[indx,:]\n decision = self.root.decide(list(features[indx,:]))\n class_labels.append(decision)\n return class_labels", "def __classify_nearest_neighbor(self, document, distance_type):\n min_distance = sys.float_info.max\n min_class = \"\"\n \n for index in self.vectors.shape[0]:\n vector = self.vectors[index:].data.tolist()\n distance = self.distance(document, vector, distance_type)\n if distance < min_distance:\n min_distance = distance\n min_class = self.classes[index]\n \n return min_class", "def test_k_models(param_dict, features, classes, cross_val=4):\r\n assert type(param_dict) == dict\r\n model = GridSearchCV(KNeighborsClassifier(), param_dict, cv=cross_val)\r\n model.fit(features, classes)\r\n return list(model.best_params_.values())[0]", "def predict(self):\n 
probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def find_knn(self, k, coordinate, threshold=0):\n def r_square(c1, c2):\n return (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2\n\n h = []\n for sno in self._coordinates:\n heapq.heappush(\n h, (r_square(coordinate, self._coordinates[sno]), sno))\n\n knn = []\n for unused_i in range(k):\n knn.append(self._stations[heapq.heappop(h)[1]])\n\n min_dist = r_square((float(knn[0]['lat']), float(knn[0]['lng'])),\n coordinate)\n if threshold and min_dist > threshold ** 2:\n return []\n\n return knn", "def pk(y_true, y_pred, k):\n \n # if k is 0, return 0. we should never have this\n # as k is always >= 1\n if k == 0:\n return 0\n # we are interested only in top-k predictions\n y_pred = y_pred[:k]\n \n # convert predictions to set\n pred_set = set(y_pred)\n \n # convert actual values to set\n true_set = set(y_true)\n \n # find common values\n common_values = pred_set.intersection(true_set)\n \n # return length of common values over k\n return len(common_values) / len(y_pred[:k])", "def accuracy(output, target, topk=(1,)):\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True) # pred是top k的索引值\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred)) # target每个样本只有一个值,表示具体类别值,expand之后比较是否相等,相等的就是对的\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) # top几的分类正确数量累加,然后除以batch_size就是准确率\n res.append(correct_k.mul_(100.0 / batch_size))\n return res", "def knn(p, pnts, k=1, return_dist=True):\r\n def _remove_self_(p, pnts):\r\n \"\"\"Remove a point which is duplicated or itself from the array\r\n \"\"\"\r\n keep = ~np.all(pnts == p, axis=1)\r\n return pnts[keep]\r\n #\r\n def _e_2d_(p, a):\r\n \"\"\" array points to point distance... 
mini e_dist\r\n \"\"\"\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)\r\n #\r\n p = np.asarray(p)\r\n k = max(1, min(abs(int(k)), len(pnts)))\r\n pnts = _remove_self_(p, pnts)\r\n d = _e_2d_(p, pnts)\r\n idx = np.argsort(d)\r\n if return_dist:\r\n return pnts[idx][:k], d[idx][:k]\r\n return pnts[idx][:k]", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def GetNearestElements(user_id, current_context, suggestees, k=10):\n\n if type(user_id) is int:\n user_history = ExtractFeatures(user_id)\n else:\n user_history = user_id\n user_interest = GetUserInterest(user_id, current_context, suggestees)\n\n neighbours = []\n counts = {}\n for entry in user_history:\n dist = GetDist(entry[1:], current_context)\n if dist > kMaxDistThreshold:\n continue\n if len(counts) < k:\n heapq.heappush(neighbours, (-dist, entry[0]))\n if entry[0] not in counts:\n counts[entry[0]] = 1\n else:\n counts[entry[0]] += 1\n elif dist < -neighbours[0][0]:\n _, smallest = heapq.heappushpop(neighbours, (-dist, entry[0]))\n if entry[0] not in counts:\n counts[entry[0]] = 1\n else:\n counts[entry[0]] += 1\n counts[smallest] -= 1\n if counts[smallest] == 0:\n del counts[smallest]\n\n # TODO(kadircet): Add data coming from cold start or maybe most liked N\n # elements into the base tags too.\n base_tags = GetTagWeights(counts.keys())\n similar_suggestees = GetSimilarSuggestees(\n None, base_tags=base_tags, similarity_metric=WeightedJaccardSimilarity)\n neighbours = []\n for suggestee_id, count in user_interest.items():\n history_count = counts.get(suggestee_id, 0)\n # If user simply disliked and never eaten it, abandon the choice.\n if history_count == 0 and count < 0:\n continue\n counts.pop(suggestee_id, 0)\n neighbours.append((history_count * kHistoryCoef + count, suggestee_id))\n for suggestee_id, history_count in counts.items():\n neighbours.append((history_count * kHistoryCoef, suggestee_id))\n max_count = max(max(neighbours)[0], 1)\n\n def CountsToProb(x):\n return (x[0] / max_count, x[1])\n\n neighbours = list(map(CountsToProb, neighbours))\n neighbours.extend(similar_suggestees)\n neighbours.sort()\n neighbours.reverse()\n\n return tuple(map(lambda x: int(x[1]), neighbours))[:20]", "def get_k(self, minimum_confidence=3, cluster_search_minimum=8):\n clusters_and_counts = [(int(str(i.stem).split('_')[0].split('-')[1]),\n len(str(i.stem).split('_')[1:])) for i in Path(self.models_dir_path).glob('*') if i.stem[0] == 'k']\n best_cluster_num, highest_count = functools.reduce(lambda x,y: x if x[1] > y[1] else y, clusters_and_counts)\n if (highest_count >= minimum_confidence) and (best_cluster_num >= cluster_search_minimum): \n self.optimal_k = best_cluster_num\n for i in Path(self.models_dir_path).glob('**/*'): \n if i.match(f'*k-{best_cluster_num}_*'): \n self.cluster_dir = i\n if not utils.find('*extent*.png', self.cluster_dir):\n visualization.get_domain_geometry(self, self.cluster_dir)\n print(f'\"cluster_dir\" initialized @:\\n{i}')\n return best_cluster_num\n else:\n self.optimal_k = None\n return False", "def get_class_weights(img_paths: List[str], class_to_idx: Dict[str, int], label_names: List[str]):\n labels = list()\n for img_path in img_paths:\n label = 
os.path.basename(os.path.dirname(img_path))\n labels.append(class_to_idx[label]) \n\n counts = Counter(labels) + Counter([class_to_idx[name] for name in label_names])\n counts = np.array(sorted(counts.items()))[:,1]\n \n return counts.max()/counts", "def prob6(n_neighbors, filename=\"mnist_subset.npz\"):\n #Extract the data\n data = np.load(\"mnist_subset.npz\")\n X_train = data[\"X_train\"].astype(np.float)\n y_train = data[\"y_train\"]\n X_test = data[\"X_test\"].astype(np.float)\n y_test = data[\"y_test\"]\n\n # instantiate a KNeighborsClassifier to hold the data to make predictions\n myClassifier = KNeighborsClassifier(n_neighbors)\n myClassifier.fit(X_train, y_train)\n accurate_trains = 0\n prediction = 0\n # test each label\n for i in range(len(X_test)):\n target = X_test[i]\n #make prediction\n prediction = myClassifier.predict(target)\n # check accuracy\n if prediction == y_test[i]:\n accurate_trains += 1\n # return the accuracy ratio\n return accurate_trains / len(y_test)", "def classify(self, input):\n return min(range(self.k), key=lambda i: squared_distance(input, self.means[i]))", "def test_determine_k(self):\n test_dir_name = os.path.dirname(__file__)\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"four_clusters.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\"x\", \"y\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 4)\n\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"iris.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\n \"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\",\n \"Petal.Width\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 2)", "def find_best_classifier(data, possible_classifiers, target_classifier):\n best_disorder_score = 10000000\n best_classifier = None\n try:\n for classifier in possible_classifiers:\n total_disorder = average_test_disorder(data, classifier, target_classifier)\n if total_disorder < best_disorder_score:\n best_classifier = classifier\n best_disorder_score = total_disorder\n else:\n pass\n if best_classifier!=None:\n branches = split_on_classifier(data, best_classifier)\n if len(branches) == 1:\n raise NoGoodClassifiersError\n else:\n return best_classifier\n except Exception as e:\n raise NoGoodClassifiersError" ]
[ "0.7780232", "0.77610284", "0.7689671", "0.7591956", "0.75637233", "0.7306566", "0.7278552", "0.72072935", "0.71550226", "0.71461403", "0.71255", "0.71031046", "0.70180625", "0.69530344", "0.6946558", "0.6925374", "0.6919172", "0.6916706", "0.6903651", "0.6782697", "0.6778249", "0.67604357", "0.6715812", "0.67094284", "0.66881603", "0.66718286", "0.66690904", "0.6635434", "0.66141915", "0.65977496", "0.65868706", "0.65536386", "0.653902", "0.6532944", "0.6527435", "0.6518511", "0.64963657", "0.64827615", "0.6482443", "0.64732164", "0.64729524", "0.6461722", "0.64572316", "0.6422307", "0.6418075", "0.64078194", "0.63984627", "0.6388835", "0.6385166", "0.63766783", "0.6374405", "0.63742965", "0.6365658", "0.63361555", "0.6331093", "0.63068557", "0.6297601", "0.6295141", "0.6288726", "0.628319", "0.62766397", "0.6246008", "0.62407464", "0.623566", "0.6211822", "0.6202874", "0.6198472", "0.6194732", "0.6194022", "0.61879957", "0.6187355", "0.61781996", "0.61764735", "0.6174141", "0.61453927", "0.61373806", "0.6132341", "0.6125072", "0.6118343", "0.6102629", "0.6096839", "0.60955304", "0.6091385", "0.6074632", "0.6028442", "0.60275495", "0.6023956", "0.60187125", "0.60157096", "0.60079557", "0.6006473", "0.6000818", "0.5994254", "0.5992684", "0.59838927", "0.59820914", "0.59808064", "0.59708333", "0.59686655", "0.5962021" ]
0.83432674
0
Given data (observed x and labels t) and choice k of nearest neighbors, plots the decision boundary based on a grid of classifications over the feature space.
def plot_decision_boundary(k, x, t, granularity=100, figures_root='../figures', data_name=None): print(f'KNN for K={k}') # Initialize meshgrid to be used to store the class prediction values # this is used for computing and plotting the decision boundary contour pointsX = numpy.linspace(numpy.min(x[:, 0]) - 0.1, numpy.max(x[:, 0]) + 0.1, granularity) pointsY = numpy.linspace(numpy.min(x[:, 1]) - 0.1, numpy.max(x[:, 1]) + 0.1, granularity) Xv, Yv = numpy.meshgrid(pointsX, pointsY) # Calculate KNN classification for every point in meshgrid classes = numpy.zeros(shape=(Xv.shape[0], Xv.shape[1])) for i in range(Xv.shape[0]): for j in range(Xv.shape[1]): c = knn(numpy.array([Xv[i][j], Yv[i][j]]), k, x, t) # print('{0} {1} {2}'.format(i, j, c)) classes[i][j] = c # plot the binary decision boundary contour plt.figure() plt.pcolormesh(Xv, Yv, classes, cmap=CMAP_LIGHT) ti = f'KNN with K = {k}' plt.title(ti) plt.draw() save_path = None if data_name is not None: save_path = os.path.join(figures_root, f'knn_{data_name}_k={k}') # else: # save_path = os.path.join(figures_root, f'knn_k={k}') # plot the data (on top of the decision boundary color mesh) plot_data(x, t, new_figure=False, save_path=save_path) return classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_knn_boundaries(knn, h=0.02): # h = Step size in the mesh\n ax = plt.gca()\n [xmin, xmax] = ax.get_xlim()\n [ymin, ymax] = ax.get_ylim()\n # Generate the axis associated to the first feature: \n x_axis = np.arange(xmin, xmax, h)\n # Generate the axis associated to the 2nd feature: \n y_axis = np.arange(ymin, ymax, h)\n # Generate a meshgrid (2D grid) from the 2 axis:\n x_grid, y_grid = np.meshgrid(x_axis, y_axis)\n # Vectorize the grids into column vectors:\n x_grid_vectorized = x_grid.flatten()\n x_grid_vectorized = np.expand_dims(x_grid_vectorized, axis=1)\n y_grid_vectorized = y_grid.flatten()\n y_grid_vectorized = np.expand_dims(y_grid_vectorized, axis=1)\n # Concatenate the vectorized grids\n grid = np.concatenate((x_grid_vectorized, y_grid_vectorized), axis=1)\n # Now you can use 'grid' as data to classify by the knn \n\n # Predict concatenated features to get the decision boundaries:\n decision_boundaries = ... #TODO!\n\n # Reshape the decision boundaries into a 2D matrix:\n decision_boundaries = decision_boundaries.reshape(x_grid.shape)\n plt.pcolormesh(x_grid, y_grid, decision_boundaries, cmap=cmap_light, zorder=1)\n return ax", "def plot_boundary(X, y, resolution=100, n_neighbors=1):\n \n xmin, xmax, ymin, ymax = np.min(X[:,0]), np.max(X[:,0]), np.min(X[:,1]), np.max(X[:,1])\n \n xs, ys = np.linspace(xmin-0.1, xmax+0.1, num=resolution), np.linspace(ymin-0.1, ymax+0.1, num=resolution)\n xgrid, ygrid = np.meshgrid(xs, ys)\n \n \n clf = KNN(n_neighbors=n_neighbors)\n clf.fit(X, y)\n \n Xpred = np.stack((xgrid.flatten(), ygrid.flatten()), axis=1)\n ypred = clf.predict(Xpred)\n ypred = ypred.reshape((resolution, resolution))\n \n ind1 = np.where(ypred[:-1,:] != ypred[1:,:])\n ind2 = np.where(ypred[:,:-1] != ypred[:,1:])\n \n xret = np.concatenate((xgrid[ind1].flatten(), xgrid[ind2].flatten()))\n yret = np.concatenate((ygrid[ind1].flatten(), ygrid[ind2].flatten()))\n \n return xret, yret", "def plot_decision_regions(X, y, classifier, resolution=0.02):\n #setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n #plot the decision surface\n #just find the limit and/reduce 1\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n #np.arange(start, stop, step): create list of tupple from start to stop with step of step\n #np.meshgrid convert: create accessible arrays from list of tupple\n #(-1,-2) (-1,0) (-1,1) xx1 = [-1 -1 -1][0 0 0 ][1 1 1]\n #(0,-2)(0,0)(0,1) ==> \n #(1,-2)(1,0)(1,1) xx2 = [-2 -2 -2][0 0 0 ][1 1 1]\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n\n #ravel() xx1 = [-1 -1 -1 0 0 0 1 1 1]\n # xx2 = [-2 -2 -2 0 0 0 1 1 1]\n #array() [[-1 -1 -1 0 0 0 1 1 1]\n # [-2 -2 -2 0 0 0 1 1 1]] concatenation... 
sort of\n #.T , transpose from in this case a 2x9 to 9x2\n\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha = 0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n #plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n alpha=0.8,\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolor='black')", "def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]", "def plot_decision_regions(self, option, canvas):\n\t\tle = preprocessing.LabelEncoder()\t\t# integer encoder\n\t\tle.fit(self.y)\n\t\tclassifier = self.classifier.fit(self.X, le.transform(self.y))\n\t\tclasses = classifier.classes_\n\t\tnum_classes = len(classes)\n\n\t\tif option == 'train':\n\t\t\tX = self.X\n\t\t\ty = self.y\n\t\telif option == 'test':\n\t\t\tX = self.test_X\n\t\t\ty = self.test_y\n\n\t\tb1 = self.X.iloc[:, 0]\n\t\tb2 = self.X.iloc[:, 1]\n\t\tb1_slack = (b1.max() - b1.min()) * 0.1\n\t\tb2_slack = (b2.max() - b2.min()) * 0.1\n\t\tb1_min, b1_max = b1.min() - b1_slack, b1.max() + b1_slack \t# x-axis range\n\t\tb2_min, b2_max = b2.min() - b2_slack, b2.max() + b2_slack\t# y-axis range\n\t\tstep_1 = (b1_max - b1_min) 
/ 200\n\t\tstep_2 = (b2_max - b2_min) / 200\n\t\tmd1, md2 = np.meshgrid(np.arange(b1_min, b1_max, step_1), np.arange(b2_min, b2_max, step_2))\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\t\tlevels = np.arange(-0.19, 1, 0.2) + 0.2\n\n\t\tif num_classes == 2:\n\t\t\tcm_bkgd = plt.cm.RdBu\n\t\t\tcm_pts = ListedColormap(['#FF0000', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])[:, 1]\n\t\t\tZ = Z.reshape(md1.shape)\n\t\t\tax.contourf(md1, md2, Z, vmin=0, vmax=1, cmap=cm_bkgd, alpha=0.8)\n\n\t\telif num_classes == 3:\n\t\t\tcm_bkgd_1 = plt.cm.Reds\n\t\t\tcm_bkgd_2 = plt.cm.Greens\n\t\t\tcm_bkgd_3 = plt.cm.Blues\n\t\t\tcm_pts = cm_pts = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])\n\t\t\tZ1 = Z[:, 0]\n\t\t\tZ2 = Z[:, 1]\n\t\t\tZ3 = Z[:, 2]\n\n\t\t\tP1 = np.maximum(0, Z1 - np.maximum(Z2, Z3))\n\t\t\tP2 = np.maximum(0, Z2 - np.maximum(Z1, Z3))\n\t\t\tP3 = np.maximum(0, Z3 - np.maximum(Z1, Z2))\n\t\t\tP1 = P1.reshape(md1.shape)\n\t\t\tP2 = P2.reshape(md1.shape)\n\t\t\tP3 = P3.reshape(md1.shape)\n\n\t\t\tax.contourf(md1, md2, P1, levels, cmap=cm_bkgd_1, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P2, levels, cmap=cm_bkgd_2, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P3, levels, cmap=cm_bkgd_3, alpha=0.8)\n\n\t\td1 = X.iloc[:, 0] \t# x-axis\n\t\td2 = X.iloc[:, 1]\t# y-axis\n\t\tax.scatter(d1, d2, c=le.transform(y), cmap=cm_pts, alpha=0.6, edgecolors='k')\n\t\tax.set_xlim(md1.min(), md1.max())\n\t\tax.set_ylim(md2.min(), md2.max())\n\t\tax.set_xticks(())\n\t\tax.set_yticks(())\n\t\tax.set_xlabel(X.columns[0])\n\t\tax.set_ylabel(X.columns[1])\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()", "def plot_tree_clarans(data, k):\n\n n = len(data)\n num_points = int(scipy.special.binom(n, k))\n num_neigh = k * (n - k)\n\n if (num_points > 50) or (num_neigh > 10):\n print(\n \"Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big\"\n )\n return\n\n # all possibile combinations of k elements from input data\n name_nodes = list(itertools.combinations(list(data.index), k))\n\n dot = graphviz.Digraph(comment=\"Clustering\")\n\n # draw nodes, also adding the configuration cost\n for i in range(num_points):\n tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i]))\n tc = round(tot_cost, 3)\n\n dot.node(str(name_nodes[i]), str(name_nodes[i]) + \": \" + str(tc))\n\n # only connect nodes if they have k-1 common elements\n for i in range(num_points):\n for j in range(num_points):\n if i != j:\n if (\n len(set(list(name_nodes[i])) & set(list(name_nodes[j])))\n == k - 1\n ):\n dot.edge(str(name_nodes[i]), str(name_nodes[j]))\n\n graph = graphviz.Source(dot) # .view()\n display(graph)", "def plot_decision_boundary(data, x, y, labels, model, **kwargs):\n xx, yy, Z = setup_contours(data=data, x=x, y=y, model=model)\n\n x0, x1 = data[x].values, data[y].values\n x0lim = x0.min(), x0.max()\n x1lim = x1.min(), x1.max()\n\n col = data[labels].values\n plt.figure(figsize=(10, 10))\n\n plt.scatter(x0, x1, c=col, **kwargs)\n CS = plt.contourf(xx, yy, Z, **kwargs)\n CS2 = plt.contour(CS, CS.levels[::2], **kwargs)\n cbar = plt.colorbar(CS, **kwargs)\n cbar.ax.set_ylabel('Fitted Probability')\n # Add the contour line levels to the colorbar\n cbar.add_lines(CS2)\n\n plt.xlim(x0lim)\n plt.ylim(x1lim)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.legend()", "def visclassifier(fun,xTr,yTr):\n\n yTr = np.array(yTr).flatten()\n \n 
symbols = [\"ko\",\"kx\"]\n marker_symbols = ['o', 'x']\n mycolors = [[0.5, 0.5, 1], [1, 0.5, 0.5]]\n classvals = np.unique(yTr)\n\n plt.figure()\n\n res=300\n xrange = np.linspace(min(xTr[:, 0]), max(xTr[:, 0]),res)\n yrange = np.linspace(min(xTr[:, 1]), max(xTr[:, 1]),res)\n pixelX = repmat(xrange, res, 1)\n pixelY = repmat(yrange, res, 1).T\n\n xTe = np.array([pixelX.flatten(), pixelY.flatten()]).T\n\n testpreds = fun(xTe)\n Z = testpreds.reshape(res, res)\n # Z[0,0] = 1 # optional: scale the colors correctly\n plt.contourf(pixelX, pixelY, np.sign(Z), colors=mycolors)\n\n for idx, c in enumerate(classvals):\n plt.scatter(xTr[yTr == c,0],\n xTr[yTr == c,1],\n marker=marker_symbols[idx],\n color='k'\n )\n\n plt.axis('tight')\n plt.show()", "def plot_decision_regions(X, y, classifier, resolution=.02, test_idx=None):\n # setup marker generator & color map\n plt.figure()\n markers = ('x', 'o')\n colors = ('red', 'blue')\n\n # calculate and plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=.35, cmap=ListedColormap(colors=colors[:len(np.unique(y))]))\n plt.xlim(xx1.min(), xx2.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # scatter plot all values of the data sets\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolors='black')\n if test_idx:\n # circle test data\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolors='black',\n alpha=1.0,\n linewidths=1,\n marker='o',\n s=100,\n label='test set')", "def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)", "def plot_decision_boundary(pred_func):\n # Set min and max values\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()", "def plot_2D_boundary(plot_range, points, decisionfcn, labels, values=[0]):\n\n clist = ['b', 'r', 'g', 'k', 'm', 'y'] # colors for the classes\n\n # evaluate on a grid and plot contour of decision function\n x = np.arange(plot_range[0], plot_range[1], .1)\n y = np.arange(plot_range[2], plot_range[3], .1)\n xx, yy = np.meshgrid(x, y)\n xxx, yyy = xx.flatten(), yy.flatten() # lists of x,y in grid\n zz = np.array(decisionfcn(xxx, yyy))\n zz = zz.reshape(xx.shape)\n\n # plot contour(s) at values\n plt.contour(xx, yy, zz, values)\n\n # for each class, plot the points with ’*’ for correct, ’o’ for incorrect\n for i in range(len(points)):\n d = decisionfcn(points[i][:, 0], points[i][:, 1])\n correct_ndx = labels[i] == d\n incorrect_ndx = labels[i] != d\n plt.plot(\n points[i][correct_ndx, 0],\n points[i][correct_ndx, 1],\n '*',\n color=clist[i])\n plt.plot(\n points[i][incorrect_ndx, 0],\n points[i][incorrect_ndx, 1],\n 
'o',\n color=clist[i])\n plt.axis('equal')\n plt.show()", "def optimal_neighbors(X_data,\n y_data,\n standardize = True,\n pct_test=0.25,\n seed=802,\n response_type='reg',\n max_neighbors=20,\n show_viz=True): \n \n \n if standardize == True:\n # optionally standardizing X_data\n scaler = StandardScaler()\n scaler.fit(X_data)\n X_scaled = scaler.transform(X_data)\n X_scaled_df = pd.DataFrame(X_scaled)\n X_data = X_scaled_df\n\n\n\n # train-test split\n X_train, X_test, y_train, y_test = train_test_split(X_data,\n y_data,\n test_size = pct_test,\n random_state = seed)\n\n\n # creating lists for training set accuracy and test set accuracy\n training_accuracy = []\n test_accuracy = []\n \n \n # setting neighbor range\n neighbors_settings = range(1, max_neighbors + 1)\n\n\n for n_neighbors in neighbors_settings:\n # building the model based on response variable type\n if response_type == 'reg':\n clf = KNeighborsRegressor(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train)\n \n elif response_type == 'class':\n clf = KNeighborsClassifier(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train) \n \n else:\n print(\"Error: response_type must be 'reg' or 'class'\")\n \n \n # recording the training set accuracy\n training_accuracy.append(clf.score(X_train, y_train))\n \n # recording the generalization accuracy\n test_accuracy.append(clf.score(X_test, y_test))\n\n\n # optionally displaying visualization\n if show_viz == True:\n # plotting the visualization\n fig, ax = plt.subplots(figsize=(12,8))\n plt.plot(neighbors_settings, training_accuracy, label = \"training accuracy\")\n plt.plot(neighbors_settings, test_accuracy, label = \"test accuracy\")\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"n_neighbors\")\n plt.legend()\n plt.show()\n \n \n # returning optimal number of neighbors\n print(f\"The optimal number of neighbors is: {test_accuracy.index(max(test_accuracy))+1}\")\n return test_accuracy.index(max(test_accuracy))+1", "def plot_decision_boundary(resolution=100, colors=('b', 'k', 'r'), levels=(-1, 0, 1)):\n\n # Generate coordinate grid of shape [resolution x resolution]\n # and evaluate the model over the entire space\n xrange = np.linspace(x_train[:,0].min(), x_train[:,0].max(), resolution)\n yrange = np.linspace(x_train[:,1].min(), x_train[:,1].max(), resolution)\n grid = [[decision_function(alpha, y_train,\n Kernel1, x_train,\n np.array([xr, yr]), b) for xr in xrange] for yr in yrange]\n grid = np.array(grid).reshape(len(xrange), len(yrange))\n\n # Plot decision contours using grid and\n # make a scatter plot of training data\n ax.contour(xrange, yrange, grid, levels=levels, linewidths=(1, 1, 1),\n linestyles=('--', '-', '--'), colors=colors)\n ax.scatter(x_train[:,0], x_train[:,1],\n c=y_train, cmap=plt.cm.viridis, lw=0, alpha=0.25)\n\n # Plot support vectors (non-zero alphas)\n # as circled points (linewidth > 0)\n mask = np.round(alpha, decimals=2) != 0.0\n ax.scatter(x_train[mask,0], x_train[mask,1],\n c=y_train[mask], cmap=plt.cm.viridis, lw=1, edgecolors='k')\n\n return grid, ax", "def plot_decision_boundary(model, X, y):\r\n \r\n x1_array, x2_array = np.meshgrid(np.arange(-4, 4, 0.01), np.arange(-4, 4, 0.01))\r\n grid_coordinates = np.c_[x1_array.ravel(), x2_array.ravel()]\r\n Z = model.predict(grid_coordinates)\r\n Z = Z.reshape(x1_array.shape)\r\n plt.contourf(x1_array, x2_array, Z, cmap=plt.cm.bwr)\r\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\n plt.show()", "def fit(self, data, labels, labels_pred):\n self.n_samples, dim = data.shape\n self.labels_unique = 
np.unique(labels)\n self.n_classes = len(self.labels_unique)\n if self.n_neighbors is None:\n # Set number of nearest neighbors based on the maximum number of samples per class and the neighborhood\n # constant\n num = 0\n for c in self.labels_unique:\n ind = np.where(labels == c)[0]\n if ind.shape[0] > num:\n num = ind.shape[0]\n\n self.n_neighbors = int(np.ceil(num ** self.neighborhood_constant))\n\n logger.info(\"Number of samples: {:d}. Data dimension = {:d}.\".format(self.n_samples, dim))\n logger.info(\"Number of classes: {:d}.\".format(self.n_classes))\n logger.info(\"Number of neighbors (k): {:d}.\".format(self.n_neighbors))\n logger.info(\"Fraction of outliers (alpha): {:.4f}.\".format(self.alpha))\n if self.model_dim_reduction:\n data = transform_data_from_model(data, self.model_dim_reduction)\n dim = data.shape[1]\n logger.info(\"Applying dimension reduction to the data. Projected dimension = {:d}.\".format(dim))\n\n # Distance from each sample in `data` to the `1 - alpha` level sets corresponding to each class\n distance_level_sets = np.zeros((self.n_samples, self.n_classes))\n self.index_knn = dict()\n self.epsilon = dict()\n indices_sub = dict()\n for j, c in enumerate(self.labels_unique):\n logger.info(\"Processing data from class '{}':\".format(c))\n logger.info(\"Building a KNN index for all the samples from class '{}'.\".format(c))\n indices_sub[c] = np.where(labels == c)[0]\n data_sub = data[indices_sub[c], :]\n self.index_knn[c] = KNNIndex(\n data_sub, n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distances to the k nearest neighbors of each sample\n _, nn_distances = self.index_knn[c].query_self(k=self.n_neighbors)\n # Radius or distance to the k-th nearest neighbor for each sample\n radius_arr = nn_distances[:, self.n_neighbors - 1]\n\n # Smallest radius `epsilon` such that only `alpha` fraction of the samples from class `c` have radius\n # greater than `epsilon`\n if self.alpha > 0.:\n self.epsilon[c] = np.percentile(radius_arr, 100 * (1 - self.alpha), interpolation='midpoint')\n\n # Exclude the outliers and build a KNN index with the remaining samples\n mask_incl = radius_arr <= self.epsilon[c]\n mask_excl = np.logical_not(mask_incl)\n num_excl = mask_excl[mask_excl].shape[0]\n else:\n # Slightly larger value than the largest radius\n self.epsilon[c] = 1.0001 * np.max(radius_arr)\n\n # All samples are included in the density level set\n mask_incl = np.ones(indices_sub[c].shape[0], dtype=np.bool)\n mask_excl = np.logical_not(mask_incl)\n num_excl = 0\n\n if num_excl:\n logger.info(\"Excluding {:d} samples with radius larger than {:.6f} and building a KNN index with \"\n \"the remaining samples.\".format(num_excl, self.epsilon[c]))\n self.index_knn[c] = KNNIndex(\n data_sub[mask_incl, :], n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distance to the nearest neighbor of each sample that is part of the KNN index\n _, dist_temp = self.index_knn[c].query_self(k=1)\n ind = indices_sub[c][mask_incl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n\n # Distance to the nearest neighbor of each sample that is not a part of the KNN index (outliers)\n _, dist_temp = self.index_knn[c].query(data_sub[mask_excl, :], 
k=1)\n ind = indices_sub[c][mask_excl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n else:\n # No need to rebuild the KNN index because no samples are excluded.\n # Distance to the nearest neighbor of each sample\n distance_level_sets[indices_sub[c], j] = nn_distances[:, 0]\n\n logger.info(\"Calculating the trust score for the estimation data.\")\n for c in self.labels_unique:\n # Compute the distance from each sample from class `c` to the level sets from the remaining classes\n data_sub = data[indices_sub[c], :]\n for j, c_hat in enumerate(self.labels_unique):\n if c_hat == c:\n continue\n\n _, dist_temp = self.index_knn[c_hat].query(data_sub, k=1)\n distance_level_sets[indices_sub[c], j] = dist_temp[:, 0]\n\n self.scores_estim = self._score_helper(distance_level_sets, labels_pred)\n return self", "def find_knn_hyperparams():\n n_neighbors = np.arange(5, 10)\n ps = np.arange(1, 10)\n results = []\n\n for p in ps:\n result = []\n for _ in range(10):\n data = FaceDataset(\"embeddings/known\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for n in n_neighbors:\n clf = KNeighborsClassifier(n_neighbors=n, weights=\"distance\", p=p)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n result.append(accs)\n result = np.mean(result, axis=0)\n results.append(result)\n\n plots = []\n for i in range(len(ps)):\n p = plotly.graph_objs.Scatter(x=n_neighbors, y=results[i], name=\"p={}\".format(ps[i]))\n plots.append(p)\n\n plotly.offline.plot(plots, filename=\"knn.html\")\n print(\"C={}\".format(n_neighbors[np.argmax(results)]))", "def show_nn(X):\n neigh = NearestNeighbors(n_neighbors=2)\n nbrs = neigh.fit(X)\n distances, indices = nbrs.kneighbors(X)\n distances = np.sort(distances, axis=0)\n distances = distances[:,1]\n plt.plot(distances)", "def plot_decision_boundary(model: torch.nn.Module, X: torch.Tensor, y: torch.Tensor):\n # Put everything to CPU (works better with NumPy + Matplotlib)\n model.to(\"cpu\")\n X, y = X.to(\"cpu\"), y.to(\"cpu\")\n\n # Setup prediction boundaries and grid\n x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1\n y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))\n\n # Make features\n X_to_pred_on = torch.from_numpy(np.column_stack((xx.ravel(), yy.ravel()))).float()\n\n # Make predictions\n model.eval()\n with torch.inference_mode():\n y_logits = model(X_to_pred_on)\n\n # Test for multi-class or binary and adjust logits to prediction labels\n if len(torch.unique(y)) > 2:\n y_pred = torch.softmax(y_logits, dim=1).argmax(dim=1) # mutli-class\n else:\n y_pred = torch.round(torch.sigmoid(y_logits)) # binary\n\n # Reshape preds and plot\n y_pred = y_pred.reshape(xx.shape).detach().numpy()\n plt.contourf(xx, yy, y_pred, cmap=plt.cm.RdYlBu, alpha=0.7)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())", "def decisionBoundary(root, figure, fileName):\n stepValue = 0.001\n classClassification = [1, 2, 3, 4]\n colorClassification = ['b', 'g', 'r', 'm']\n markerClassification = ['x', '+', '*', 'o']\n classesList = [\"Bolts\", \"Nuts\", \"Rings\", \"Scraps\"]\n decisionPlot = figure.add_subplot(111)\n attributeValues, classes, _ = readData(fileName)\n attributeValues = np.array(attributeValues)\n classes = np.array(classes)\n \n \n\n attribute1, attribute2 = np.meshgrid(np.arange(0, 1, stepValue), 
np.arange(0, 1, stepValue))\n\n predicted_class = []\n for i in range(attribute1.shape[0]):\n predicted_class.append([])\n for j in range(attribute1.shape[1]):\n result = [attribute1[i][j], attribute2[i][j]]\n predicted_value = classify(np.array(result), root)\n predicted_class[i].append(predicted_value)\n\n decisionPlot.contourf(attribute1, attribute2, np.array(predicted_class))\n\n for a in classClassification:\n attribute1=[]\n attribute2=[]\n \n for j in range(len(attributeValues[:])):\n \n if classes[j]==a:\n attribute1 +=[attributeValues[j][0]]\n for k in range(len(attributeValues[:])):\n if classes[k]==a:\n attribute2 +=[attributeValues[k][1]]\n \n \n decisionPlot.scatter(attribute1, attribute2, color=colorClassification[a - 1], marker=markerClassification[a - 1]\n , label=classesList[a - 1], s=100)\n\n decisionPlot.legend(loc='upper right')\n decisionPlot.set_xlabel(\"Six fold Rotational Symmetry\")\n decisionPlot.set_ylabel(\"Eccentricity\")\n decisionPlot.set_title(\"Decision boundary\")\n return decisionPlot", "def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class", "def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions", "def make_prediction_grid(predictors, outcomes, limits, h, k):\n\t(x_min, x_max, y_min, y_max) = limits\n\txs = np.arange(x_min, x_max, h)\n\tys = np.arange(y_min, y_max, h)\n\txx, yy = np.meshgrid(xs, ys)\n\n\tprediction_grid = np.zeros(xx.shape, dtype = int)\n\tfor i,x in enumerate(xs):\n\t\tfor j, y in enumerate(ys):\n\t\t\tp = np.array([x, y])\n\t\t\tprediction_grid[j,i] = knn_predict(p, predictors, outcomes, k)\n\n\treturn(xx, yy, prediction_grid)", "def 
visualize(self, reduced_data):\n\t\t# Step size of the mesh. Decrease to increase the quality of the VQ.\n\t\th = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].\n\t\t\n\t\t# Plot the decision boundary. For that, we will assign a color to each\n\t\tx_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1\n\t\ty_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1\n\t\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n\t\t# Obtain labels for each point in mesh. Use last trained model.\n\t\tZ = self.estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n\t\t# Put the result into a color plot\n\t\tZ = Z.reshape(xx.shape)\n\t\t\n\t\tplt.figure(1)\n\t\tplt.clf()\n\t\tplt.imshow(Z, interpolation='nearest',\n\t\t extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n\t\t cmap=plt.cm.Paired,\n\t\t aspect='auto', origin='lower')\n\n\t\tplt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=4)\n\t\t# Plot the centroids as a white X\n\t\tcentroids = self.estimator.cluster_centers_\n\t\tplt.scatter(centroids[:, 0], centroids[:, 1],\n\t\t marker='x', s=169, linewidths=3,\n\t\t color='w', zorder=10)\n\t\tplt.title('K-means clustering with random data (PCA-reduced data)\\n'\n\t\t 'Centroids are marked with white cross')\n\t\tplt.xlim(x_min, x_max)\n\t\tplt.ylim(y_min, y_max)\n\t\tplt.xticks(())\n\t\tplt.yticks(())\n\t\tplt.show()", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass", "def run_knn(\n features: List[List[float]],\n labels: List[Optional[bool]],\n k: int = 1,\n) -> List[bool]:\n # Filter out the features that are already clustered\n features_l, labels_l = zip(*[(f, l) for f, l in zip(features, labels) if isinstance(l, bool)])\n\n # Fit a nearest neighbour algorithm\n neighbours = KNeighborsClassifier(\n n_neighbors=k,\n ).fit(features_l, labels_l)\n\n # Predict all the features' labels\n return neighbours.predict(features) # type: ignore", "def plotDistributionWithLimitsRefine(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n 
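The run_knn snippet above applies nearest neighbours to label completion: fit a KNeighborsClassifier only on the points whose label is already known, then predict a label for every point, labelled or not. A minimal sketch of that pattern follows; the toy features, labels and n_neighbors=1 are illustrative assumptions rather than values from the snippet.

from typing import List, Optional
from sklearn.neighbors import KNeighborsClassifier

features: List[List[float]] = [[0.0, 0.1], [0.2, 0.0], [1.0, 1.1], [0.9, 1.0], [0.5, 0.6]]
labels: List[Optional[bool]] = [True, True, False, False, None]  # last point unlabelled

# Fit only on the points whose label is already known ...
known = [(f, l) for f, l in zip(features, labels) if l is not None]
X_known, y_known = zip(*known)
knn = KNeighborsClassifier(n_neighbors=1).fit(X_known, y_known)

# ... then predict a label for every point, known or unknown.
print(knn.predict(features))  # [ True  True False False False]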
axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n \"*** YOUR CODE HERE ***\"\n\t#create dictionary of all features for each label\n dict = {}\n for feature in self.features:\n\t\tfor label in self.legalLabels:\n\t\t\tdict[feature, label] = util.Counter()\n\t\t\tfor i in [0,1]: #values of a counter from datum\n\t\t\t\tdict[(feature, label)][i] = 0\n\t\t\t\t#print str(feature) + str(label) + ' ' + str(dict[(feature, label)])\n labelCount = util.Counter()\n for i in range(len(trainingData)):\n\t\t#increment occurrences of each label found in the training data\n\t\tlabel = trainingLabels[i]\n\t\tlabelCount[label] += 1\n\t\tfor feature in trainingData[i]:\n\t\t\t#increment dictionary value by 1 when a feature label combination with a value is found\n\t\t\tdict[(feature, label)][trainingData[i][feature]] += 1\n #normalize labelCount to get P(y) for each label y, or the prior probability \n self.prior = util.normalize(labelCount)\n\t\n bestk = 0\n bestcond = {}\n topguesses = 0\n\t#iterate through each k to find the best k\n for k in kgrid:\n\t\t#empty cond probs\n\t\tself.condprobs = {} \n\t\t#smooth data\n\t\tfor feature_label in dict:\n\t\t\ttmpcounter = dict[feature_label] \n\t\t\t#print feature_label\n\t\t\ttmpcounter.incrementAll(tmpcounter.keys(), k)\n\t\t\t#set condprobs to cond probs with current k value\n\t\t\tself.condprobs[feature_label] = util.normalize(tmpcounter)\n\t\tguesses = self.classify(validationData)\n\t\tguesscorrect = 0\n\t\t#print[guesses]\n\t\tfor i in range(len(guesses)):\n\t\t\tif guesses[i] == validationLabels[i]:\n\t\t\t\tguesscorrect += 1\n\t\tif guesscorrect > topguesses:\n\t\t\tprint \"Guess \",k ,\" is better than \",bestk\n\t\t\ttopguesses = guesscorrect\n\t\t\tbestcond = self.condprobs\n\t\t\tbestk = k\n self.condprobs = bestcond\n self.k = bestk", "def KNN(x_train, x_test, y_train, k=3):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n y_pred = knn.predict(x_test)\n return y_pred", "def plot_decision_paths(\n self, distributions=True, heatmap=True, thr_pvalue=1, num_cols=6, save=None\n ):\n # drop insignificant values\n data_clustering_ranked = self._data_clustering_ranked.copy()\n for column in data_clustering_ranked.columns:\n if self.p_value_of_features[column] > thr_pvalue:\n data_clustering_ranked.drop(column, axis=1, inplace=True)\n\n if heatmap:\n plotting._plot_heatmap(\n data_clustering_ranked, thr_pvalue, self.model_type, save\n )\n\n if distributions:\n plotting._plot_distributions(\n data_clustering_ranked, thr_pvalue, num_cols, save\n )", "def plot_decision_function(fitted_classifier, range_features, ax=None):\n from sklearn.preprocessing import LabelEncoder\n\n feature_names = list(range_features.keys())\n # create a grid to evaluate all possible samples\n plot_step = 0.02\n xx, yy = np.meshgrid(\n 
np.arange(*range_features[feature_names[0]], plot_step),\n np.arange(*range_features[feature_names[1]], plot_step),\n )\n\n # compute the associated prediction\n Z = fitted_classifier.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = LabelEncoder().fit_transform(Z)\n Z = Z.reshape(xx.shape)\n\n # make the plot of the boundary and the data samples\n if ax is None:\n _, ax = plt.subplots()\n ax.contourf(xx, yy, Z, alpha=0.4, cmap=\"RdBu\")\n\n return ax", "def showData(data, labels, truelabels=None):\n\n n = data.shape[0]\n colors = np.dot(labels,np.arange(2)).reshape([-1]) # for color-coding labels\n\n plt.figure()\n plt.scatter(data[:,0],data[:,1], c=colors, s=40)\n\n\n # identify incorrectly labeled examples with an x colored with the correct class\n if truelabels is not None:\n incorrect_idx = []\n truecolors = np.dot(truelabels,np.arange(2)).reshape([-1])\n for i in range(n):\n if not isgoodprediction(labels[i,:], truelabels[i,:]):\n incorrect_idx.append(i)\n plt.scatter( data[incorrect_idx,0], data[incorrect_idx,1],s=50, c='k', marker='x',lw=5 ,label='misclassified')\n\n plt.legend()\n plt.axes().set_aspect('equal', 'datalim')\n plt.show()", "def plot_decision_regions(X, y, clf,\n ax=None,\n X_highlight=None,\n res=0.02, legend=1,\n hide_spines=True,\n markers='s^oxv<>',\n colors='red,blue,limegreen,gray,cyan'):\n # http://stackoverflow.com/questions/22294241/plotting-a-decision-boundary-separating-2-classes-using-matplotlibs-pyplot?lq=1\n # check if data is numpy array\n for a in (X, y):\n if not isinstance(a, np.ndarray):\n raise ValueError('%s must be a NumPy array.' % a.__name__)\n\n if ax is None:\n ax = plt.gca()\n\n if not y.dtype == int:\n y = y.astype(int)\n\n # check if test data is provided\n plot_testdata = True\n if not isinstance(X_highlight, np.ndarray):\n if X_highlight is not None:\n raise ValueError('X_test must be a NumPy array or None')\n else:\n plot_testdata = False\n\n if len(X.shape) == 2 and X.shape[1] > 1:\n dim = '2d'\n else:\n dim = '1d'\n\n marker_gen = cycle(list(markers))\n\n # make color map\n n_classes = len(np.unique(y))\n colors = colors.split(',')\n cmap = ListedColormap(colors[:n_classes])\n\n # plot the decision surface\n if dim == '2d':\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n else:\n y_min, y_max = -1, 1\n\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, res),\n np.arange(y_min, y_max, res))\n\n if dim == '2d':\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n Z = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)\n else:\n y_min, y_max = -1, 1\n Z = clf.predict(np.array([xx.ravel()]).T)\n\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, alpha=0.3, cmap=cmap)\n\n ax.axis(xmin=xx.min(), xmax=xx.max(), y_min=yy.min(), y_max=yy.max())\n\n # plot class samples\n\n for c in np.unique(y):\n if dim == '2d':\n y_data = X[y == c, 1]\n else:\n y_data = [0 for i in X[y == c]]\n\n ax.scatter(x=X[y == c, 0],\n y=y_data,\n alpha=0.8,\n c=cmap(c),\n marker=next(marker_gen),\n label=c)\n\n if hide_spines:\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n if not dim == '2d':\n ax.axes.get_yaxis().set_ticks([])\n\n if legend:\n legend = plt.legend(loc=legend,\n fancybox=True,\n framealpha=0.3,\n scatterpoints=1,\n handletextpad=-0.25,\n borderaxespad=0.9)\n\n ax.add_artist(legend)\n\n if plot_testdata:\n if dim 
== '2d':\n ax.scatter(X_highlight[:, 0],\n X_highlight[:, 1],\n c='',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=80)\n else:\n ax.scatter(X_highlight,\n [0 for i in X_highlight],\n c='',\n alpha=1.0,\n linewidth=1,\n marker='o',\n s=80)\n\n return ax", "def decision_heatmaps(obs):\n global model\n assert obs > 4 and obs < 1788, \"Chosen observation is outside the range of the demonstration episode.\"\n saved_observations = np.load('saved_observations.npy')\n state = saved_observations[obs-3:obs+1]\n state_batch = np.expand_dims(state, axis=0)\n q_vals = model.compute_q_values(state)\n print(q_vals)\n decision = np.argmax(q_vals)\n print(decision)\n decision_encodings = ['None','Up','Right','Left','Down','Right-Up','Left-Up','Right-Down','Left-Down']\n decision_node = model.model.output[:, decision]\n last_conv_layer = model.model.layers[3]\n grads = K.gradients(decision_node, last_conv_layer.output)[0]\n pooled_grads = K.mean(grads, axis=(0,2,3))\n frame = Image.fromarray(state[-1])\n iterate = K.function([model.model.input],[pooled_grads, last_conv_layer.output[0]])\n pooled_grads_value, conv_layer_output_value = iterate([state_batch])\n for i in range(64):\n conv_layer_output_value[i, :, :] *= pooled_grads_value[i]\n\n heatmap = np.mean(conv_layer_output_value, axis=0)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (84, 84))\n heatmap = np.uint8(255 * heatmap)\n superimposed_img = heatmap * .3 + np.array(frame) * .35\n\n plt.xlabel(decision_encodings[decision])\n plt.imshow(superimposed_img, cmap='gray')\n plt.show()", "def plot_seg_bound_comparison(data_list, rows, start_with, show_every, start_inx, n_class, fig_name=None, width=2, scale=4):\n cols = 6 # [input, label_seg, label_bound, pred_bound(converted), pred_bound_2d, pred_bound_3d]\n n_batch = len(data_list)\n # print(\"number of slices: {}\".format(n_batch))\n _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])\n\n for ind in range(n_batch):\n input = data_list[ind]['input']\n label_seg = data_list[ind]['GT_seg']\n pred_seg = data_list[ind]['pred_seg'] # seg prediction is not plotted here\n pred_bound_conv = mask2outerbound(pred_seg, width=width) # convert seg to inner-outer bound\n label_bound = data_list[ind]['GT_bound']\n pred_bound_2d = data_list[ind]['pred_2d_bound']\n pred_bound_3d = data_list[ind]['pred_3d_bound']\n # print(\"input: {}, seg: {}, pred_seg: {}, label_bound: {}, pred_bound_2d: {}, pred_bound_3d: {}\".format(input.shape,\n # label_seg.shape, pred_seg.shape, label_bound.shape, pred_bound_2d.shape, pred_bound_3d.shape))\n # print()\n\n # # calculate average F1 score\n # label_binary = label_binarize(label_seg.flatten(), classes=range(n_class))\n # pred_binary = label_binarize(pred_seg.flatten(), classes=range(n_class))\n #\n # f_score = np.zeros(n_class, dtype=np.float32)\n # slice_effect_class = 0\n # for i in range(n_class):\n # if np.sum(label_binary[:,i]) == 0:\n # f_score[i] = 0.0\n # else:\n # slice_effect_class += 1\n # f_score[i] = f1_score(label_binary[:,i], pred_binary[:,i])\n #\n # ave_f_score = np.sum(f_score) / slice_effect_class\n\n # calculate average HFD\n hdf_seg = slicewise_hd95(pred_bound_conv, label_bound, n_class)\n hdf_bound_2d = slicewise_hd95(pred_bound_2d, label_bound, n_class)\n hdf_bound_3d = slicewise_hd95(pred_bound_3d, label_bound, n_class)\n\n if (ind - start_with) % show_every == 0:\n i = (ind - start_with) // show_every\n if i < rows:\n ax[i, 0].imshow(input, cmap='gray')\n ax[i, 0].set_title(\"Slice {} : 
{}\".format(ind+start_inx, 'input'))\n ax[i, 0].axis('off')\n\n ax[i, 1].imshow(mask2rgb(label_seg))\n ax[i, 1].set_title('Slice %d : %s' % (ind+start_inx, 'label_seg'))\n ax[i, 1].axis('off')\n\n label_bound_cp = label_bound.copy()\n label_bound_cp[label_bound != 0] = 4\n\n ax[i, 2].imshow(mask2rgb(label_bound_cp))\n ax[i, 2].set_title('Slice %d : %s' % (ind + start_inx, 'label_bound'))\n ax[i, 2].axis('off')\n\n # plot overlapping between pred_bound_conv and label_bound\n overlap_seg = pred_bound_conv.copy()\n overlap_seg[label_bound != 0] = 4\n\n ax[i, 3].imshow(mask2rgb(overlap_seg))\n ax[i, 3].set_title(\"Slice {:d} : bound from seg (hdf={:.4f})\".format(ind + start_inx, hdf_seg))\n ax[i, 3].axis('off')\n\n overlap_bound_2d = pred_bound_2d.copy()\n overlap_bound_2d[label_bound != 0] = 4\n ax[i, 4].imshow(mask2rgb(overlap_bound_2d))\n ax[i, 4].set_title(\"Slice {:d} : 2D bound (hdf={:.4f})\".format(ind + start_inx, hdf_bound_2d))\n ax[i, 4].axis('off')\n\n overlap_bound_3d = pred_bound_3d.copy()\n overlap_bound_3d[label_bound != 0] = 4\n ax[i, 5].imshow(mask2rgb(overlap_bound_3d))\n ax[i, 5].set_title(\"Slice {:d} : 3D bound (hdf={:.4f})\".format(ind + start_inx, hdf_bound_3d))\n ax[i, 5].axis('off')\n\n if fig_name:\n plt.savefig(fig_name + '.pdf')\n\n plt.close()", "def plot_decision(X, y, path, model, param, ax=None, h=0.07):\n if ax is None:\n _, ax = plt.subplots(figsize=(7, 6))\n\n # https://stackoverflow.com/a/19055059/6027071\n # sample a region larger than our training data X\n x_min = X[:, 0].min() - 0.5\n x_max = X[:, 0].max() + 0.5\n y_min = X[:, 1].min() - 0.5\n y_max = X[:, 1].max() + 0.5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # plot decision boundaries\n x = np.concatenate(([xx.ravel()], [yy.ravel()]))\n pred = model.predict(x.T).reshape(xx.shape)\n ax.contourf(xx, yy, pred, alpha=0.8, cmap='RdYlBu')\n\n # plot points (coloured by class)\n ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, cmap='RdYlBu')\n ax.axis('off')\n\n title = 'hidden_dim: {} | learning rate: {} | n_epochs: {} | lambda_1: {} | lambda_2: {}'.format(\n param[0], param[1], param[2], param[3], param[4]\n )\n\n plt.title(title)\n plt.savefig(path)\n plt.close()", "def K_Nearest_Neighbours_Model(train_features, train_labels, k_value=5, algorithm_auto=\"auto\"):\n # create an instance of the KNN SciKit learn class\n model = KNeighborsClassifier(n_neighbors=k_value, algorithm=algorithm_auto)\n # fit the model to the training data and labels\n model.fit(train_features, train_labels.values.ravel())\n # return the .fit() model\n return model", "def partial_dependence_plot(model, data, important_labels, feature_names):\n n_plots_per_row = 3\n n_plots = ceil(important_labels.shape[0] / n_plots_per_row)\n\n for plot_index, x_index in enumerate(important_labels, 1):\n target = X_train[:, x_index]\n unique_target = np.unique(target)\n n_unique = unique_target.shape[0]\n\n is_categorical = n_unique == 2\n if is_categorical:\n x_points = unique_target\n y_points = np.zeros_like(unique_target)\n else:\n # for numeric values, generate a fix number of values\n # in between the min and max value of the target column\n n_points = min(n_unique, 50)\n x_points = np.linspace(np.min(target), np.max(target), n_points)\n y_points = np.zeros_like(x_points)\n\n for i in range(x_points.shape[0]):\n x_data = data.copy()\n x_data[:, x_index] = x_points[i]\n y_pred = model.predict(x_data)\n y_points[i] = np.mean(y_pred)\n\n plt.subplot(n_plots, n_plots_per_row, plot_index)\n if is_categorical:\n 
plt.bar(x_points, y_points)\n else:\n plt.plot(x_points, y_points)\n\n plt.title(feature_names[x_index])\n\n plt.tight_layout()\n plt.show()", "def plot_metrics2(df, n_arr, risk_best, epsilon_pos, epsilon_neg, row, col,\n **kwargs):\n xlim = (min(n_arr), max(n_arr))\n # g = sns.FacetGrid(df, row = row, col = col,\n # col_order = ['risk', 'gap_FPR', 'gap_FNR'], xlim = xlim,\n # ylim = (0, 1), **kwargs)\n g = sns.FacetGrid(df, row=row, col=col,\n col_order=['risk', 'gap_FPR', 'gap_FNR'], xlim=xlim,\n **kwargs)\n g.map(sns.pointplot, 'n', 'value', order=n_arr, ci='sd')\n g.set_xticklabels(rotation=45)\n\n risk_best = to_iterable(risk_best)\n epsilon_pos = to_iterable(epsilon_pos)\n epsilon_neg = to_iterable(epsilon_neg)\n\n for i, rr in enumerate(risk_best):\n g.axes[i, 0].hlines(rr, *g.axes[i, 0].get_xlim())\n g.axes[i, 0].hlines(rr, *g.axes[i, 0].get_xlim())\n\n for i, ee in enumerate(epsilon_pos):\n g.axes[i, 1].hlines(ee, *g.axes[i, 1].get_xlim())\n g.axes[i, 1].hlines(ee, *g.axes[i, 1].get_xlim())\n\n for i, ee in enumerate(epsilon_neg):\n g.axes[i, 2].hlines(ee, *g.axes[i, 1].get_xlim())\n g.axes[i, 2].hlines(ee, *g.axes[i, 1].get_xlim())\n\n g.set_titles(template='')\n\n for ax, m in zip(g.axes[0, :], ['risk', 'gap_FPR', 'gap_FNR']):\n ax.set_title(m)\n for ax, l in zip(g.axes[:, 0], df[row].unique()):\n ax.set_ylabel(l, rotation=90, ha='center', va='center')\n\n return g", "def dp_kendall_feature_selection(features, labels, k, epsilon):\n # Remove intercept feature\n features = features[:, :-1]\n _, d = features.shape\n split_eps = epsilon / k\n selected_indices = []\n # selected_correlations tracks correlations between features and previously\n # selected features\n selected_correlations = np.zeros((k - 1, d))\n label_coefficients = kendall(features, labels)\n # Sensitivity is 3/2 in the first round and 3 after\n sensitivity = 3/2\n for j in range(k):\n diffs = label_coefficients\n if selected_indices:\n last_idx = selected_indices[-1]\n old_feature = features[:, last_idx]\n selected_correlations[j - 1, :] = kendall(features, old_feature)\n diffs = diffs - (\n np.sum(selected_correlations, axis=0) / len(selected_indices)\n )\n sensitivity = 3\n new_idx = top_k.basic_peeling_mechanism(\n item_counts=diffs,\n k=1,\n epsilon=split_eps,\n l_inf_sensitivity=sensitivity,\n monotonic=False,\n )[0]\n selected_indices.append(new_idx)\n label_coefficients[new_idx] = -np.inf\n # Add back intercept feature\n selected_indices.append(d)\n return np.asarray(selected_indices)", "def plot_classification(nsrc_predict, nsrc_true, n_max=4, label=''):\n bins = np.linspace(0.5, n_max + 0.5, n_max + 1)\n fig, axes = plt.subplots(1, n_max, sharex=True, sharey=True,\n figsize=(2.5 * n_max, 2.5))\n for n in range(1, n_max + 1):\n sel = (nsrc_true == n)\n ax = axes[n - 1]\n f, _, _ = ax.hist(nsrc_predict[sel], bins, histtype='stepfilled',\n lw=2, density=True)\n ax.axvline(n, c='r', ls='--')\n ax.text(0.83, 0.9, f'{label}{100 * f[n - 1]:.1f}%',\n horizontalalignment='right', color='r', fontsize=14,\n transform=ax.transAxes)\n plt.tight_layout()", "def leaveKout_CV(X, y, n_scz_te, rep, perms, classifiers, parameters, count,\n freq_bands, x_size, auc, nz_coef_idx, nz_coef_val, n_BAitaSig = None):\n \n skf = StratifiedKFold(n_splits=int(sum(y==0)//n_scz_te),shuffle=True, random_state = rep)\n count_plt = 0\n fig, ax = plt.subplots(2,3 , figsize=(10,6.5))\n for tr_idx, te_idx in skf.split(X,y):\n # Compute test and train targets\n y_tr = np.ravel(y[tr_idx])\n y_te = np.ravel(y[te_idx])\n \n # Make gridsearch 
function\n clf_name = list(classifiers.keys())[0]\n count += 1\n sns.set(font_scale=1.5)\n for i in range(1): #range(len(freq_bands)):\n if count_plt == 6:\n plt.suptitle('Example of line search for the regularization parameter', fontsize= 18)\n plt.tight_layout()\n plt.subplots_adjust(top = 0.84, bottom = 0.15, hspace = 0.5, wspace = 0.45)\n fig.legend(['Train', 'Validation'], bbox_to_anchor = (0.5, 0.89), \n borderaxespad = 0., loc = 'upper center', ncol = 2)\n \n plt.show()\n fig.savefig('/share/FannyMaster/PythonNew/Figures/LineSearchEx.jpg', bbox_inches = 'tight')\n sns.reset_orig()\n raise NameError('This is just a dumb way of stopping the code after 6 iterations')\n \n i = 1\n clf = GridSearchCV(classifiers[clf_name], {'alpha' :parameters[freq_bands[i]]}, \n cv = StratifiedKFold(n_splits = int(sum(y_tr==0)//n_scz_te)), \n scoring = 'roc_auc', n_jobs = -1, return_train_score=True)\n # Compute test and train sets \n if n_BAitaSig == None:\n X_tr = X[tr_idx, x_size*i:x_size*(i+1)]\n X_te = X[te_idx, x_size*i:x_size*(i+1)]\n else:\n if x_size == sum(n_BAitaSig):\n X_tr = X[tr_idx, :]\n X_te = X[te_idx, :]\n else:\n n_temp = [0]\n n_temp.extend(np.cumsum(n_BAitaSig))\n X_tr = X[tr_idx, n_temp[i]:n_temp[i+1]]\n X_te = X[te_idx, n_temp[i]:n_temp[i+1]]\n \n \n # Standardize\n scaler_out = preprocessing.StandardScaler().fit(X_tr)\n X_tr = scaler_out.transform(X_tr)\n X_te = scaler_out.transform(X_te)\n\n # Fit data and save auc scores\n fit = clf.fit(X_tr, y_tr)\n auc[freq_bands[i]][count] = fit.score(X_te, y_te)\n \n # Make parameter plot\n #plot_grid_search(clf.cv_results_, 'score', parameters[freq_bands[i]], 'log($\\lambda$) ' + freq_bands[i])\n cv_results = clf.cv_results_\n metric = 'score'\n grid_param_1 = parameters[freq_bands[i]]\n \n scores_mean = cv_results[('mean_test_' + metric)]\n # scores_sd = cv_results[('std_test_' + metric)]\n scores_mean_tr = cv_results[('mean_train_' + metric)]\n \n # Set plot style\n #plt.style.use('seaborn')\n \n # Plot Grid search scores\n\n sns.set(font_scale=1.5)\n df1 = pd.DataFrame({'log($\\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean_tr, 'type' : ['train']*len(scores_mean_tr)})\n df2 = pd.DataFrame({'log($\\lambda$)':[math.log(i) for i in grid_param_1], 'CV Average AUC' : scores_mean, 'type' : ['test']*len(scores_mean_tr)})\n sns.lineplot(x = 'log($\\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = \"o\", data = df1, ax = ax[count_plt//3][count_plt%3])\n sns.lineplot(x = 'log($\\lambda$)', y = 'CV Average AUC', style='type', legend = False, markers = \"o\", data = df2, ax = ax[count_plt//3][count_plt%3])\n\n ax[count_plt//3][count_plt%3].set_xlabel('log($\\lambda$)', fontsize=14)\n ax[count_plt//3][count_plt%3].set_ylabel('CV Average AUC' , fontsize=14) \n \n #pprint(clf.cv_results_)\n #pdb.set_trace() # Type \"exit\" to get out, type \"c\" to continue\n count_plt += 1\n if len(perms) == 1:\n coef_idx = np.nonzero(fit.best_estimator_.coef_)\n nz_coef_idx[freq_bands[i]].append(coef_idx)\n nz_coef_val[freq_bands[i]].append(fit.best_estimator_.coef_[coef_idx])\n\n return auc, nz_coef_idx, nz_coef_val, count", "def fit_and_plot(self, max_iter):\n from matplotlib import pyplot as plt\n from matplotlib import cm\n\n colours = cm.rainbow(np.linspace(0, 1, self.num_classes)) # FIXME: rainbow list -> array\n\n def plot_data(d):\n for c in range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*d[c][n].T, c=colours[c])\n plt.waitforbuttonpress()\n\n def plot_mean(th):\n for c in 
range(self.num_classes):\n for n in range(self.num_nuisances):\n plt.scatter(*th[c][n].mean.T, c=colours[c], marker=\"x\")\n plt.waitforbuttonpress()\n\n plt.ion()\n plt.scatter(*self.data.T)\n plt.waitforbuttonpress()\n\n split_data = self.initialise_clusters_with_kmeans()\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n\n for i in range(max_iter):\n plt.clf()\n split_data = self.expectation(thetas)\n plot_data(split_data)\n thetas = self.maximization(split_data)\n plot_mean(thetas)\n return split_data, thetas", "def plot_decision_boundary(X, Y, models, titles):\n fig, sub = plt.subplots(2, 4, figsize=(20, 8))\n plt.subplots_adjust(wspace=1.0, hspace=0.6)\n\n xx, yy = create_meshgrid(X[:, 0], X[:, 1])\n\n for clf, title, ax in zip(models, titles, sub.flatten()):\n plot_contours(ax, clf, xx, yy,\n cmap=plt.cm.coolwarm, alpha=0.8)\n ax.scatter(X[:,0], X[:,1], c=Y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')\n\tax.set_xlim(xx.min(), xx.max())\n\tax.set_ylim(yy.min(), yy.max())\n\tax.set_xlabel('Xvalues')\n\tax.set_ylabel('Yvalues')\n\tax.set_xticks(())\n\tax.set_yticks(())\n\tax.set_title(title)\n\n back = matplotlib.get_backend()\n manager = plt.get_current_fig_manager()\n if \"QT\" in back:\n manager.window.showMaximized()\n elif \"Tk\" in back:\n manager.resize(*manager.window.maxsize())\n else:\n manager.frame.Maximize(True)\n plt.show()\n plt.close()", "def build_classifier(self, n_neighbours, data_index):\n knn = KNeighborsClassifier(n_neighbors=n_neighbours)\n BayesianKneighborClassifier.update_current_data(self, data_index)\n X_train, X_test, y_train, y_test = BayesianKneighborClassifier.split_test_and_train_data\\\n (self, 0.3, data_index)\n knn.fit(X_train, y_train)\n y_predicted = knn.predict(X_test)\n print(\"KNN classifier built. 
Accuracy score: {} using K={} neighbours in view: {}\".format(\n metrics.accuracy_score(y_test, y_predicted), n_neighbours,\n BayesianKneighborClassifier.views[data_index]))\n return knn", "def plot_metrics3(df, n_arr, risk_best, epsilon_pos, epsilon_neg, row, col,\n **kwargs):\n xlim = (min(n_arr), max(n_arr))\n # g = sns.FacetGrid(df, row = row, col = col,\n # col_order = ['risk', 'gap_FPR', 'gap_FNR'], xlim = xlim,\n # ylim = (0, 1), **kwargs)\n g = sns.FacetGrid(df, row=row, col=col,\n row_order=['risk', 'gap_FPR', 'gap_FNR'], xlim=xlim,\n **kwargs)\n g.map(sns.pointplot, 'n', 'value', order=n_arr, ci='sd')\n g.set_xticklabels(rotation=45)\n\n risk_best = to_iterable(risk_best)\n epsilon_pos = to_iterable(epsilon_pos)\n epsilon_neg = to_iterable(epsilon_neg)\n\n for i, rr in enumerate(risk_best):\n g.axes[0, i].hlines(rr, *g.axes[0, i].get_xlim())\n g.axes[0, i].hlines(rr, *g.axes[0, i].get_xlim())\n\n for i, ee in enumerate(epsilon_pos):\n g.axes[1, i].hlines(ee, *g.axes[1, i].get_xlim())\n g.axes[1, i].hlines(ee, *g.axes[1, i].get_xlim())\n\n for i, ee in enumerate(epsilon_neg):\n g.axes[2, i].hlines(ee, *g.axes[2, i].get_xlim())\n g.axes[2, i].hlines(ee, *g.axes[2, i].get_xlim())\n\n g.set_titles(template='')\n for ax, m in zip(g.axes[0, :], df[col].unique()):\n ax.set_title(m)\n\n return g", "def plotDistributionWithLimits(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for val in llYs:\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def decision_tree(df):\n features = df[['Temperature(F)', 'Humidity(%)', 'Visibility(mi)', 'Wind_Speed(mph)',\n 'Precipitation(in)', 'Amenity', 'Bump', 'Crossing', 'Give_Way',\n 'Junction', 'No_Exit', 'Railway', 'Roundabout', 'Station', 'Stop',\n 'Traffic_Calming', 'Traffic_Signal', 'Civil_Twilight', 'Rush Hour', 'Weekend',\n 'Side_R', 'Season_Spring', 'Season_Summer',\n 'Season_Winter', 'Weather_Condition_Clear', 'Weather_Condition_Fog',\n 'Weather_Condition_Other', 'Weather_Condition_Rain',\n 'Weather_Condition_Snow', 'Weather_Condition_Thunderstorm']]\n X= features\n y = df['Severity']\n clf = DecisionTreeClassifier(min_samples_split=6, min_samples_leaf=2, max_depth=3, \n criterion = 'gini', 
random_state=42)\n clf.fit(X, y)\n\n plt.figure(figsize=(25,10))\n a = plot_tree(clf, \n feature_names=X.columns.to_list(), \n filled=True, \n rounded=True, \n fontsize=14)\n plt.savefig(\"../Images/rockies_decision_tree.png\")\n plt.show()", "def summarize_model(clf_, X_tr, X_te, y_tr, y_te, tree=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n import pandas as pd\n \n y_hat_tr, y_hat_te = fit_n_pred(clf_, X_tr, X_te, y_tr)\n print('Classification Report:')\n print(metrics.classification_report(y_te, y_hat_te))\n \n if tree:\n fig, ax = plt.subplots(figsize=(10,5), nrows=2)\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true',\n ax=ax[0])\n ax[0].set(title='Confusion Matrix')\n ax[0].grid(False)\n\n plot_importance(clf_, X_tr, ax=ax[1])\n plt.tight_layout()\n \n else:\n clf_coef = pd.Series(clf_.coef_[0], index=X_tr.columns, name='Normal')\n abs_coef = pd.Series(abs(clf_.coef_[0]), index=X_tr.columns, name='Absolute')\n posi_coef = pd.Series((clf_coef > 0), name='Positive')\n coef_all = pd.concat([clf_coef, abs_coef, posi_coef], axis=1)\n coef_all.sort_values('Absolute', ascending=True, inplace=True)\n coef_all.tail(20)['Normal'].plot(kind='barh', color=coef_all['Positive'].map({True:'b',False:'r'})\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true')\n plt.title('Confusion Matrix')\n plt.grid(False)\n plt.tight_layout()\n\ndef grid_searcher(clf_, params, X_tr, X_te, y_tr, y_te, cv=None, keep_t=False, train_score=True):\n \n \"\"\"Takes any classifier, train/test data for X/y, and dict of parameters to\n iterate over. Optional parameters select for cross-validation tuning, keeping\n time for running the gridsearch, and returning training scores when done.\n Default parameters only return the fitted grid search object. MUST HAVE Timer\n class imported.\"\"\"\n \n from sklearn.model_selection import GridSearchCV\n import numpy as np\n \n ## Instantiate obj. 
with our targets\n grid_s = GridSearchCV(clf_, params, cv=cv, return_train_score=train_score)\n \n ## Time and fit run the 'search'\n time = Timer()\n time.start()\n grid_s.fit(X_tr, y_tr)\n time.stop()\n \n ## Display results\n tr_score = np.mean(grid_s.cv_results_['mean_train_score'])\n te_score = grid_s.score(X_te, y_te)\n print(f'Mean Training Score: {tr_score :.2%}')\n print(f'Mean Test Score: {te_score :.2%}')\n print('Best Parameters:')\n print(grid_s.best_params_)\n \n ## Time keeping and grid obj\n if keep_t:\n lap = time.record().total_seconds()\n print('**********All done!**********')\n return grid_s, lap\n else:\n return grid_s", "def do_knn(x_data):\n return True", "def plot_decision_contour(pred_func, X, y, labels, targetdir = '.', matrix = 'numpy', reso=50, npoints=400):\n \n print(__name__ + '.plot_decision_contour ...')\n MAXP = min(npoints, X.shape[0])\n D = X.shape[1]\n pad = 0.5\n\n for dim1 in tqdm(range(D)) :\n x_min, x_max = X[:, dim1].min() - pad, X[:, dim1].max() + pad\n for dim2 in range(D) :\n if dim2 <= dim1 :\n continue\n\n # (x,y)-plane limits\n y_min, y_max = X[:, dim2].min() - pad, X[:, dim2].max() + pad\n\n # Grid points\n PX,PY = np.meshgrid(np.linspace(x_min, x_max, reso), np.linspace(y_min, y_max, reso))\n \n # Function values through 'pred_func' lambda \n Z = np.zeros((reso*reso, D))\n Z[:, dim1] = PX.ravel()\n Z[:, dim2] = PY.ravel()\n\n signalclass = 1\n if (matrix == 'torch'):\n Z = pred_func(torch.tensor(Z, dtype=torch.float32))\n Z = Z[:, signalclass].detach().numpy() # 2 output units\n if (matrix == 'numpy'):\n Z = pred_func(Z)\n if (matrix == 'xgboost'):\n Z = pred_func(xgboost.DMatrix(data = Z))\n\n Z = Z.reshape(PX.shape)\n fig, axs = plt.subplots()\n\n # Contour\n cs = plt.contourf(PX, PY, Z, cmap = plt.cm.Spectral)\n\n # Samples as dots\n plt.scatter(X[0:MAXP, dim1], X[0:MAXP, dim2], c = y[0:MAXP], cmap = plt.cm.binary)\n\n plt.xlabel('X[%d]' % dim1 + ' (%s)' % labels[dim1])\n plt.ylabel('X[%d]' % dim2 + ' (%s)' % labels[dim2])\n plt.colorbar(cs, ticks = np.linspace(0.0, 1.0, 11))\n \n plt.savefig(targetdir + str(dim1) + \"_\" + str(dim2) + \".pdf\", bbox_inches='tight')\n plt.close()", "def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def VoronoiNeighbors(\n tree, xy, k=100, **kd_kwds\n):\n from scipy.spatial import Voronoi\n if xy.shape[0] != 1:\n raise ValueError('VoronoiNeighbors is designed for a single target')\n k = min(k, tree.data.shape[0])\n dist, idx = tree.query(xy, k)\n dist = dist[0]\n idx = idx[0]\n xyg = np.vstack([tree.data[idx], xy])\n\n # Create a set of verticies with obs and model\n # then create a Voronoi diagram\n # --Consider optimizing via a Voronoi copy and add_points\n gcidx = xyg.shape[0] - 1\n vor = Voronoi(xyg)\n vor.close()\n\n # For QA\n # plt.interactive(True)\n # voronoi_plot_2d(vor)\n\n # ridge_points \"Indicies of the points between which each\n # Voronoi ridge lies\"[1]\n # - points sharing a ridge are neighbors\n # - the last point is the grid cell\n #\n # [1] https://docs.scipy.org/doc/scipy-0.18.1/reference/\n # generated/scipy.spatial.Voronoi.html\n shared_edges = [xy for xy in vor.ridge_points if gcidx in xy]\n\n # Unique pairs excluding the grid cell piont are the closest\n # observations.\n neighboridx = np.sort(np.unique(shared_edges).ravel())[:-1]\n\n # 
Neighbors\n return dist[neighboridx], idx[neighboridx]", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that corresponds to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def fit(self, X, y, **kwargs):\n\n\t\tif kwargs:\n\t\t\tself.view1_columns_ = kwargs['view1_columns']\n\t\t\tself.view2_columns_ = kwargs['view2_columns']\n\t\t\tself.knn_view1_.n_neighbors = kwargs['n_neighbors']\n\t\t\tself.knn_view2_.n_neighbors = kwargs['n_neighbors']\n\t\t\tself.knn_view3_.n_neighbors = kwargs['n_neighbors']\n\n\t\tX_view1 = X[:, self.view1_columns_]\n\t\tX_view2 = X[:, self.view2_columns_]\n\n\t\t# Train the KNN classifiers.\n\t\tself.knn_view1_.fit(X_view1, y)\n\t\tself.knn_view2_.fit(X_view2, y)\n\t\tself.knn_view3_.fit(X, y)\n\n\t\t# Train the Bayesian classifiers.\n\t\tself.gauss_view1_.fit(X_view1, y)\n\t\tself.gauss_view2_.fit(X_view2, y)\n\t\tself.gauss_view3_.fit(X, y)\n\n\t\t# Calculate the probabilities.\n\t\t_, classes_count = np.unique(y, return_counts=True)\n\t\tself.P_w = classes_count/np.sum(classes_count)\n\n\t\treturn self", "def dk_plotting():\n heatmap_mode1_error_x(make_heatmap=False, make_panel=True)\n\n #heatmap_mode1_error_x()\n figure_2_combined_cross_sections()\n\n #heatmap_combined_error_c()\n #heatmap_combined_error_koff()\n #heatmap_kpr_error_c()\n #heatmap_kpr_error_koff()\n\n #heatmap_kpr2_error_c()\n #heatmap_kpr2_error_koff()\n\n ctildePosterior = [truncate(f, 3) for f in list(np.arange(0.0 * KON / KP, 5.0 * KON / KP + 0.005, 0.005))[1:]]\n kofftildePosterior = [truncate(f, 2) for f in list(np.arange(0.0 / KP, 50.0 / KP + 0.05, 0.05))[1:]]\n\n #heatmap_figure_4()\n\n return 0", "def plot_2d_results(perceptron, data):\n\n\t# Divides the data into classes.\n\ttraining_data_classes = split_into_classes(data['training_data'], data['training_labels'])\n\ttest_data_classes = split_into_classes(data['test_data'], data['test_labels'])\n\n\t# Plots the data.\n\tplt.plot(training_data_classes[0][:, 0], training_data_classes[0][:, 1], 'bo',\n\t\ttraining_data_classes[1][:, 0], 
training_data_classes[1][:, 1], 'ro',\n\t\ttest_data_classes[0][:, 0], test_data_classes[0][:, 1], 'b*',\n\t\ttest_data_classes[1][:, 0], test_data_classes[1][:, 1], 'r*',\n\t\tmarkersize = 12)\n\n\t# Constructs a line that represents the decision boundary.\n\tweights = perceptron.weights\n\tbias = perceptron.bias\n\tx_range = np.array([0, 100])\n\ty_range = -(x_range * weights[0] + bias) / weights[1]\n\n\t# Plots the decision boundary.\n\tplt.plot(x_range, y_range, 'k')\n\tplt.show()", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def k_nn(frame, newPoint, colClass, k): \n counts = []\n \n # find all distances wrt the newPoint\n dist = find_distances(frame, newPoint)\n\n # find the nearest k points, extract their labels and save them in a list\n labels = [label for distance,label in dist[:k]] \n \n # for each class label, count how many occurrencies have been found\n for label in frame[colClass].unique():\n # save the number of occurrencies in a list of tuples (number, label)\n counts.append((labels.count(label), label)) \n \n # sort the list in descending order, and use the first label of the tuples'\n # list to make the prediction \n counts.sort(reverse=True)\n prediction = counts[0][1] \n \n return prediction", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def model_visualization(model,X,y,classifier):\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n 
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model))", "def plot_split(self, conf, nn_in_trn, nn_out_trn, nn_in_test, nn_out_test, y_trn, y_test, a, b, vert_coef, hor_coef):\n plot_win_trn=tk.Toplevel()\n plot_win_trn.title(\"Train data dot plot\")\n fig = Figure(figsize=(5, 4), dpi=100)\n ax=fig.add_subplot(111)\n ax.scatter(nn_in_trn[:, 0], nn_out_trn[:, 0], label=\"Train data\", s=3)\n ax.scatter(nn_in_trn[:, 0], y_trn[:, 0], label=\"Prediction on train data\", s=5)\n ax.set_xlabel('IN[1]')\n ax.set_ylabel('OUT[1]')\n fig.legend()\n canvas = FigureCanvasTkAgg(fig, master=plot_win_trn)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n toolbar = NavigationToolbar2Tk(canvas, plot_win_trn)\n toolbar.update()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n lab1=tk.Label(plot_win_trn,width=35, text=\"Model configuration\"+str(conf))\n lab1.pack()\n lab2=tk.Label(plot_win_trn,width=35, text=\"Train data loss= \"+str(round(a,4))+ \" %\")\n lab2.pack()\n plot_win_trn.geometry(f'+{self.winfo_x()+self.winfo_width()+20}+{self.winfo_y() + vert_coef}')\n plot_win_trn.update_idletasks()\n\n plot_win_test=tk.Toplevel()\n plot_win_test.title(\"Test data dot plot\")\n fig = Figure(figsize=(5, 4), dpi=100)\n ax=fig.add_subplot(111)\n ax.scatter(nn_in_test[:, 0], nn_out_test[:, 0], label=\"Test data\", s=3)\n ax.scatter(nn_in_test[:, 0], y_test[:, 0], label=\"Prediction on test data\",s=5)\n ax.set_xlabel('IN[1]')\n ax.set_ylabel('OUT[1]')\n fig.legend()\n canvas = FigureCanvasTkAgg(fig, master=plot_win_test)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n toolbar = NavigationToolbar2Tk(canvas, plot_win_test)\n toolbar.update()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n lab1=tk.Label(plot_win_test,width=35, text=\"Model configuration\"+str(conf))\n lab1.pack()\n lab2=tk.Label(plot_win_test,width=35, text=\"Test data loss= \"+str(round(b,4))+ \" %\")\n lab2.pack()\n plot_win_test.geometry(f'+{self.winfo_x()+hor_coef+20}+{self.winfo_y() + vert_coef}')\n plot_win_test.update_idletasks()", "def plot_true_predictions(self):\n \n if self.data_generated:\n \n # Choosing a different color for each target\n n_targets = len(self.true_data['targets'])\n cmap = plt.get_cmap('gnuplot')\n colors = [cmap(i) for i in np.linspace(0, 0.9, n_targets)]\n\n # Plot of the ground truth X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n for i in self.true_data['targets']:\n plt.plot(self.true_data['all_x'][i][:,0],self.true_data['all_x'][i][:,2],\\\n '-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n #plt.legend(loc='best')\n\n\n # Plot of the ground truth time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n for i in self.true_data['targets']:\n plt.plot(self.timeline[i],self.true_data['all_x'][i][:,0],\\\n '-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"time\",fontsize=20)\n 
plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n #plt.legend(loc='upper right')\n\n # Plot of the ground truth time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n for i in self.true_data['targets']:\n plt.plot(self.timeline[i],self.true_data['all_x'][i][:,2],\\\n '-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n #plt.legend(loc='upper right')\n plt.show();\n\n elif self.data_given:\n raise ValueError(\"Cannot plot true positions if y_obs is given because the true x are not known.\")\n else:\n raise ValueError(\"No data to plot !\")", "def fig5(X_r, Y, TRAIN_SIZE=6000):\n \n # Normalize X_r\n X_n = preprocessing.normalize(X_r)\n \n #kNN weighting and k\n weights = [ \"uniform\", \"distance\" ]\n ks = [2,4,8,16,32,64] \n \n # Little lambda functions to standardize feature extraction\n pca = lambda X,Y: PCA(n_components=128).fit(X).transform(X)\n lda = lambda X,Y: LDA().fit(X, Y).transform(X)\n idn = lambda X,Y: X\n \n # Start the plot\n fig, ax = plt.subplots()\n plt.ylabel(\"Error %\")\n plt.xlabel(\"k\")\n \n \n # Try every combination (product) of weights, feature extraction and normalization\n for weight, feat_reduce, X_ in itertools.product(\n weights, [pca, lda, idn], [X_r, X_n]):\n \n # Reset error rate\n errors = []\n \n #Flags to make things easier\n reduction = \"PCA\" if feat_reduce == pca else \"LDA\"\n normalized = \"n\" if X_ is X_n else \"r\"\n \n #Initialize a black (i.e. key - cmy_K_) line\n linestyle = \"k\"\n \n # Match the point style used in Vailaya\n if weight == \"uniform\":\n if X_ is X_n:\n linestyle += \"x\"\n else:\n linestyle += \"*\"\n if weight == \"distance\":\n if X_ is X_n:\n linestyle += \"o\"\n else:\n linestyle += \"+\"\n \n # As well as the line style\n if feat_reduce is pca:\n linestyle += \":\" # Dotted\n elif feat_reduce is lda:\n linestyle += \"--\" # Solid\n else:\n linestyle += \"-\" # Dashed\n \n # Loop through all k's \n for k in ks:\n #Initialized classifier parameters\n knn = neighbors.KNeighborsClassifier(warn_on_equidistant=False)\n knn.n_neighbors = k\n knn.weights = weight\n \n #Here's where the lambda's come in handy.\n X = feat_reduce(X_,Y)\n \n # Fit the training set\n knn.fit(X[:TRAIN_SIZE], Y[:TRAIN_SIZE])\n \n # Again ugly code for the predictions\n predictions = []\n for i in range(TRAIN_SIZE, len(X)):\n predictions += [ knn.predict(X[i])[0] ] \n \n # Calculate error rate and append it to error rate list\n error = 1.- float(sum(predictions == Y[TRAIN_SIZE:])) / len(predictions)\n errors += [error]\n \n # Print it just for fun. 
Also in case error rates need to be exported.\n print weight, reduction, normalized, k, error\n \n # Plot the line for all k values \n ax.plot(ks, errors, linestyle)\n \n # Couldn't specify legends properly\n #ax.legend()", "def visualize_openset_classification(data, other_data_dicts, dict_key, data_name,\n thresholds, save_path, tailsize):\n\n lw = 10\n plt.figure(figsize=(20, 20))\n plt.plot(thresholds, data, label=data_name, color=colors[0], linestyle='solid', linewidth=lw)\n\n c = 0\n for other_data_name, other_data_dict in other_data_dicts.items():\n plt.plot(thresholds, other_data_dict[dict_key], label=other_data_name, color=colors[c],\n linestyle=linestyles[c % len(linestyles)], linewidth=lw)\n c += 1\n\n plt.xlabel(r\"Weibull CDF outlier rejection prior $\\Omega_t$\", fontsize=axes_font_size)\n plt.ylabel(\"Percentage of dataset outliers\", fontsize=axes_font_size)\n plt.xlim(left=-0.05, right=1.05)\n plt.ylim(bottom=-0.05, top=1.05)\n plt.legend(loc=0, fontsize=legend_font_size - 15)\n plt.savefig(os.path.join(save_path, data_name + '_' + \",\".join(list(other_data_dicts.keys())) +\n '_outlier_classification' + '_tailsize_' + str(tailsize) + '.pdf'),\n bbox_inches='tight')", "def knn_prediction(X, y, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def plot_decision_tree(classifier, feature_names=None, class_names=None):\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(4, 4))\n tree.plot_tree(\n classifier,\n feature_names=feature_names,\n class_names=class_names,\n rounded=True,\n filled=True,\n )\n fig.show()", "def compare_borders(X,Y, k=50):\n \n # Use sklearn's train/test split\n X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y)\n \n #Remove the center (note border=5 with N=10 will remove nothing. 
Anything larger will end up with redundant labels)\n knn1 = neighbors.KNeighborsClassifier(k).fit(remove_center(X_train, border=1), Y_train)\n knn2 = neighbors.KNeighborsClassifier(k).fit(remove_center(X_train, border=2), Y_train)\n knn3 = neighbors.KNeighborsClassifier(k).fit(remove_center(X_train, border=3), Y_train)\n knn4 = neighbors.KNeighborsClassifier(k).fit(remove_center(X_train, border=4), Y_train)\n knn5 = neighbors.KNeighborsClassifier(k).fit(X_train, Y_train)\n \n # Arrays of predictions == actual\n p1 = knn1.predict(remove_center(X_test, border=1)) == Y_test\n p2 = knn2.predict(remove_center(X_test, border=2)) == Y_test\n p3 = knn3.predict(remove_center(X_test, border=3)) == Y_test\n p4 = knn4.predict(remove_center(X_test, border=4)) == Y_test\n p5 = knn5.predict(X_test) == Y_test\n \n # Accuracy function\n acc = lambda X: 1.*sum(X)/len(X)\n \n # Print results\n print \"border =1\", acc(p1)\n print \"border =2\", acc(p2)\n print \"border =3\", acc(p3)\n print \"border =4\", acc(p4)\n print \"no border\", acc(p5)", "def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)", "def vis_detections(im, class_name, dets, image_name, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n\n# im = im[:, :, (2, 1, 0)]\n# fig, ax = plt.subplots(figsize=(12, 12))\n# ax.imshow(im, aspect='equal')\n # for i in inds:\n # bbox = dets[i, :4]\n # score = dets[i, -1]\n #print max_inds\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n\n# ax.add_patch(\n# plt.Rectangle((bbox[0], bbox[1]),\n# bbox[2] - bbox[0],\n# bbox[3] - bbox[1], fill=False,\n# edgecolor='red', linewidth=3.5)\n# )\n# ax.text(bbox[0], bbox[1] - 2,\n# '{:s} {:.3f}'.format(class_name, score),\n# bbox=dict(facecolor='blue', alpha=0.5),\n# fontsize=14, color='white')\n\n # end for\n #print image_name, class_name\n #print score\n # file.writelines([image_name,'\\t',class_name,'\\t',str(score),'\\n'])\n # ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),fontsize=14)\n # plt.axis('off')\n # plt.tight_layout()\n # plt.draw()\n\t### SAVE IMAGES ? 
###\n save_img_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_img')\n # if not os.path.exists(save_img_dir):\n # os.makedirs(save_img_dir)\n # plt.savefig(os.path.join(save_img_dir, image_name + '_' + class_name))\n\n boxes = {'boxes': ((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1])}\n \n save_mat_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_box')", "def train(self, data, labels, validationData, validationLabels):\n \n # -- calls the classify method to evaluate performance \n # -- OUR CODE HERE\n \n legalLabels = labels\n self.legalLabels = legalLabels\n trainingData = validationData\n trainingLabels = validationLabels\n \n kCorrect = util.Counter()\n self.conditionalProb = []\n \n \n self.prior = util.Counter()\n for label in labels:\n self.prior[label] += 1.0\n self.prior.normalize()\n #for label in self.prior:\n # self.prior[label]/=len(trainingLabels)\n \n \"\"\"\n print \"legal labels are \", len(legalLabels)\n print \"kgrid is \", kgrid\n print \"the legal labels are.... \", legalLabels\n \"\"\"\n \n import time\n \n condprobForK = {}\n \n # -- iterate through each k in kgrid... should we be doing this?\n # -- won't this affect the cond prob tables? :(\n k = 0.5\n #print \"working on k = \",k,\" in kgrid\"\n \n # -- reset the conditonal prob table\n # -- each time we go through a different k...\n self.conditionalProb = {}\n \n # -- go through each label and initialize the Counter for that label (the cond prob table)\n for label in legalLabels:\n self.conditionalProb[label] = util.Counter()\n \n # -- go through each piece of training data and train the tables on it \n for dataNum in range(len(trainingData)):\n \n # -- identify which label we're using... not sure if this is correct\n label = trainingLabels[dataNum] # 0 or like 9 or 2\n \n # -- iterate through each pixel and update the conditional prob counter for that label\n for pixel in trainingData[dataNum]:\n \n if pixel is \"moreThanOneConnBlackRegions\":\n #print \"Number is :: \", label, \" and has \", trainingData[dataNum][pixel]\n assert 1 is 1\n \n on_off = trainingData[dataNum][pixel] * 1.0\n self.conditionalProb[label][pixel] += on_off * 1.0\n \n # -- now we go through and add k to each of the conditional probabilities\n # -- note that we do so for each label and every single pixel\n for label in legalLabels:\n for pixel in self.conditionalProb[label]: \n # -- add the k value \n self.conditionalProb[label][pixel] += k * 1.0\n assert self.conditionalProb[label][pixel] >= k # -- sanity check that it should be at least k\n self.conditionalProb[label][pixel] /= (self.prior[label] * len(trainingLabels) + k*2)\n \n \n \n # -- END OUR CODE", "def knn(k, train_data, train_labels, valid_data):\n dist = l2_distance(valid_data.T, train_data.T)\n nearest = np.argsort(dist, axis=1)[:, :k]\n\n train_labels = train_labels.reshape(-1)\n valid_labels = train_labels[nearest]\n\n # Note this only works for binary labels:\n valid_labels = (np.mean(valid_labels, axis=1) >= 0.5).astype(np.int)\n valid_labels = valid_labels.reshape(-1, 1)\n\n return valid_labels", "def knn_classify(k, labeled_points, new_point):\n by_distance = sorted(labeled_points,\n key=lambda point, _: la.distance(point, new_point))\n\n #find the labels for the k clsest\n k_nearest_labels = [label for _, label in by_distance[:k]]\n #and ket them vote\n return majority_vote(k_nearest_labels)", "def run_knn(k, train_data, train_labels, valid_data):\n\n dist = l2_distance(valid_data.T, train_data.T)\n nearest = np.argsort(dist, axis=1)[:,:k]\n\n train_labels = 
train_labels.reshape(-1)\n valid_labels = train_labels[nearest]\n\n # note this only works for binary labels\n valid_labels = (np.mean(valid_labels, axis=1) >= 0.5).astype(np.int)\n valid_labels = valid_labels.reshape(-1,1)\n\n return valid_labels", "def grid_search(title, class_names, background_images, option_threshold_curve):\n # prepare title\n if title == DEFAULT_TITLE:\n title = f\"class{'es' if len(class_names) > 1 else ''} {', '.join(class_names)}\"\n\n # calculate true positive confidences\n true_positive_confidences = []\n num_samples = 0\n for class_name in class_names:\n images = load_base64(\n class_name,\n os.path.join(TEST_DIR, class_name),\n desc=f\"[{class_name}] loading\"\n )\n num_samples = num_samples + len(images)\n true_positive_confidences.extend(_calculate_tp_confidences(\n images,\n class_name\n ))\n\n # calculate false positive confidences\n false_positive_confidences = _calculate_fp_confidences(\n background_images,\n class_names\n )\n\n print(\"Grid searching...\")\n tvals = np.linspace(0, 1, int(1 / STEP_SIZE) + 1)\n # discard thresholds below the 1/num_classes\n tvals = [t for t in tvals if t >= 1 / NUM_CLASSES]\n deltas, tp_percentages, fp_percentages = calculate_deltas(\n tvals,\n true_positive_confidences,\n false_positive_confidences,\n num_samples\n )\n best_t_index = np.argmin(deltas)\n best_t = tvals[best_t_index]\n print(f\"Best threshold: {best_t}\")\n\n plt.figure()\n plt.plot(tvals, deltas)\n plt.xlim([tvals[0], tvals[-1]])\n plt.xlabel(\"Threshold\")\n plt.ylabel(\"$\\delta$\")\n plt.title(TITLE_DELTA % title)\n plt.grid()\n\n if option_threshold_curve:\n plt.figure()\n plt.axes().set_aspect('equal')\n plt.plot(\n tp_percentages,\n fp_percentages,\n label=\"Threshold values\"\n )\n optimal_tp = len(true_positive_confidences) / num_samples\n plt.scatter([optimal_tp], [0], color=\"orange\", label=\"Optimal point\")\n nearest_point = (\n tp_percentages[best_t_index],\n fp_percentages[best_t_index]\n )\n plt.plot(\n [optimal_tp, nearest_point[0]],\n [0, nearest_point[1]],\n color=\"gray\",\n linestyle=\":\",\n label=\"Nearest point\"\n )\n plt.text(nearest_point[0], nearest_point[1], f\"$t$ = {best_t:.2f}\")\n left, right = plt.xlim()\n bottom, top = plt.ylim()\n plt.xlim(min(left, bottom), max(right, top))\n plt.ylim(min(left, bottom), max(right, top))\n plt.xlabel(\"True positive percentage\")\n plt.ylabel(\"False positive percentage\")\n plt.title(TITLE_THRESHOLD_CURVE % title)\n plt.legend()\n plt.grid()", "def plot_boxes(predictions, labels):\n visuals =[] \n num_det = predictions.shape[0]\n for i in range(num_det):\n box = predictions[i:i+1]#.numpy()\n label = labels[i]\n corner = center_to_corner_box3d(box[:, :3], box[:, 3:6], box[:, -1])[0].tolist()\n color = label2color(int(label) -1)\n visuals.append(corners_to_lines(corner, color))\n return visuals", "def display_missclassified(class_to_idx: Dict[str,int], \n targets: List[int], \n predictions: List[int], \n images: List[np.ndarray], \n gridsize: Tuple[int] = (4,4)):\n fig = plt.figure()\n plot_counter = 1\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n idx_to_class = {i:label for i, label in enumerate(class_to_idx)}\n for i in range(len(targets)):\n if plot_counter > gridsize[0]*gridsize[1]:\n break\n \n image = images[i].transpose(1, 2, 0)\n image = ((image * std) + mean) * 255\n image = image.astype(\"uint8\")\n \n image = cv2.resize(image, (128, 128))\n image = cv2.putText(image, idx_to_class[predictions[i]], (0,20), 3, 0.4, (0,0,255), 1)\n if 
predictions[i] == targets[i]:\n pass\n else:\n ax = fig.add_subplot(gridsize[0], gridsize[1], plot_counter)\n ax.imshow(image)\n plot_counter += 1\n plt.show()", "def kNN(x, y, s):\n usx = np.array(x)\n usy = np.array(y)\n\n # split data into train and validation set\n x_train, x_test, y_train, y_test = train_test_split(usx, usy, test_size=s)\n clf = neighbors.KNeighborsClassifier(algorithm='kd_tree')\n clf.fit(x_train, y_train)\n y_predict = clf.predict(x_test)\n\n # select only the probabilities of being fraud\n y_pred_prob = clf.predict_proba(x_test)[:, 1]\n return y_predict, y_test, y_pred_prob", "def accuracy_plot(LS_sizes, data_fun):\r\n\r\n opt_neigh = []\r\n\r\n #plot of optimal n_neighbors as a function of the LS size\r\n\r\n for size in LS_sizes:\r\n\r\n acc = []\r\n neighbors_values = np.arange(1,size+1,1)\r\n\r\n # For a given LS size, plots of accuracy(n_neighbors)\r\n\r\n for value in neighbors_values:\r\n\r\n X_train, y_train, X_test, y_test = data_fun(n_ts=500, n_ls=size)\r\n\r\n clf = KNeighborsClassifier(n_neighbors = value)\r\n clf = clf.fit(X_train, y_train)\r\n acc.append(clf.score(X_test,y_test))\r\n\r\n plt.figure()\r\n plt.plot(neighbors_values,acc, '.')\r\n plt.title(\"Evolution of accuracy as a function \\nof n_neighbors for LS_size = {} samples, for {}.\".format(size, data_fun.__name__))\r\n plt.savefig(\"acc(n_neigh)_{}_{}.pdf\".format(size, data_fun.__name__))\r\n\r\n opt_neigh.append(np.argmax(acc)+1)\r\n\r\n plt.figure()\r\n plt.plot(LS_sizes, opt_neigh, '.')\r\n plt.title(\"Optimal n_neighbors as a function \\nof the size of the learning sample, for {}.\".format(data_fun.__name__))\r\n plt.savefig(\"opt_n_neigh(LS_size)_{}.pdf\".format(data_fun.__name__))", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def 
knn(X_tr_sc, n_neighbors, radius): \n\n neigh = NearestNeighbors(n_neighbors, radius,metric='euclidean')\n model = neigh.fit(X_tr_sc) \n \n return model", "def predict_knn(data, example, k):\n # Use distance to find most similar examples, sort\n distTuples = list()\n for d in data:\n distTuples.append((d, example.distance(d)))\n distTuples.sort(key=lambda x: x[1])\n\n # Find most common labels\n labels = list()\n for i in range(k):\n newLabel = True\n for j in range(len(labels)):\n if labels[j][0] == distTuples[i][0].label:\n labels[j] = (labels[j][0], labels[j][1] + 1)\n newLabel = False\n if newLabel:\n labels.append((distTuples[i][0].label, 1))\n return max(labels, key=lambda x: x[1])[0]", "def plot_data (features : list, actual_labels : list, classified_labels : list = None,\n extra_lines : list = None, normalize=False):\n samples = np.array(features)\n if normalize:\n norms = np.linalg.norm(samples, axis=1)\n l=[]\n for i, s in enumerate(samples):\n l.append(s/norms[i])\n samples = np.array(l)\n \n plt.figure(figsize=(8, 8))\n for (idx_case, ((actual, classified), marker, color)) in enumerate(zip(cases, markers, colors)):\n mask = np.logical_and(np.equal(actual_labels, actual), \n np.equal(actual if classified_labels == None else classified_labels, classified))\n if not np.any(mask): continue\n plt.scatter(\n samples[mask, 0], samples[mask, 1],\n label = f\"Class {actual}\" if classified_labels == None else f\"Was {actual}, classified {classified}\",\n marker = marker, s = 300, c = [color],\n )\n # Add the lines to show the true classes boundaries, if provided\n if extra_lines != None:\n for line in extra_lines:\n plt.plot(line[0], line[1], color = 'gray')\n plt.legend()", "def _fit(self, data):\n\n\t\ttrain_in, train_labels = self._split_inputs_outputs(data)\n\t\tclf = KNeighborsClassifier(n_neighbors=self.k)\n\t\tclf.fit(train_in, train_labels)\n\n\t\treturn clf", "def visualize_features_according_class(features: np.array, labels: np.array):\n # check if labels and features formats are correct\n if len(features.shape) != 2:\n raise AttributeError('Provided features must be 2-dimensional. Got %i.' % len(features.shape))\n if len(labels.shape) > 2:\n raise AttributeError('Provided labels must be 2- or 1-dimensional. Got %i.' 
% len(labels.shape))\n # reshape labels if they are 2-dimensional\n if len(labels.shape) == 2:\n labels = labels.reshape((-1,))\n # transform data via TSNE\n tsne = TSNE(n_components=2)\n features = tsne.fit_transform(features)\n # create support variables to create graph\n num_classes = np.unique(labels).shape[0]\n colors = [i for i in range(num_classes)]\n class_names = ['Neutral', 'Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise']\n # creating graph\n plt.figure(figsize=(10, 8))\n colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']\n for i, c, label in zip(range(num_classes), colors, class_names):\n plt.scatter(features[labels == i, 0], features[labels == i, 1], c=c, label=label)\n plt.legend()\n plt.show()", "def knn_manage(k):\n\n xtrain, xtest, label_train, label_test = get_data()\n pred = knn_classify(xtrain, xtest, label_train, k)\n conf_mat, accuracy, misclassified = confusion_matrix_accuracy(pred, label_test)\n print accuracy\n print conf_mat", "def kNN_train(self, x_train, y_train, x_test, k = 5, processing = None, distMethod = \"Manhattan\"):\n y_test = list()\n\n if processing == \"Scalar\":\n # print(\"Preprocessing = Scalar\")\n stdScalar = preprocessing.StandardScaler().fit(x_train)\n x_train = stdScalar.transform(x_train)\n x_test = stdScalar.transform(x_test)\n\n elif processing == \"MinMax\":\n\n # print(\"Preprocessing = MinMax\")\n mmScalar = preprocessing.MinMaxScaler()\n x_train = mmScalar.fit_transform(x_train)\n x_test = mmScalar.fit_transform(x_test)\n\n elif processing == \"None\":\n self.true = True\n # print(\"No Preprocessing\")\n\n else:\n print(\"wrong processing\")\n exit()\n\n for i in range(0, len(x_test)):\n y_test_temp = list()\n zeroCount = 0\n oneCount = 0\n\n # find distance of a instance in test test to all instances in training set\n for j in range(0, len(x_train)):\n if distMethod == \"Manhattan\":\n y_test_temp.append(self.manhattan(x_train[j], x_test[i]))\n elif distMethod == \"Euclidean\":\n y_test_temp.append(self.euclidean(x_train[j], x_test[i]))\n else:\n print \"something wrong with distance calculation\"\n exit()\n\n # take indices of k nearest points\n # print y_test_temp\n temp = np.asarray(y_test_temp).argsort()[:k]\n # check class of each of k nearest points\n for tmp in temp:\n if y_train[tmp] == 0:\n zeroCount += 1\n elif y_train[tmp] == 1:\n oneCount += 1\n else:\n print(\"something wrong in counting\")\n\n # classify\n if zeroCount >= oneCount:\n y_test.append(int(0))\n elif oneCount > zeroCount:\n y_test.append(int(1))\n else:\n print(\"somethign wrong\")\n\n # print y_test\n return y_test", "def plot_partial_dependence(estimator, X, features, *, feature_names=..., target=..., response_method=..., n_cols=..., grid_resolution=..., percentiles=..., method=..., n_jobs=..., verbose=..., line_kw=..., contour_kw=..., ax=..., kind=..., subsample=..., random_state=...):\n ...", "def plot_kinetics(k_data, i_data, tlim=None, xlim=None, lb=10, mpp=0.33, seg_length=100, fps=10, plot=True):\n \n t = [] \n power = []\n \n # apply tlim\n if tlim == None:\n pass\n elif isinstance(tlim, int):\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ tc < tlim]\n i_data = i_data.loc[i_data.t / fps < tlim]\n elif isinstance(tlim, list) and len(tlim) == 2:\n assert(tlim[1]>tlim[0])\n tc = (k_data.segment-1)*seg_length/fps\n k_data = k_data.loc[ (tc < tlim[1]) & (tc >= tlim[0])]\n i_data = i_data.loc[(i_data.t / fps < tlim[1]) & (i_data.t / fps >= tlim[0])]\n else:\n raise ValueError('tlim should be None, int or list of 2 int') \n \n # 
compute exponents at different time\n # t, power will be plotted on ax1\n for idx in k_data.segment.drop_duplicates():\n subdata = k_data.loc[k_data.segment==idx]\n xx, yy = postprocess_gnf(subdata, lb, xlim=xlim, sparse=3)\n x = np.log(xx)\n y = np.log(yy)\n p = np.polyfit(x, y, deg=1)\n t.append((idx-1)*seg_length/fps)\n power.append(p[0])\n\n # rescale light intensity to (0, 1)\n # t1, i will be plotted on ax2\n t1 = i_data.t / fps\n i = i_data.intensity - i_data.intensity.min()\n i = i / i.max()\n \n data = {'t0': t, 'alpha': power, 't1': t1, 'i': i}\n \n if plot == True:\n # set up fig and ax\n fig = plt.figure()\n ax1 = fig.add_axes([0,0,1,1])\n ax2 = ax1.twinx()\n\n # plot t, power\n color = wowcolor(0)\n ax1.set_xlabel('$t$ [s]')\n ax1.set_ylabel('$\\\\alpha$', color=color)\n ax1.plot(t, power, color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n\n # plot t1, intensity\n color = wowcolor(4)\n ax2.set_ylabel('$I$', color=color)\n ax2.plot(t1, i, color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n return data, fig, ax1\n else:\n return data", "def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters, \n nFolds = 1, reTrain = True, plotGraphs = False):\n leastLoss = None\n bestModel = None\n bestHyperParams = None\n \n \"\"\"Generate the parameter grid\"\"\"\n parameterGrid = []\n gridKeys = []\n \n parameterGrid = list(product(*hyperParameters.values()))\n hyperParameterKeys = hyperParameters.keys()\n \n \"\"\"For plottong graphs\"\"\"\n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n index = 0\n fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))\n fig = plt.figure()\n fig.set_figheight(15)\n fig.set_figwidth(15)\n ax = fig.add_subplot(111, projection='3d')\n \n\n \"\"\"Grid search for cartesian product of hyperParameters\"\"\" \n for parameterMesh in parameterGrid:\n hyperParameterMesh = {}\n for k,v in zip(hyperParameterKeys, parameterMesh):\n hyperParameterMesh[k] = v\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(hyperParameterMesh)\n \n \"\"\"Perform grid search with cross validation\"\"\"\n if nFolds > 1:\n modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,\n xTrain = xTrain,\n yTrain = yTrain,\n nFolds = nFolds,\n modelParameters = updatedParam) \n \n \n \"\"\"For storing best model\"\"\"\n avg = np.average(analysisMetricList)\n if leastLoss == None or avg < leastLoss:\n leastLoss = avg\n bestModel = modelParams\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting\"\"\"\n if plotGraphs:\n foldIndex = 1\n\n ax.scatter(hyperParameterMesh['alpha'], hyperParameterMesh['regularizationParameter'], \n avg, marker = 'o', label = str(hyperParameterMesh))\n \n\n for train, test in zip(trainLossList, testLossList):\n axs[index][0].plot(train, label = \"Fold-\" + str(foldIndex))\n axs[index][1].plot(test, label = \"Fold-\" + str(foldIndex))\n foldIndex = foldIndex + 1\n \n axs[index][0].legend()\n axs[index][0].grid()\n \n axs[index][1].legend()\n axs[index][1].grid()\n \n axs[index][0].set_title(\"Train set for \" + str(hyperParameterMesh))\n axs[index][1].set_title(\"Validation set for \" + str(hyperParameterMesh))\n \n index = index + 1\n \n \n \"\"\"Perform only grid search and no cross validation. 
Test set will be used for validation\"\"\" \n else:\n trainedModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n \n \"\"\"For storing best model\"\"\"\n if leastLoss == None or testLoss[-1] < leastLoss:\n leastLoss = testLoss[-1]\n bestModel = trainedModel\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting graphs\"\"\"\n if plotGraphs:\n axs[index][0].plot(trainLoss, label = \"Training set Loss for \" + str(hyperParameterMesh))\n axs[index][0].legend()\n axs[index][0].grid()\n axs[index][1].plot(testLoss, label = \"Test set Loss for \" + str(hyperParameterMesh))\n axs[index][1].legend()\n axs[index][1].grid()\n index = index + 1\n \n if plotGraphs:\n ax.legend()\n ax.set_xlabel('alpha')\n ax.set_ylabel('regularizationParameter')\n ax.set_zlabel('RMSE')\n\n plt.show()\n plt.close()\n \n if reTrain:\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(bestHyperParams)\n\n bestModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n print trainLoss[-1]\n print testLoss[-1]\n \n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n fig, axs = plt.subplots(1, 2, figsize = (plotWidth, plotHeight)) \n \n plt.suptitle(\"Best model\")\n\n axs[0].plot(trainLoss, label = \"Training set Loss for \" + str(bestHyperParams))\n axs[0].legend()\n axs[0].grid()\n axs[1].plot(testLoss, label = \"Test set Loss for \" + str(bestHyperParams))\n axs[1].legend()\n axs[1].grid()\n \n plt.show()\n \n \n \n return bestModel, bestHyperParams", "def plot_filtered_spots(\n adata, \n kernel_matrix, \n contrib_thresh,\n row_key='row',\n col_key='col',\n ax=None,\n figure=None,\n dsize=37,\n ticks=True,\n fig_path=None,\n fig_format='pdf',\n fig_dpi=150\n ):\n if ax is None:\n width = 5\n figure, ax = plt.subplots(\n 1,\n 1,\n figsize=(width,5)\n )\n\n # Filter spots with too little contribution\n # from neighbors\n contrib = np.sum(kernel_matrix, axis=1)\n keep_inds = [\n i\n for i, c in enumerate(contrib)\n if c >= contrib_thresh\n ]\n print('Kept {}/{} spots.'.format(len(keep_inds), len(adata.obs)))\n\n cat = []\n keep_inds = set(keep_inds)\n for ind in range(adata.obs.shape[0]):\n if ind in keep_inds:\n cat.append('Kept')\n else:\n cat.append('Filtered')\n cat_palette = ['#595959', '#d9d9d9']\n plot_slide(\n adata.obs,\n cat,\n cmap='categorical',\n colorbar=False,\n vmin=None,\n vmax=None,\n title='Filtered Spots',\n ax=ax,\n figure=figure,\n ticks=ticks,\n dsize=dsize,\n row_key=row_key,\n col_key=col_key,\n cat_palette=cat_palette\n )\n\n if fig_path:\n plt.tight_layout()\n figure.savefig(\n fig_path,\n format=fig_format,\n dpi=fig_dpi\n )\n plt.show()", "def grid_search_intro_model_with_latent_topics(k):\n if k == 100: # there exists a saved file already if using 100 latent topics\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n else:\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/intro_data_w_content_5_22.csv')\n prep.prepare(n_components=k, use_cached_tfidf='/home/ubuntu/ca_bills_project/data/extra/cached_tfidf_real_05-23-17-05-28.pkl', save=True)\n\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', u'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No', u'taxlevy_Yes']\n features += topic_features\n X_train, y_train = prep.subset(features)\n\n rf = 
RandomForestClassifier()\n gb = GradientBoostingClassifier()\n ada = AdaBoostClassifier()\n\n mc = ModelChooser([rf, gb, ada])\n\n tuning_params = [ {'max_features': [.1, .5, .7], 'max_depth': [5, 8, 10], 'n_estimators': [100000]},\n {'learning_rate': [.1, .05], 'max_depth': [2, 4], 'n_estimators': [100, 500]},\n {'learning_rate': [.1, .05], 'n_estimators': [100, 500]}]\n\n mc.grid_search(X_train, y_train, tuning_params)", "def k_nearest_neighbors(x_test, df_training, k):\n\n return np.argpartition(distance_to_each_training_point(x_test,\n df_training), k-1)[:,0:k]", "def parameter_compare(regressions,colors=['m','c'],upper_q=75,lower_q=25,ci_alpha = 0.2, bound_alpha = 0.0,\n labels = None,vertical_bbox_position = 1.4,width = 6,height = 5,draw_samples=True,num_samples =500):\n\n assert type(regressions) is dict\n \n # If no labels are provided, we take them from the first DynamicRegression object\n if labels is None:\n labels = regressions[regressions.keys()[0]].predictor_columns\n \n # this is the number of subplots in this figure\n n_predictors = regressions[regressions.keys()[0]].design.shape[1]\n figure, axes = plt.subplots(n_predictors,figsize = (width,height),sharex=True)\n \n for i,key in enumerate(regressions.keys()):\n \n if draw_samples:\n samples = regressions[key].ffbs.backward_sample(num_samples = num_samples)\n else:\n samples = regressions[key].ffbs.theta\n x = regressions[key].design.index\n \n for j in range(n_predictors):\n \n # Calculate and plot the confidence interval plus median\n lower = np.percentile(samples[:,j,:],lower_q,axis=1)\n upper = np.percentile(samples[:,j,:],upper_q,axis=1)\n median = np.percentile(samples[:,j,:],50,axis=1)\n axes[j].fill_between(x,upper,lower,color=colors[i],alpha = ci_alpha,\n label = '{0}%-{1}% range for {2}'.format(lower_q,upper_q,key))\n axes[j].plot(x,lower,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,upper,color=colors[i],linestyle='--',alpha = bound_alpha)\n axes[j].plot(x,median,color=colors[i])\n axes[j].tick_params(direction = 'in')\n\n # a twin axis is made so we can label it easily on the right hand side of the plot\n twin = plt.twinx(axes[j])\n twin.set_ylabel(labels[j])\n \n # hide the tick labels and ticks because we only want the axis label\n twin.set_yticks([])\n \n axes[0].legend(ncol=len(list(regressions.keys())),bbox_to_anchor=(1.00, vertical_bbox_position), borderaxespad=0.,frameon=True,edgecolor='k',fancybox=False)\n return figure", "def draw_k_main_features_cv(feature_importance_pool, first_k=20):\n name_mean_std_pool = []\n for name, importances in feature_importance_pool.items():\n mean = numpy.mean(importances)\n std = numpy.std(importances, ddof=1)\n name_mean_std_pool.append([name, mean, std])\n\n name_mean_std_pool = sorted(name_mean_std_pool, key=lambda x: -x[1])\n\n name_pool, mean_pool, std_pool = [], [], []\n for name, mean, std in name_mean_std_pool[:first_k]:\n name_pool.append(name)\n mean_pool.append(mean)\n std_pool.append(std)\n\n fig, ax_features = pyplot.subplots(figsize=(10, 10))\n ax_features.bar(name_pool, mean_pool, yerr=std_pool)\n ax_features.set_xticklabels(\n name_pool, rotation_mode='anchor', rotation=45,\n horizontalalignment='right'\n )\n ax_features.set(\n title=\"Feature importances(with stand deviation as error bar)\",\n xlabel='Feature name', ylabel='Importance'\n )\n\n return (fig, ax_features)", "def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, 
one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in like_ind])\n\n plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n # get limits\n xmin = features.column_min(0) - .5\n xmax = features.column_max(0) + .5\n ymin = features.column_min(1) - .5\n ymax = features.column_max(1) + .5\n\n plt.xlim(xmin,xmax)\n plt.ylim(ymin,ymax)\n\n # Track the current dividing line, as well as the number of epochs passed\n divider, = plt.plot([],[])\n epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n def update(i):\n \"\"\"\n 1.) Get the next set of weights from the tracker\n 2.) Calculate and draw the new divider line\n 3.) Update the epoch counter\n 4.) If we are at the end of an epoch, plot a dashed divider line to track progress\n \"\"\"\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider\n\n ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n plt.legend()\n\n # optional save file\n if len(sys.argv) >= 3 :\n ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n plt.show()", "def knn(X,Y):\n \n # Transform all X data by PCA. Note that PCA was fit on the testing set as well as training.\n pca = PCA(n_components=100)\n X_r = pca.fit(X).transform(X)\n \n # Transform all X data by LDA. Same problem as above.\n lda = LDA()\n X_r2 = lda.fit(X, Y).transform(X)\n \n # Vary k.\n for k in [1,2,4,8,16,32, 64, 128, 256, 512]:\n \n # Training set was fixed at first 2000 vectors. This was for a smaller dataset at the time\n \n # No feature extraction\n knn = neighbors.KNeighborsClassifier(k)\n knn.fit(X[:2000], Y[:2000])\n \n # PCA\n knn2 = neighbors.KNeighborsClassifier(k)\n knn2.fit(X_r[:2000], Y[:2000])\n \n # LDA\n knn3 = neighbors.KNeighborsClassifier(k)\n knn3.fit(X_r2[:2000], Y[:2000])\n \n #Prediction results. Rather ugly way to code this looking back.\n predict = []\n predict2 = []\n predict3 = []\n for i in range(2000, len(X)):\n predict += [ knn.predict(X[i]) == Y[i] ]\n predict2 += [ knn2.predict(X_r[i]) == Y[i] ]\n predict3 += [ knn3.predict(X_r2[i]) == Y[i] ]\n \n \n # Plot accuracy. 
R= no feature extraction, G= PCA, B= LDA \n pylab.scatter(k, float(sum(predict))/len(predict), c='r')\n pylab.scatter(k, float(sum(predict2))/len(predict2), c='g')\n pylab.scatter(k, float(sum(predict3))/len(predict3), c='b')", "def predict_labels(self, distances, k=1):\n\n num_test = distances.shape[0]\n Y_pred = np.zeros((num_test,))\n\n \n for i in range(num_test):\n # extracting k-nearest-neighbors for each test-point\n kNN_idxs = np.argsort(distances[i,:])[0:k]\n \n # voting among the k-nearest-neighbors\n kNN_labels = {}\n # print(type(kNN_labels))\n\n for j in range(k):\n m_label = self.Y_train[kNN_idxs[j]]\n if m_label in kNN_labels.keys():\n # print(type(kNN_labels))\n kNN_labels[m_label] += 1 # increment count\n else:\n # print(m_label,'....', type(kNN_labels))\n kNN_labels[m_label] = 1 # initial count when the label occurs\n \n # counting the winning label\n\n winning_label = kNN_labels.keys()[0] # initialization\n \n for label in kNN_labels.keys():\n if kNN_labels[label] > kNN_labels[winning_label]:\n winning_label = label\n elif kNN_labels[label] == kNN_labels[winning_label]:\n # tie breaker\n if label < winning_label:\n winning_label = label\n \n\n Y_pred[i] = winning_label # storing winning label for each test-point\n \n return Y_pred" ]
[ "0.7444855", "0.6746422", "0.66787404", "0.64081985", "0.6387924", "0.63505673", "0.62327814", "0.62229425", "0.61925626", "0.6169651", "0.61491215", "0.61412793", "0.6130474", "0.61028457", "0.60486627", "0.6044063", "0.6039184", "0.60355693", "0.599889", "0.5996375", "0.59319514", "0.5906268", "0.58779544", "0.58564925", "0.5840142", "0.5834796", "0.5776254", "0.5763396", "0.57293314", "0.57292587", "0.5722269", "0.57088107", "0.5700497", "0.56929934", "0.56893194", "0.56871593", "0.5685026", "0.56585395", "0.564135", "0.56390536", "0.5633387", "0.5631368", "0.56153905", "0.5569474", "0.55597186", "0.5550992", "0.5549865", "0.55435216", "0.5539137", "0.5528251", "0.55281234", "0.5527325", "0.55230767", "0.55212337", "0.5513448", "0.55119413", "0.55022", "0.54851", "0.54746956", "0.54669267", "0.5454989", "0.5454989", "0.5449659", "0.5438534", "0.543525", "0.54312277", "0.54264504", "0.5415207", "0.541207", "0.54117924", "0.5406337", "0.54013187", "0.5398365", "0.5394278", "0.53879774", "0.53809255", "0.5379002", "0.53777623", "0.53684515", "0.53594804", "0.53585505", "0.53583056", "0.535676", "0.5355727", "0.5353413", "0.5349296", "0.53484887", "0.5346527", "0.5344", "0.53429854", "0.5341343", "0.53402376", "0.5327953", "0.53260636", "0.53173906", "0.53095853", "0.52960134", "0.52875924", "0.52864605", "0.5284013" ]
0.7342933
1
Inits the client manager.
def __init__(self, **auth_kwargs): self.session = None self.neutron = None self.nova = None self.glance = None self.cinder = None self.swift = None self.keystone = None self.auth_kwargs = auth_kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_setup(self):\n self.client = Client()", "def init_client(self, client):\n self.client = client", "def initialize(self):\n self._validate_client_objects()\n for execution_type in self.clients:\n # check for valid connection is done in _validate_client_objects()\n _ = self.clients[execution_type].connection # Unused", "def __init__(self, client):\n self._client = client", "def __init__(self, client=None):\n self._client = client", "def init_client():\n init_config()\n begin_sending_packets()", "def __init__(self, client):\n\n self.client = client", "def __init__(self, client):\n self.client = client", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def __init__(self):\n self.client.ssl = True\n self.client.http_client_debug = False\n self.createBaseFolder()", "def __init__(self, *args, **kwargs):\n super(Client, self).__init__(role='c', *args, **kwargs)\n\n # Internal variables\n self._bulksize = None\n self._server_hostname = None\n self._port = None\n self._num_streams = None\n self._zerocopy = False", "def init_client_manager(ip, port, authkey):\n class ServerQueueManager(SyncManager):\n pass\n\n ServerQueueManager.register('get_trmanager_plmanager_queue')\n ServerQueueManager.register('get_player_trmanager_queue')\n\n manager = ServerQueueManager(address=(ip, port), authkey=authkey)\n print('Connecting queue to %s:%d ...' % (ip, port))\n manager.connect()\n\n print('Connected.')\n return manager", "def __init__(self, client):\n super().__init__(client)", "def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()", "def __init__(self, client, **kwargs):\n self._ac = client\n self._wrapped = kwargs", "def __init__(self, client_config, main_loop):\n _LOGGER.debug(\"Preparing setup: %s\", client_config)\n\n # generic configuration\n self._loop = main_loop\n self._client = None\n self._lock = asyncio.Lock()\n self._config_name = client_config[CONF_NAME]\n self._config_type = client_config[CONF_TYPE]\n self._config_port = client_config[CONF_PORT]\n self._config_timeout = client_config[CONF_TIMEOUT]\n self._config_delay = client_config[CONF_DELAY]\n\n if self._config_type == \"serial\":\n # serial configuration\n self._config_method = client_config[CONF_METHOD]\n self._config_baudrate = client_config[CONF_BAUDRATE]\n self._config_stopbits = client_config[CONF_STOPBITS]\n self._config_bytesize = client_config[CONF_BYTESIZE]\n self._config_parity = client_config[CONF_PARITY]\n else:\n # network configuration\n self._config_host = client_config[CONF_HOST]", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def __init__(self, client):\n self.client = client\n self.call_params = {\n }", "def __init__(self, processManager, clientManager):\n self.processManager = processManager\n self.clientManager = clientManager\n self.engine_types = {}\n self.engine_allocations = {}\n self.engine_instances = {}", "def __init__(self):\n self.sp, self.user = self.init_auth_client()\n self.logger = logging.getLogger(__name__)", "def init_compute_clients(self):\n\n print \"\\t* instantiating clients\"\n # instantiate nova client\n self.gen_nova_client()\n\n # instantiate neutron client\n self.gen_neutron_client()\n\n # instantiate heat client (used to validate templates)\n self.gen_heat_client()", "def 
service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def __init__(self, server, params, backend):\r\n super(SentinelClient, self).__init__(server, params, backend)\r\n self._client_write = None\r\n self._client_read = None\r\n self._connection_string = server", "def __init__(self, **kwargs):\n self.local = salt.client.LocalClient()\n self.minion_nodes = self._query()", "def __init__(self):\n super(NovaClientWrapper, self).__init__(\n retry_exceptions=(nova_exc.ConnectionRefused,\n nova_exc.Conflict),\n auth_exceptions=(nova_exc.Unauthorized),\n name=\"Nova\")", "def __init__(self):\n self.clients = []\n self.outstanding_keypresses: Dict[Tuple[int, int], float] = {}\n self.last_button_pressed: Optional[str] = None\n self.last_button_action: Optional[const.InputAction] = None\n self.connection_state = None\n self.states = {}\n self.active_player = None\n self.powered_on = True\n self.has_authenticated = False\n self.heartbeat_count = 0\n self.volume: float = 0.5\n self.cluster_id: Optional[str] = None\n self.output_devices: List[str] = [DEVICE_UID]", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def _initialize(self):\n self.send_init_command()", "def setup(self):\n # pylint: disable = E0633\n # Client* do deliver loop, client as result but\n # pylint does not accept that fact\n\n _LOGGER.debug(\"doing setup\")\n if self._config_type == \"serial\":\n _, self._client = ClientSerial(\n schedulers.ASYNC_IO,\n method=self._config_method,\n port=self._config_port,\n baudrate=self._config_baudrate,\n stopbits=self._config_stopbits,\n bytesize=self._config_bytesize,\n parity=self._config_parity,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n elif self._config_type == \"rtuovertcp\":\n _, self._client = ClientTCP(\n schedulers.ASYNC_IO,\n host=self._config_host,\n port=self._config_port,\n framer=ModbusRtuFramer,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n elif self._config_type == \"tcp\":\n _, self._client = ClientTCP(\n schedulers.ASYNC_IO,\n host=self._config_host,\n port=self._config_port,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n elif self._config_type == \"udp\":\n _, self._client = ClientUDP(\n schedulers.ASYNC_IO,\n host=self._config_host,\n port=self._config_port,\n timeout=self._config_timeout,\n loop=self._loop,\n )\n else:\n assert False", "def setUp(self):\n self.client_socket = open_client_socket()", "def setUp(self):\n self.client_socket = open_client_socket()", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def setUp(self):\r\n super(SSLClientTest, self).setUp()\r\n self.client = Client()\r\n self.factory = RequestFactory()\r\n self.mock = Mock()", "def __init__(self):\n config = self.read_config()\n self.deployment = config['deployment']\n self.deployment_config = config[self.deployment]\n logger.info(f'Initializing storage client with the {self.deployment} deployment config {pformat(self.deployment_config)}')\n\n # get the MLOS config from the user else default it from the deployment config file\n # self.mlos_config = config['MLOS']\n # logger.info(f'Initializing storage client with the MLOS config 
{pformat(self.mlos_config)}')\n\n # setup the mount path\n if self.deployment == \"LOCAL\":\n self.mount_dir = self.setup_mount()\n logger.info(f'Mount directory setup completed: {self.mount_dir}')", "def init():\n # make sure pool is initialized\n global pool\n if not pool:\n pool = aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(limit=config.MAX_PARALLEL_REQUESTS),\n raise_for_status=False,\n trust_env=True,\n auth=aiohttp.BasicAuth( config.CACHE_USERNAME, config.CACHE_PASSWORD ),\n )", "def initialize(self) -> None:\n conn = self.optionally_wrap_socket(self.client.connection)\n conn.setblocking(False)\n self.client = TcpClientConnection(conn=conn, addr=self.addr)\n if b'ProtocolHandlerPlugin' in self.config.plugins:\n for klass in self.config.plugins[b'ProtocolHandlerPlugin']:\n instance = klass(self.config, self.client, self.request)\n self.plugins[instance.name()] = instance", "def __init__(self, **kwargs):\n self.config = kwargs[\"config\"]\n self.cli = client.DefaultClient(app_key=self.config[\"app_key\"], app_secret=self.config[\"app_secret\"])\n self.req = None", "def __init__(self):\n super(GconfStore, self).__init__()\n self._client = gconf.client_get_default()\n self.__connect_notifications()", "def __init__(self, **kwargs):\r\n super(Client, self).__init__()\r\n self.httpclient = client.HTTPClient(**kwargs)\r\n self.version = '2.0'\r\n self.format = 'json'\r\n self.action_prefix = \"/v%s\" % (self.version)\r\n self.retries = 0\r\n self.retry_interval = 1", "def __init__(self, client_authentication=None):\n super(OAuthClientAuthHandler, self).__init__()\n self._client_authentication = client_authentication", "def __init__(self, wm) -> None:\n conf_dict = wm.context.config.arango_storage._to_dict()\n\n log.debug(conf_dict)\n client = ArangoClient(hosts=conf_dict['hosts'])\n db = client.db(conf_dict['database'],\n username=conf_dict['username'],\n password=conf_dict['password'])\n\n self.db = db\n self.client = client", "def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)", "def __init__(self, *args, **kwargs):\n super(KBaseWSManager, self).__init__(*args, **kwargs)\n if not self.kbasews_uri:\n raise HTTPError(412, \"Missing KBase workspace service endpoint URI.\")\n\n # Init the session info we need.\n self.narrative_logger = get_narrative_logger()\n self.user_service = UserService()", "def __init__(self, client):\n self._client = client\n self._argument_converter = ArgumentConverter()", "def __init__(self):\n Session.SESSIONS_COUNT += 1\n self.channelCount = 0\n self._channels = []\n self._error = ''\n self._client = None", "def __init__(self):\n\n self.loop = asyncio.get_event_loop()\n self.aiohttp = web.Application(\n loop=self.loop,\n middlewares=[unhandled_route],\n )\n self.client = ClientSession()\n self.ws = WebSocketHandler(self)\n self.cert = self._load_ssl_certificate()\n\n self.config()", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._settings = {}", "def __init__(self, host=None, logger=core_log):\n if host is None:\n host = RESOURCE_MANAGER_HOST\n\n super(ClientResultManager, self).__init__(logger=logger, host=host)", "def __init__(self, address=('', 50000), 
authkey=b'tradingbot'):\n self.id = 0\n _ClientBot.__init__(self, address=address, authkey=authkey)\n self.conn_tbm = ConnTradingBotManager(self.id)", "def do_init(self):\n\n pass", "def __init__(self):\n try:\n context = ssl.create_default_context(\n purpose=ssl.Purpose.CLIENT_AUTH)\n context.options |= ssl.OP_NO_SSLv2\n context.options |= ssl.OP_NO_SSLv3\n context.options |= ssl.OP_NO_TLSv1\n context.options |= ssl.OP_NO_TLSv1_1\n context.options |= ssl.OP_NO_COMPRESSION\n context.verify_mode = ssl.CERT_REQUIRED\n # TODO do not use static configuration parameters\n context.load_verify_locations(cafile='/sbin/rpcsd/root.cert.pem')\n context.load_cert_chain(certfile='/sbin/rpcsd/gaps.pem')\n context.set_ciphers('AES128-SHA256')\n RPCS.context = context\n except FileNotFoundError:\n # If we can't set up TLS context, log error and exit\n LOG.error(\"Could not setup TLS context: certificate file(s) \"\n \"not present in the correct directory\")\n exit(1)", "def __init__(self, session, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.__client_session = session", "def _init(self):\n pass", "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self):\n self.client_id = None\n self.bridge_config = {}\n self.bridge_config_answer_status = None", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self, session):\n ConfigurableManager.__init__(self)\n self.session = session\n self._objs = {}\n self._objslock = threading.Lock()\n self._ifccounts = {}\n self._ifccountslock = threading.Lock()\n # Port numbers are allocated from these counters\n self.platformport = self.session.get_config_item_int(\"emane_platform_port\", 8100)\n self.transformport = self.session.get_config_item_int(\"emane_transform_port\", 8200)\n self.doeventloop = False\n self.eventmonthread = None\n self.logversion()\n # model for global EMANE configuration options\n self.emane_config = EmaneGlobalModel(session, None)\n session.broker.handlers.add(self.handledistributed)\n self.service = None\n self._modelclsmap = {\n self.emane_config.name: self.emane_config\n }\n self.loadmodels()", "def initService(self):", "def __init__(self):\n self._url_base = None\n self._keystone = None\n self._auth_token = None\n self._auth_lock = threading.Lock()\n self._failed_auth = False", "def __init__(\n self, config: interface.BaseConfig, session_manager: ClientSessionManager\n ):\n super().__init__(max_calls=1) # To StateProducer via interface.AppleTV\n self._config = config\n self._session_manager = session_manager\n self._protocols_to_setup: Queue[SetupData] = Queue()\n self._protocol_handlers: Dict[Protocol, SetupData] = {}\n self._push_updates = FacadePushUpdater()\n self._features = FacadeFeatures(self._push_updates)\n self._pending_tasks: Optional[set] = None\n self._device_info = interface.DeviceInfo({})\n self._interfaces = {\n interface.Features: self._features,\n interface.RemoteControl: FacadeRemoteControl(),\n interface.Metadata: FacadeMetadata(),\n interface.Power: FacadePower(),\n interface.PushUpdater: self._push_updates,\n interface.Stream: FacadeStream(self._features),\n interface.Apps: FacadeApps(),\n interface.Audio: FacadeAudio(),\n }", "def __init__(self, 
main_logger = None):\n\t\tself.log = main_logger or logger.init_logger(\"mandrill\")\n\n\t\tself.http_client = AsyncHTTPClient()\n\t\tself.key = config.MANDRILL_KEY", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "def init_kube_client(self):\n kubecfg_path = os.environ.get('KUBECFG_PATH')\n if kubecfg_path is None:\n config.load_kube_config()\n else:\n config.load_kube_config(config_file='/tmp/.kube/config') \n self.kube_client = k_client.CoreV1Api()\n self.kube_v1_batch_client = k_client.BatchV1Api()\n self.kube_v1_delete = k_client.V1DeleteOptions()", "def __init__(self, kubeconfig_path=None):\n config.load_kube_config(config_file=kubeconfig_path)\n self.api_client = client.ApiClient()\n self.core_client = client.CoreV1Api()\n self.batch_client = client.BatchV1Api()\n self.crd_client = client.CustomObjectsApi()", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "async def _init(self, **kwargs):", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._last_send = time.time()\n self._settings = {}", "def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def __init__(self):\n self.config = get_config()\n self.log = get_logger(self)\n\n self.factory = SugarServerFactory(\"wss://*:5505\")\n self.factory.protocol = SugarServerProtocol\n\n self.console_factory = SugarConsoleServerFactory(\"wss://localhost:5507\")\n self.console_factory.protocol = SugarConsoleServerProtocol\n\n self.api = APIService(self.config)", "def initialize(self, extra=None):\n if extra is None:\n extra = {}\n self.client_name = extra.get(ExtraItems.CLIENT_NAME, \"noname\")\n self.logger.info(f\"Initializing {self.client_name} ...\")\n\n # FL platform needs to provide filepath to configuration files\n self.app_root = extra.get(ExtraItems.APP_ROOT, \"\")\n\n # Read bundle config files\n self.bundle_root = os.path.join(self.app_root, self.bundle_root)\n\n config_train_files = self._add_config_files(self.config_train_filename)\n config_filter_files = self._add_config_files(self.config_filters_filename)\n\n # Parse\n self.train_parser = ConfigParser()\n self.filter_parser = ConfigParser()\n if len(config_train_files) > 0:\n self.train_parser.read_config(config_train_files)\n check_bundle_config(self.train_parser)\n if len(config_filter_files) > 0:\n self.filter_parser.read_config(config_filter_files)\n\n # override some config items\n self.train_parser[RequiredBundleKeys.BUNDLE_ROOT] = self.bundle_root\n\n # Get data location\n self.dataset_root = self.train_parser.get_parsed_content(\n BundleKeys.DATASET_DIR, default=ConfigItem(None, BundleKeys.DATASET_DIR)\n )\n\n # Get filters\n self.post_statistics_filters = self.filter_parser.get_parsed_content(\n FiltersType.POST_STATISTICS_FILTERS, 
default=ConfigItem(None, FiltersType.POST_STATISTICS_FILTERS)\n )\n\n self.logger.info(f\"Initialized {self.client_name}.\")", "def _init_zk(self):\n try:\n # clean old transition\n if self.zk:\n self.zk._reset()\n encrptor = AESEncrptor()\n auth_info = ZOOKEEPER_CONFIG['user'] + ':' + encrptor.decrypt(ZOOKEEPER_CONFIG['password'])\n self.zk = KazooClient(self.ZK_HOST, timeout=self.TIMEOUT, auth_data=[(\"digest\", auth_info)])\n self.zk.start()\n self.zk.add_listener(listener=self.listener)\n self.zk.ensure_path(self.LEADERSHIP_PATH)\n self.zk.ensure_path(self.SERVICE_PATH)\n except Exception as e:\n logger.error('Fail to connect to zk hosts %s, exception %s' %\n (self.ZK_HOST, e))\n self.zk = None\n raise e", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n _ClientBot.__init__(self, address=address, authkey=authkey)", "def setUpClass(cls):\n super(IronicTest, cls).setUpClass()\n if cls.manager.clients_initialized:\n cls.usr = cls.config.compute.controller_node_ssh_user\n cls.pwd = cls.config.compute.controller_node_ssh_password\n cls.key = cls.config.compute.path_to_private_key\n cls.timeout = cls.config.compute.ssh_timeout\n if not cls.ironic_client:\n LOG.warning('Ironic client was not initialized')", "def __init__(self, client_id: str):\n\n self._cs = aiohttp.ClientSession(\n loop=asyncio.get_event_loop(),\n raise_for_status=True,\n headers={\"Client-ID\": client_id},\n )", "def __init__(self, reactor, logger):\n super(ManagerThread, self).__init__()\n\n self.logger = logger\n\n self._requests = []\n self.request_queue = Queue()\n\n self._reactor = reactor\n self._stop_flag = False\n self._requests_handlers = {StopTest: self.stop_test,\n StartTest: self.start_test,\n ShouldSkip: self.should_skip,\n CleanupUser: self.cleanup_user,\n AddResult: self.add_test_result,\n UpdateFields: self.update_fields,\n StartTestRun: self.start_test_run,\n StopComposite: self.stop_composite,\n LockResources: self.lock_resources,\n UpdateRunData: self.update_run_data,\n QueryResources: self.query_resources,\n StartComposite: self.start_composite,\n UpdateResources: self.update_resources,\n ReleaseResources: self.release_resources}", "def initialize_core(self):\n # set current session unique id\n self.conf.set(\"accesspoint\", \"current_session\", self.currentSessionID)\n # set interface for shared connection from params\n self.conf.set(\"accesspoint\", \"interface_net\", self.parse_args.interface_net)\n \n if self.parse_args.interface:\n self.conf.set(\"accesspoint\", \"interface\", self.parse_args.interface)\n\n self.all_modules = module_list\n\n # intialize the LoggerManager\n # TODO: this change solve IndexError: list index out of range\n # but not a definitive solution\n self.logger_manager = LoggerManager(self)\n self.coreui = DefaultController(self)\n\n # print(self.coreui.Plugins)\n self.proxy_controller = self.coreui.getController(\"proxy_controller\")\n self.mitm_controller = self.coreui.getController(\"mitm_controller\")\n self.wireless_controller = self.coreui.getController(\"wireless_controller\")\n self.dhcp_controller = self.coreui.getController(\"dhcp_controller\")\n self.dns_controller = self.coreui.getController(\"dns_controller\")\n self.uiwid_controller = self.coreui.getController(\"ui_controller\")\n\n self.parser_list_func = {\n # parser_set_proxy is default extend class\n \"parser_set_proxy\": self.proxy_controller.pumpkinproxy,\n \"parser_set_plugin\": self.mitm_controller.sniffkin3,\n \"parser_set_mode\": self.wireless_controller.Settings,\n 
\"parser_set_security\": self.wireless_controller.Settings,\n \"parser_set_hostapd_config\": self.wireless_controller.Settings,\n \"parser_set_dhcpconf\": self.wireless_controller.Settings,\n \"parser_set_dhcpmode\": self.dns_controller.Active,\n }\n self.parser_autcomplete_func = {}\n\n # hook function (plugins and proxies)\n self.intialize_hook_func(self.proxy_controller)\n self.intialize_hook_func(self.mitm_controller)\n\n # register autocomplete set security command\n self.parser_autcomplete_func[\n \"parser_set_security\"\n ] = self.wireless_controller.Settings.getCommandsSecurity\n self.parser_autcomplete_func[\n \"parser_set_hostapd_config\"\n ] = self.wireless_controller.Settings.getCommandsHostapd\n self.parser_autcomplete_func[\n \"parser_set_dhcpconf\"\n ] = self.wireless_controller.Settings.getCommandsDhcpConf\n self.parser_autcomplete_func[\n \"parser_set_dhcpmode\"\n ] = self.dns_controller.getCommandsDhcpMode\n\n self.commands = {\n \"interface\": \"interface\",\n \"interface_net\": \"interface_net\",\n \"ssid\": \"ssid\",\n \"bssid\": \"bssid\",\n \"channel\": \"channel\",\n \"proxy\": None, # only for settings proxy\n \"plugin\": None, # only for settings plugin\n \"mode\": None, # only for settings mdoe\n \"dhcpconf\": None, # only for settings dhcpconf\n \"dhcpmode\": None, # only for settings dhcpmode\n \"security\": \"enable_security\",\n \"hostapd_config\": \"enable_hostapd_config\",\n }\n\n # get all command plugins and proxies\n for ctr_name, ctr_instance in self.coreui.getController(None).items():\n if hasattr(ctr_instance, \"getInfo\"):\n for plugin_name, plugins_info in ctr_instance.getInfo().items():\n self.commands[plugin_name] = \"\"\n\n self.threads = {\"RogueAP\": [], \"Modules\": {}}", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "async def init(self):\n self.init_connection_params()\n self._pool = await self._create_pool()\n\n return self", "def initialize(self, extra=None):\n if extra is None:\n extra = {}\n self.client_name = extra.get(ExtraItems.CLIENT_NAME, \"noname\")\n self.logger.info(f\"Initializing {self.client_name} ...\")\n\n if self.multi_gpu:\n dist.init_process_group(backend=self.backend, init_method=self.init_method)\n self._set_cuda_device()\n self.logger.info(\n f\"Using multi-gpu training on rank {self.rank} (available devices: {torch.cuda.device_count()})\"\n )\n if self.rank > 0:\n self.logger.setLevel(logging.WARNING)\n\n if self.seed:\n monai.utils.set_determinism(seed=self.seed)\n torch.backends.cudnn.benchmark = self.benchmark\n\n # FL platform needs to provide filepath to configuration files\n self.app_root = extra.get(ExtraItems.APP_ROOT, \"\")\n\n # Read bundle config files\n self.bundle_root = os.path.join(self.app_root, self.bundle_root)\n\n config_train_files = self._add_config_files(self.config_train_filename)\n config_eval_files = self._add_config_files(self.config_evaluate_filename)\n config_filter_files = self._add_config_files(self.config_filters_filename)\n\n # Parse\n self.train_parser = ConfigParser()\n self.eval_parser = ConfigParser()\n self.filter_parser = ConfigParser()\n if len(config_train_files) > 0:\n self.train_parser.read_config(config_train_files)\n check_bundle_config(self.train_parser)\n if len(config_eval_files) > 0:\n self.eval_parser.read_config(config_eval_files)\n check_bundle_config(self.eval_parser)\n if len(config_filter_files) > 0:\n 
self.filter_parser.read_config(config_filter_files)\n\n # override some config items\n self.train_parser[RequiredBundleKeys.BUNDLE_ROOT] = self.bundle_root\n self.eval_parser[RequiredBundleKeys.BUNDLE_ROOT] = self.bundle_root\n # number of training epochs for each round\n if BundleKeys.TRAIN_TRAINER_MAX_EPOCHS in self.train_parser:\n self.train_parser[BundleKeys.TRAIN_TRAINER_MAX_EPOCHS] = self.local_epochs\n\n # remove checkpoint loaders\n if self.disable_ckpt_loading:\n disable_ckpt_loaders(self.train_parser)\n disable_ckpt_loaders(self.eval_parser)\n\n # set tracking configs for experiment management\n if self.tracking is not None:\n if isinstance(self.tracking, str) and self.tracking in DEFAULT_EXP_MGMT_SETTINGS:\n settings_ = DEFAULT_EXP_MGMT_SETTINGS[self.tracking]\n else:\n settings_ = ConfigParser.load_config_files(self.tracking)\n ConfigWorkflow.patch_bundle_tracking(parser=self.train_parser, settings=settings_)\n ConfigWorkflow.patch_bundle_tracking(parser=self.eval_parser, settings=settings_)\n\n # Get trainer, evaluator\n self.trainer = self.train_parser.get_parsed_content(\n BundleKeys.TRAINER, default=ConfigItem(None, BundleKeys.TRAINER)\n )\n self.evaluator = self.eval_parser.get_parsed_content(\n BundleKeys.EVALUATOR, default=ConfigItem(None, BundleKeys.EVALUATOR)\n )\n\n # Get filters\n self.pre_filters = self.filter_parser.get_parsed_content(\n FiltersType.PRE_FILTERS, default=ConfigItem(None, FiltersType.PRE_FILTERS)\n )\n self.post_weight_filters = self.filter_parser.get_parsed_content(\n FiltersType.POST_WEIGHT_FILTERS, default=ConfigItem(None, FiltersType.POST_WEIGHT_FILTERS)\n )\n self.post_evaluate_filters = self.filter_parser.get_parsed_content(\n FiltersType.POST_EVALUATE_FILTERS, default=ConfigItem(None, FiltersType.POST_EVALUATE_FILTERS)\n )\n self.post_statistics_filters = self.filter_parser.get_parsed_content(\n FiltersType.POST_STATISTICS_FILTERS, default=ConfigItem(None, FiltersType.POST_STATISTICS_FILTERS)\n )\n\n # Get data location\n self.dataset_root = self.train_parser.get_parsed_content(\n BundleKeys.DATASET_DIR, default=ConfigItem(None, BundleKeys.DATASET_DIR)\n )\n\n if self.multi_gpu:\n if self.rank > 0 and self.trainer:\n self.trainer.logger.setLevel(logging.WARNING)\n if self.rank > 0 and self.evaluator:\n self.evaluator.logger.setLevel(logging.WARNING)\n self.logger.info(f\"Initialized {self.client_name}.\")", "def __init__(self, hostname, port, protocol, auth, tenant_id, entry):\n self.auth = auth\n self.hostname = hostname\n self.port = port\n self.protocol = protocol\n self.tenant_id = tenant_id\n self._api = ContainerClient(hostname=hostname, auth=self.auth, protocol=protocol,\n port=port, entry=entry)", "def __init__(self, client_ident):\n\t\tthreading.Thread.__init__(self, None)\n\t\tself.client_ident\t\t= client_ident\n\t\tself.start()", "def initialize():\n manager.initialize()\n logs.exit_great_success()", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass" ]
[ "0.7732741", "0.7679714", "0.71822697", "0.7145595", "0.70865464", "0.7035242", "0.7014164", "0.69719917", "0.6842564", "0.6798126", "0.6751508", "0.67443424", "0.6720339", "0.6716731", "0.67079747", "0.6699051", "0.66531205", "0.6575444", "0.65571284", "0.655429", "0.65435135", "0.6503959", "0.6481225", "0.64414793", "0.64094955", "0.64062655", "0.63983744", "0.63983744", "0.63983744", "0.63983744", "0.63983744", "0.63980764", "0.63769543", "0.6371524", "0.6371524", "0.63653016", "0.6362173", "0.6337859", "0.63310516", "0.63172334", "0.6285222", "0.6278442", "0.6265117", "0.62592584", "0.6251199", "0.6234024", "0.6231461", "0.6201123", "0.6188573", "0.6181794", "0.6172473", "0.61691004", "0.61648524", "0.61644155", "0.61576957", "0.6150513", "0.6149948", "0.6149758", "0.6146706", "0.6143411", "0.6139291", "0.6139291", "0.6139291", "0.6139291", "0.6139291", "0.6139291", "0.6139291", "0.6139291", "0.6129146", "0.61288613", "0.61271214", "0.6126603", "0.6123802", "0.61219144", "0.61198634", "0.61095566", "0.6106315", "0.61006", "0.6099208", "0.6097504", "0.6087057", "0.60732347", "0.60725206", "0.6072338", "0.60718304", "0.6070137", "0.60672265", "0.6054928", "0.60520065", "0.6047073", "0.6047073", "0.6047073", "0.6042663", "0.6041217", "0.6041162", "0.6039277", "0.60391587", "0.603856", "0.603856", "0.603856" ]
0.6048933
89
Get a keystone auth session.
def get_session(self):
    if self.session is None:
        # loader = loading.get_plugin_loader('password')
        # auth = loader.load_from_options(**self.auth_kwargs)
        auth = v3.Password(**self.auth_kwargs)
        self.session = session.Session(auth=auth)
    return self.session
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_session():\n assert config.AUTH_URL, \"Environment variable OS_AUTH_URL is not defined\"\n\n def _get_session(auth_url=None,\n username=None,\n password=None,\n project_name=None,\n user_domain_name=None,\n project_domain_name=None):\n auth_url = auth_url or config.AUTH_URL\n username = username or config.USERNAME\n password = password or config.PASSWORD\n project_name = project_name or config.PROJECT_NAME\n user_domain_name = user_domain_name or config.USER_DOMAIN_NAME\n project_domain_name = project_domain_name or config.PROJECT_DOMAIN_NAME\n\n if config.KEYSTONE_API_VERSION == 3:\n\n auth = identity.v3.Password(\n auth_url=auth_url,\n username=username,\n user_domain_name=user_domain_name,\n password=password,\n project_name=project_name,\n project_domain_name=project_domain_name)\n\n elif config.KEYSTONE_API_VERSION == 2:\n\n auth = identity.v2.Password(\n auth_url=auth_url,\n username=username,\n password=password,\n tenant_name=project_name)\n\n else:\n raise ValueError(\"Unexpected keystone API version: {}\".format(\n config.KEYSTONE_API_VERSION))\n\n return _session.Session(auth=auth)\n\n return _get_session", "def getSession(self):\n if self.accessToken is None:\n self.authenticate()\n\n s = requests.Session()\n s.auth = self.getAuthObj()\n s.headers = {\"Accept\": \"application/json\"}\n return s", "def session(self):\r\n # Use auto-auth to retrieve the session for a logged in user\r\n session = requests.Session()\r\n response = session.get(STUDIO_BASE_URL + \"/auto_auth?staff=true\")\r\n\r\n # Return the session from the request\r\n if response.ok:\r\n return session\r\n\r\n else:\r\n msg = \"Could not log in to use Studio restful API. Status code: {0}\".format(response.status_code)\r\n raise StudioApiLoginError(msg)", "def get_session(self):\n yield from self._ensure_session_valid()\n return self.session", "def get_session(self):\n return self._session()", "def session(self):\n return self.session_store.get_session()", "def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session", "def get_session(self):\n return self.session", "def session(self):\n return self.session_store.get_session(backend=\"datastore\")", "def session(get_session):\n return get_session()", "def get_session():\n\n jwt_secret = base64.urlsafe_b64decode(os.getenv('AUTH0_CLIENT_SECRET'))\n claims = {\n 'sub': 'rf|airflow-user',\n 'iat': datetime.utcnow(),\n 'exp': datetime.utcnow() + timedelta(hours=3)\n }\n encoded_jwt = jwt.encode(claims, jwt_secret, algorithm='HS256')\n session = requests.Session()\n\n session.headers.update({'Authorization': 'Bearer {}'.format(encoded_jwt)})\n return session", "def session(self):\n return self.session_store.get_session()", "def session(self):\n return self.session_store.get_session()", "def getSession():\n return call(\"getSession\")", "def get_session(self):\n\n session = shopify.Session(self.shop_url)\n session.token = self.access_token\n shopify.ShopifyResource.activate_session(session)\n return shopify", "def getSession(self):\n session = app.settings.cherrypy.session.get(self.session)\n return session", "def session(self):\n if not hasattr(self, '_session'):\n self._session = FakeSession(self.version)\n self._session.auth = (self.key, 'ignore')\n return self._session", "def get_session(self):\n sst = SessionStore(session_key=self.session_key)\n sst[BACKEND_SESSION_KEY] = cas_backend\n return sst", "def session(self):\n if not self._session: #Create new session if none exists\n return 
self._new_session()\n return self._session", "def get_session() -> Any:\n session = boto3.session.Session()\n return session", "def getSession(self):\n return self.request.getSession()", "def get_oauth_session(self):\n return self.oauth_session", "def session(self):\n\n if not hasattr(self, \"_session\"):\n self._session = Session(\"guest\")\n return self._session", "def get_session(self, session_id):\n return Session(self.session_cache, self.sid, session_id, self._secret)", "def get_session(cls):\r\n if cls._session is not None:\r\n return cls._session\r\n else:\r\n raise RuntimeError('Session not set.')", "def _get_session():\n api_version = \"1.0\"\n originator = \"salt_cloud_{}_driver\".format(__virtualname__)\n url = config.get_cloud_config_value(\n \"url\", get_configured_provider(), __opts__, search_global=False\n )\n user = config.get_cloud_config_value(\n \"user\", get_configured_provider(), __opts__, search_global=False\n )\n password = config.get_cloud_config_value(\n \"password\", get_configured_provider(), __opts__, search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n \"ignore_ssl\",\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False,\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n \"url: %s user: %s password: %s, originator: %s\",\n url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = str(ex.__dict__[\"details\"][1])\n slash_parts = url.split(\"/\")\n new_url = \"/\".join(slash_parts[:2]) + \"/\" + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n \"session is -> url: %s user: %s password: %s, originator:%s\",\n new_url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n return session", "def get_session(self):\n return ESSession(self)", "def get_session() -> requests.Session:\n return _get_session_from_cache(thread_ident=threading.get_ident())", "def create_keystone_auth_session(args):\n version, auth_url = _discover_keystone_info(args.os_auth_url)\n project_name = args.os_project_name or args.os_tenant_name\n project_id = args.os_project_id or args.os_tenant_id\n\n # FIXME(tsv): we are depending on the keystone version interface here.\n # If keystone changes it, this code will need to be changed accordingly\n if version == 'v2.0':\n # create a V2 Password plugin\n from keystoneclient.auth.identity import v2\n auth_plugin = v2.Password(auth_url=auth_url,\n username=args.os_username,\n password=args.os_password,\n tenant_name=project_name,\n tenant_id=project_id)\n elif version == 'v3.0':\n # create a V3 Password plugin\n from keystoneclient.auth.identity import v3\n auth_plugin = v3.Password(auth_url=auth_url,\n username=args.os_username,\n user_id=args.os_user_id,\n user_domain_name=args.os_user_domain_name,\n user_domain_id=args.os_user_domain_id,\n password=args.os_password,\n project_id=project_id,\n project_name=project_name,\n project_domain_id=args.os_project_domain_id,\n project_domain_name=args.\n os_project_domain_name)\n else:\n raise ValueError('Error: unsupported keystone version!')\n return ks_session.Session(auth=auth_plugin, verify=not args.insecure)", "def session(self):\n return session", "def session(self):\n if self._session is None:\n self.init_session()\n\n return self._session", "def get_session(self):\n session = Session(self.settings)\n 
self.sessions.append(session)\n return session", "def get_keystone_token():\n req_json = {\n 'auth': {\n 'passwordCredentials': {\n 'username': CFG.username,\n 'password': CFG.password\n },\n },\n }\n\n header = '{\"Host\": \"identity.api.rackspacecloud.com\",'\n header += '\"Content-Type\": \"application/json\",\"Accept\":\"application/json\"}'\n url = CFG.auth_url\n\n response = http.post(url=url, header=header, body=req_json)\n response_body = json.loads(response.text)\n\n auth_token = response_body['access']['token']['id']\n\n return auth_token", "def login_to_apic(self):\n session = Session(URL, LOGIN, PASSWORD)\n resp = session.login()\n self.assertTrue(resp.ok)\n return session", "def get_session():\n if not hasattr(get_session, \"session\"):\n get_session.session = requests_cache.CachedSession(\n cache_name=CACHE_PATH.rstrip(\".sqlite\"),\n expire_after=518400, # 6 days\n )\n adapter = HTTPAdapter(max_retries=3)\n get_session.session.mount(\"http://\", adapter)\n get_session.session.mount(\"https://\", adapter)\n return get_session.session", "def get_auth(self):\n return self._auth", "def get_current_session(self):\n if self.session is not None:\n return self.session\n else:\n return None", "def get(database, session_id: SessionId):\n return database.sessions.find_one({\"session_id\": session_id})", "def get_session():\n request_session = requests.Session()\n\n # Try to use what was passed in for username/password...\n username = CMD.username\n password = CMD.password\n \n # ...if there was nothing passed in then try to read it from config file\n if ((username is None or username == \"\") and (password is None or password == \"\")):\n # Try to read username and password from config file, if it exists\n # Otherwise default to DEFAULT_USERNAME/DEFAULT_PASSWORD\n try:\n with open(\"config.json\") as config_file:\n config_data = json.load(config_file)\n if (config_data):\n username = config_data[\"username\"]\n password = config_data[\"password\"]\n except:\n LOG.exception(\"Unable to open \\\"/collector/config.json\\\" file\")\n username = DEFAULT_USERNAME\n password = DEFAULT_PASSWORD\n\n request_session.auth = (username, password)\n request_session.headers = {\"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"netapp-client-type\": \"grafana-\" + __version__}\n # Ignore the self-signed certificate issues for https\n request_session.verify = False\n return request_session", "def auth(session, db):\n\tif (session.auth != None) and db(db.User.id == session.auth).count() == 1:\n\t\treturn User(session.auth, db)\n\telse:\n\t\treturn None", "def get_session(client_id, client_secret):\n client = BackendApplicationClient(client_id=client_id)\n session = OAuth2Session(client=client)\n session.fetch_token(token_url='https://api.intra.42.fr/oauth/token', client_id=client_id,\n client_secret=client_secret)\n return session", "def session(self):\n return self.ssession()", "def get_session(*args, **kwargs):\n session = requests.session(*args, **kwargs)\n\n return session", "def get_session(client_key,\n client_secret,\n resource_owner_key=None,\n resource_owner_secret=None):\n if not resource_owner_key or not resource_owner_secret:\n resource_owner_key, resource_owner_secret = authorize(client_key, client_secret)\n return OAuth1Session(client_key,\n client_secret=client_secret,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret)", "def get_session():\n session = scoped_session(sessionmaker(bind=engine))\n return session", "def 
get_session(self, session_id):\n return self._cache[session_id]", "def get_session(self, db_config=\"Database\"):\n # If the DB is not in the sessionmaker_dict, make it\n if db_config not in self.sessionmaker_dict:\n self.make_sessionmaker(db_config)\n\n # Get the sessionmaker object from the sessionmaker dict\n sessionmaker_obj = self.sessionmaker_dict[db_config]\n # Get a session from the sessionmaker\n session = sessionmaker_obj()\n\n return session", "def auth(self):\n return auth.get_auth()", "def session():\n def session():\n return BaseUrlSession()\n return session", "def auth(self):\n return auth.get_auth()", "def get_session_cookie(self):\n\n if self._login is not None and self._password is not None:\n session_key = self.encode_user(self._login, self._password)\n return {'sessionkey': session_key}\n else:\n return None", "def get_scoped_session():\n if scopedsessionclass is None:\n s = None\n else:\n s = scopedsessionclass()\n\n return s", "def auth_user_session():\n if \"user\" in request.cookies:\n userid = request.cookies[\"user\"]\n if userid:\n user = User.query.filter(User.id == userid).first()\n if user:\n if \"session_cookie\" in request.cookies and user.cookie == request.cookies[\"session_cookie\"]:\n if user.cookie_expiration > datetime.now():\n return user\n\n # Return none if failure\n return None", "def session_auth() -> str:\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n if email is None:\n return jsonify({\"error\": \"email missing\"}), 400\n if password is None:\n return jsonify({\"error\": \"password missing\"}), 400\n\n user = User()\n objs = user.search()\n obj = None\n\n for i in objs:\n if i.__dict__['email'] == email:\n obj = i\n\n if not obj:\n return jsonify({\"error\": \"no user found for this email\"}), 404\n\n if obj.is_valid_password(password) is False:\n return jsonify({\"error\": \"wrong password\"}), 401\n\n from api.v1.app import auth\n from os import getenv\n\n session = auth.create_session(obj.id)\n cookie_name = getenv(\"SESSION_NAME\")\n\n res = jsonify(obj.to_json())\n res.set_cookie(cookie_name, session)\n\n return res", "def __session(self):\n return self.__client_session", "def session(self):\n\t\treturn self._session", "def session(request):\n session = get_test_db_session()\n request.cls.session = session\n return session", "def get_session():\n if MYSQL['username'] is None:\n raise ValueError(\"User name is mandatory\")\n\n if MYSQL['password'] is None:\n raise ValueError(\"Password is mandatory\")\n\n if MYSQL['host'] is None:\n raise ValueError(\"Host is mandatory\")\n\n if MYSQL['db_name'] is None:\n raise ValueError(\"Database Name is mandatory\")\n\n try:\n engine = create_engine(\n '{engine}://{username}:{password}@{host}/{db_name}'.format(**MYSQL),\n pool_size=MYSQL[\"pool_size\"],\n echo=MYSQL[\"debug\"]\n )\n\n session_factory = sessionmaker(bind=engine)\n sess = scoped_session(session_factory)\n return sess\n\n except Exception as err:\n print(err)\n exit()", "def get_session(self, renew: Optional[bool] = False) -> neo4j.work.simple.Session:\n if self.session is None or renew:\n sess = self.driver.session()\n self.session = sess\n return self.session", "def get_user(self):\n session_key = request.get_cookie(\n self.conf['auth.cookie_key'],\n secret=self.conf['auth.cookie_secret']\n )\n if session_key:\n with atomic(self.conf['auth.dbfile']) as cursor:\n try:\n username, email = next(cursor.execute(\"\"\"\n SELECT username, email\n FROM sessions\n INNER JOIN users ON users.userid = 
sessions.userid\n WHERE sessions.key = ?\n AND sessions.started <= (SELECT\n datetime('now', '+3 hour'))\n \"\"\", (session_key,)))\n except StopIteration:\n return\n else:\n return User(username, email, get_usergroups(cursor, \n username))", "def get_session_by_user(user):\n if user:\n return manager.get_session(user)\n else:\n return manager.get_default_session()", "def auth_create_session(self) -> str:\n self.__logger.debug('Eva.auth_create_session called')\n return self.__http_client.auth_create_session()", "def get():\n return login()", "def authenticate_and_get_user():\n try:\n gauth_token = request.form['gauth_token']\n response = authenticate_with_users_service(gauth_token)\n\n if response.status_code == 201:\n # authentication successful, store login in cookies\n session['user_id'] = response.json()['user_id']\n session['name'] = response.json()['name']\n session['gauth_token'] = gauth_token\n return response.content, response.status_code\n except (BadRequestKeyError, requests.exceptions.ConnectionError) as error:\n return f'Error: {error}.', 400", "def get_session_from_user(self, client_id):\n return self.connections[client_id][\"session_id\"]", "def get_session(id):\n session = Session.query.get(id)\n result = session_schema.dump(session).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200", "def session(self):\n return self._session", "def session(self):\n return self._session", "def get_session(self):\r\n if self._config.has_key('database'):\r\n return self._builder.session(self._config['database'], self.get_threads())\r\n if not self._config.has_key('host'):\r\n raise Exception(\"Database engine host configuration is not found\")\r\n elif not self._config.has_key('dbpath'):\r\n raise Exception(\"Database path configuration is not found\")\r\n else:\r\n return self._builder.session(None, self.get_threads(), self._config['host'], self._config['dbpath'])", "def login(self):\n backend = self.backend\n self.session[backend.session_id_key] = self[\"id\"]\n self.session[backend.session_backend_key] = backend.session_backend_val\n self.session[backend.session_hash_key] = self._get_session_hash(\n self[\"password\"]\n )", "def get_session(*args, **kwargs):\n settings = _get_connection_settings(*args, **kwargs)\n return Session(settings)", "def session(self):\n return self.__session", "def internal_keystoneclient(request):\n token = cache.get(CACHE_CLIENT, None)\n old_client = cache.get(CACHE_TOKEN, None)\n if not token:\n #LOG.debug('There is no token cached -> New Password Session')\n idm_password_session = _password_session(request)\n keystoneclient = client.Client(session=idm_password_session)\n cache.set(CACHE_CLIENT, keystoneclient.session.get_token(), INTERNAL_CLIENT_CACHE_TIME)\n cache.set(CACHE_TOKEN, keystoneclient, INTERNAL_CLIENT_CACHE_TIME)\n #LOG.debug('Saved token: %s',keystoneclient.session.get_token())\n else:\n #LOG.debug('There is a cached token! 
(%s)',token)\n old_client._auth_token = token\n keystoneclient = old_client\n\n #LOG.debug('Using token: %s',keystoneclient.session.get_token())\n return keystoneclient", "def get_current(self):\n auth_token = session.get(\"auth_token\")\n print(auth_token)\n if not auth_token:\n return None\n user = db.user.find_one({\"auth_token\":auth_token})\n\n return user", "def test_client_get_session(self):\n server, client = loopback()\n session = client.get_session()\n assert isinstance(session, Session)", "def _fetch_herd_session():\n session = requests.Session()\n session.auth = (ADMIN_USERNAME, ADMIN_PASS)\n session.headers.update(HERD_HEADERS)\n\n return session", "def get_session(context, key):\n session_manager = getToolByName(context, 'session_data_manager')\n\n if not session_manager.hasSessionData():\n return None\n\n session = session_manager.getSessionData()\n\n if not key in session.keys():\n return None\n\n return session[key]", "def session(self) -> \"Session\":\n return self._instance", "def getSession(conn_string=None, req=None):\n global engine, session_factory\n if engine is None:\n engine = getEngine(conn_string)\n if session_factory is None:\n session_factory = scoped_session(sessionmaker(bind=engine))\n session = session_factory()\n return session", "def session(self) -> ClientSession:\r\n return self._session", "def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret", "def _get_mongo_session(self, sid):\n return self.coll.find_one({'sid': sid})", "def session(self):\n\n return self._session", "def get_session():\n name = request.args.get('name')\n sch = Scheduler()\n return sch.get_session(name)", "def get_or_create_sessions(self):\n\t\tpath = f'{self.BIKE_ENDPOINT}user/current/session?{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response", "def _create_login_session(self):\r\n sess = requests.Session()\r\n r = sess.get(self.page(self.LOGIN_PAGE), verify=self.verify)\r\n if r.status_code == 200:\r\n csrf_token = EndaceWebSession.find_csrf_token_login(r.content)\r\n if csrf_token is None:\r\n raise Exception(\"Could not find CSRF token\")\r\n # Submit login form\r\n login_result = sess.post(self.page(self.LOGIN_ACTION),\r\n data={\r\n \"_csrf\": csrf_token,\r\n \"d_user_id\": \"user_id\",\r\n \"t_user_id\": \"string\",\r\n \"c_user_id\": \"string\",\r\n \"e_user_id\": \"true\",\r\n \"f_user_id\": str(self.username),\r\n \"f_password\": str(self.password),\r\n \"Login\": \"Login\"},\r\n headers={'Content-type': 'application/x-www-form-urlencoded'}\r\n )\r\n if login_result.status_code == 200 and len(sess.cookies) > 0:\r\n return sess\r\n else:\r\n raise Exception(\"Login failed\")\r\n else:\r\n raise Exception(\"Login failed\")", "def __session(self) -> boto3.Session:\n return self.__ctx.get_session()", "def _getHttpSession(self):\n\n if self.httpSession is None:\n self.httpSession = requests.Session()\n return self.httpSession", "def get_session(base_url, group_id, token, session_id):\n url = base_url + route_session.format(session_id=session_id)\n response = requests.get(url, headers=headers(group_id, token))\n return response", "def auth(self):\n return self.creds(\"admin@example.com\", cookie=\"USERTOKEN: authcookie\")", "def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n 
auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise", "def get_user():\n try:\n if 'gauth_token' in session:\n response = authenticate_with_users_service(\n session['gauth_token'])\n if response.status_code == 201:\n return response.json()\n return None # Not signed in\n except requests.exceptions.ConnectionError:\n return None # Can't connect to users service", "def session(self) -> Session:\n if self._session is None:\n self._session = Session()\n\n return self._session", "def get_session(username, password, cookie_path=COOKIE_PATH, cache=True,\n cache_expiry=300, cache_path=CACHE_PATH, driver='chrome'):\n class AESOPAuth(AuthBase): # pylint: disable=too-few-public-methods\n \"\"\"AESOP authorization storage.\"\"\"\n\n def __init__(self, username, password, cookie_path, driver):\n \"\"\"Init.\"\"\"\n self.username = username\n self.password = password\n self.cookie_path = cookie_path\n self.driver = driver\n\n def __call__(self, r):\n \"\"\"Call is no-op.\"\"\"\n return r\n\n session = requests.Session()\n if cache:\n session = requests_cache.core.CachedSession(cache_name=cache_path,\n expire_after=cache_expiry)\n session.auth = AESOPAuth(username, password, cookie_path, driver)\n session.headers.update({'User-Agent': USER_AGENT})\n if os.path.exists(cookie_path):\n _LOGGER.debug(\"cookie found at: %s\", cookie_path)\n session.cookies = _load_cookies(cookie_path)\n else:\n _login(session)\n return session", "def get_node_session(self, node_session_id):\r\n node_session = self.node_manager.get_node_session(self.session,\r\n node_session_id,\r\n self.current_user)\r\n\r\n if node_session is None:\r\n raise tornado.web.HTTPError(404, 'NodeSession not found.')\r\n\r\n return node_session", "def test_server_get_session(self):\n server, client = loopback()\n session = server.get_session()\n assert isinstance(session, Session)", "def get(self, session_id):\n if session_id is None:\n raise ValueError('session_id is required and was not provided')\n\n response, _, headers = self._client.request_with_headers('GET', 'sessions/%s' % session_id)\n return SessionResponse(response, headers)", "def login(self):\n login = self.client.login(username=self.username, password=self.password)\n return login", "def login(self):\n login = self.client.login(username=self.username, password=self.password)\n return login", "def open_session(self):\n return self.Session()" ]
[ "0.8036264", "0.7163002", "0.705678", "0.7039849", "0.69910294", "0.69805986", "0.6977348", "0.69680965", "0.68606806", "0.6850855", "0.68429095", "0.6802599", "0.6802599", "0.6759136", "0.6674402", "0.6669936", "0.6636045", "0.6635824", "0.6607306", "0.6601134", "0.65489656", "0.65450686", "0.64846224", "0.64844596", "0.64843667", "0.64768106", "0.6463093", "0.64576906", "0.64478153", "0.64362794", "0.6430849", "0.64219624", "0.6421441", "0.64159423", "0.6407748", "0.6397224", "0.6370054", "0.6368857", "0.6364707", "0.63313466", "0.6327926", "0.6322956", "0.63099617", "0.6304905", "0.628237", "0.6280633", "0.62777853", "0.6274009", "0.6267728", "0.62605697", "0.62318504", "0.62209886", "0.6194785", "0.61607987", "0.6156329", "0.6153404", "0.61529833", "0.6137697", "0.612921", "0.61267936", "0.6121913", "0.6110482", "0.6110451", "0.61056805", "0.61042047", "0.61000603", "0.6089801", "0.6089801", "0.6072807", "0.6072699", "0.6071271", "0.60686666", "0.6067022", "0.60564435", "0.60489655", "0.6046946", "0.6022154", "0.60065794", "0.59871507", "0.5984202", "0.5981129", "0.59782124", "0.59750086", "0.59510267", "0.5946047", "0.59280515", "0.59279424", "0.59239537", "0.59215254", "0.5919619", "0.59173805", "0.59133583", "0.5911934", "0.5907784", "0.5894166", "0.588882", "0.5884845", "0.58831483", "0.58831483", "0.5882599" ]
0.6903718
8
Get a nova client instance.
def get_nova(self, version='2.1'):
    if self.nova is None:
        self.nova = novaclient.Client(version, session=self.get_session())
    return self.nova
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def nova(self, obj):\n\n if self._novaclient is not None:\n return self._novaclient\n params = self._build_conn_params(obj.user, obj.project)\n self._novaclient = driver_base.SenlinDriver().compute(params)\n return self._novaclient", "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n 
auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def get_keystone(self, version='3'):\n if self.keystone is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.keystone = keystoneclient.Client(\n version=version,\n session=self.get_session(),\n interface=iface)\n return self.keystone", "def client():\n\n client = Client()\n return client", "def get_cinder(self, version='2'):\n if self.cinder is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.cinder = cinderclient.Client(version,\n session=self.get_session(),\n interface=iface)\n return self.cinder", "def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise", "def get_client(self):\n return self.client", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])", "def get_client():\n return Client(__address, authkey='strumamor')", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def client():\n return Client(**common_data.AUTH_ARGS)", "def get_client():\n\n return MongoClientManager().client", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def get_client():\n\n client = Elasticsearch(host=HOST, port=PORT, timeout=300)\n\n # wait for yellow status\n for _ in range(100):\n time.sleep(.1)\n try:\n # This errors because of decorators. 
Silence it.\n # pylint: disable=E1123\n client.cluster.health(wait_for_status='yellow')\n return client\n except ConnectionError:\n continue\n else:\n # timeout\n raise SkipTest(\"Elasticsearch failed to start.\")", "def get_esi_client(token=None):\n if token:\n return EsiClient(security=EveClient.get_esi_security(token), headers={'User-Agent': \"Krypted Platform\"})\n else:\n return EsiClient(headers={'User-Agent': \"Krypted Platform\"})", "def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)", "def get_neutron(self, version='2'):\n if self.neutron is None:\n self.neutron = neutronclient.Client(session=self.get_session())\n return self.neutron", "def internal_keystoneclient(request):\n token = cache.get(CACHE_CLIENT, None)\n old_client = cache.get(CACHE_TOKEN, None)\n if not token:\n #LOG.debug('There is no token cached -> New Password Session')\n idm_password_session = _password_session(request)\n keystoneclient = client.Client(session=idm_password_session)\n cache.set(CACHE_CLIENT, keystoneclient.session.get_token(), INTERNAL_CLIENT_CACHE_TIME)\n cache.set(CACHE_TOKEN, keystoneclient, INTERNAL_CLIENT_CACHE_TIME)\n #LOG.debug('Saved token: %s',keystoneclient.session.get_token())\n else:\n #LOG.debug('There is a cached token! (%s)',token)\n old_client._auth_token = token\n keystoneclient = old_client\n\n #LOG.debug('Using token: %s',keystoneclient.session.get_token())\n return keystoneclient", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def get_arango_client() -> ArangoClient:\n return ArangoClient(hosts=ARANGO_HOST)", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def get_client(public_key: str, secret_key: str, **_):\n razorpay_client = razorpay.Client(auth=(public_key, secret_key))\n return razorpay_client", "def get(self, id: int) -> Client:\n\n return self.__clients[id]", "def _get_monasca_client(self):\n\n monasca_url = self.token_helper.get_service_endpoint('monitoring')\n keystone_url = self.token_helper.get_service_endpoint('identity') + 'v3'\n # All monasca data is stored in the admin project, so get a token\n # to that project\n token = self.token_helper.get_token_for_project('admin')\n\n return client.Client(api_version=api_version,\n endpoint=monasca_url,\n token=token,\n auth_url=keystone_url,\n project_name='admin',\n project_domain_name='Default',\n insecure=get_conf(\"insecure\"),\n user_agent=api.USER_AGENT)", "def Client(self):\n return self._client", "async def get_client(\n self,\n request: Request,\n client_id: str,\n client_secret: Optional[str] = None,\n ) -> Optional[OAuth2Client]:\n\n client_record = await self._db.query_one(\n Client.select(*OAuth2Client._fields, filters=\".id = <uuid>$id\"),\n id=client_id,\n )\n client_record = Client.from_obj(client_record)\n\n if client_record is not None:\n return OAuth2Client(\n client_id=client_record.client_id,\n client_secret=client_record.client_secret,\n grant_types=client_record.grant_types,\n response_types=client_record.response_types,\n redirect_uris=client_record.redirect_uris,\n 
scope=client_record.scope,\n )", "def client(self):\n client = VMwareClient(self.host, verify_ssl=False)\n client.login(self.settings.username, self.settings.password)\n return client", "def get_client():\n return storage.Client(project=project_id)", "def get_client_instance(cls, session, client_config, create=False):\n client = None\n if cls.SESSION_ID_KEY in session:\n client = session[cls.SESSION_ID_KEY]\n log.debug(\"Found OAuth client in session.\")\n if client is None and create:\n client = cls(client_config)\n session[cls.SESSION_ID_KEY] = client\n session.save()\n log.debug(\"No OAuth client in session - created new one.\")\n return client", "def _get_keystone_client(self, auth_creds):\n discover = keystone_discover.Discover(**auth_creds)\n\n for version_data in discover.version_data():\n version = version_data[\"version\"][0]\n if version <= 2:\n return keystone_client_v2.Client(insecure=True, **auth_creds)\n elif version == 3:\n return keystone_client_v3.Client(insecure=True, **auth_creds)\n\n raise Exception(\"Failed to discover keystone version \"\n \"for auth_url {0}\".format(\n auth_creds.get(\"auth_url\"))\n )", "def test_get_api_v1_client(self):\n\n client = get_api_v1_client()\n self.assertEqual(type(client), Client)", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def get_client(profile_name, region_name, svc, boto_client_params={}):\n session = get_session(profile_name, region_name)\n client = session.client(svc, **boto_client_params)\n return client", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def compute_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ComputeManagementClient)\n return self.client", "def client(self):\n return self._client", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def _get_es_client(conf):\n return elasticsearch_client(conf)", "def client(self, id):\n return self.query(Client).filter(Client.id == id).one()", "def get_client(host, port=None, username=None,\n password=None, tenant=None,\n auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)", "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n 
return client[0]", "def get_client(host, port=9200, url_prefix=None, http_auth=None, use_ssl=False,\n master_only=False, timeout=30):\n kwargs = compact_dict({\n 'hosts': [host], 'port': port, 'http_auth': http_auth,\n 'url_prefix': url_prefix, 'use_ssl': use_ssl,\n 'timeout': timeout\n })\n logger.debug(\"ES client kwargs = %s\", kwargs)\n try:\n client = elasticsearch.Elasticsearch(**kwargs)\n # Verify the version is acceptable.\n check_version(client)\n # Verify \"master_only\" status, if applicable\n check_master(client, master_only=master_only)\n return client\n except Exception as e: # noqa\n print(\"ERROR: Connection failure: {0}\".format(e.message))\n sys.exit(1)", "def get_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = GetClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def get(host, port=None, version=None):\n port = 8081 if port is None else port\n version = \"v1\" if version is None else version\n\n if version not in VERSIONS.keys():\n raise RestException(f\"Unknown REST API version: {version}\")\n api_client_cls = VERSIONS[version]\n return api_client_cls(host=host, port=port)", "def __init__(self):\n super(NovaClientWrapper, self).__init__(\n retry_exceptions=(nova_exc.ConnectionRefused,\n nova_exc.Conflict),\n auth_exceptions=(nova_exc.Unauthorized),\n name=\"Nova\")", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def get_client(args):\n if args.auth_strategy == 'userpass':\n creds = {'username': args.username, 'password': args.password}\n else:\n creds = None\n\n try:\n client = Client(rucio_host=args.host, auth_host=args.auth_host,\n account=args.account,\n auth_type=args.auth_strategy, creds=creds,\n ca_cert=args.ca_certificate, timeout=args.timeout)\n except CannotAuthenticate, error:\n logger.error(error)\n if not args.auth_strategy:\n if 'RUCIO_AUTH_TYPE' in os.environ:\n auth_type = os.environ['RUCIO_AUTH_TYPE']\n else:\n try:\n auth_type = config_get('client', 'auth_type')\n except (NoOptionError, NoSectionError):\n logger.error('Cannot get AUTH_TYPE')\n sys.exit(FAILURE)\n if auth_type == 'x509_proxy':\n logger.error('Please verify that your proxy is still valid and renew it if needed.')\n sys.exit(FAILURE)\n return client", "def _get_client(self, region_name):\n if region_name not in self._clients[self.SERVICE_NAME]:\n self._clients[self.SERVICE_NAME][region_name] = self._create_client()\n\n return self._clients[self.SERVICE_NAME][region_name]", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def get(self, id_cliente):\n cliente = get_cliente_id(id_cliente)\n if not cliente:\n api.abort(404)\n else:\n return cliente", "def client(self):\n\n return self._client", "def api_client() -> APIClient:\n return APIClient()", "def get_client(access_key, secret_key, region='eu-west-1', service='ec2'):\n return boto3.client(\n service,\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name=region\n )", "def get_client(httpx_settings: Optional[dict] = None) -> \"PrefectClient\":\n ctx = prefect.context.get_settings_context()\n api = PREFECT_API_URL.value()\n\n if not api:\n # create an ephemeral API if none was provided\n from prefect.server.api.server import create_app\n\n api = create_app(ctx.settings, 
ephemeral=True)\n\n return PrefectClient(\n api,\n api_key=PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )", "def lookup_client(self, ip_addr: str):\n try:\n conn_obj = self.client_list[ip_addr]\n except KeyError:\n raise Networking.Host.ClientNotFoundException\n\n if conn_obj is not None:\n return conn_obj\n else:\n raise Networking.Host.ClientNotFoundException", "def get_client(self, server_name=None, server_address=None):\n if server_name:\n for name, address in self.registry.servers.items():\n if name == server_name:\n return Client(address)\n return None\n elif server_address:\n return Client(server_address)", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_SNS_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('sns', endpoint_url=endpoint_url)\n return _client", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def django_client() -> Client:\n\n return Client()", "def api_client() -> APIClient:\n\n return APIClient()", "def client(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/users/clients/{params['id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n client = result[\"response\"][\"result\"][\"client\"]\n client_obj = FreshbooksClient(\n accounting_systemid=client['accounting_systemid'], \n first_name=client['fname'],\n last_name=client['lname'],\n email=client['email'],\n vat_name=client['vat_name'],\n vat_number=client['vat_number'],\n home_phone=client['home_phone'],\n organization=client['organization'],\n username=client['username']\n )\n return client_obj.__dict__", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def get_client(self, ip_address):\n\n self.cur.execute(\n 'select * from authenticated_clients where ip_address=%s',\n (ip_address, )\n )\n return self.cur.fetchone()", "def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return 
response.json()", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def get_hponeview_client():\n manager_url = prepare_manager_url(CONF.oneview.manager_url)\n config = {\n \"ip\": manager_url,\n \"credentials\": {\n \"userName\": CONF.oneview.username,\n \"password\": CONF.oneview.password\n }\n }\n return hponeview_client.OneViewClient(config)", "def create_client(self) -> None:\n pass", "def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)", "def get_service(\n service_name: str,\n version: str = \"v1\",\n configuration: Configuration = None,\n secrets: Secrets = None,\n) -> Resource:\n return client(service_name, version=version, secrets=secrets)", "def get_client(brand: Optional[str] = None,\n client_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClientResult:\n __args__ = dict()\n __args__['brand'] = brand\n __args__['clientId'] = client_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('gcp:iap/getClient:getClient', __args__, opts=opts, typ=GetClientResult).value\n\n return AwaitableGetClientResult(\n brand=pulumi.get(__ret__, 'brand'),\n client_id=pulumi.get(__ret__, 'client_id'),\n display_name=pulumi.get(__ret__, 'display_name'),\n id=pulumi.get(__ret__, 'id'),\n secret=pulumi.get(__ret__, 'secret'))", "def resource_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ResourceManagementClient)\n return self.client", "def get_client(service, cloud=False):\n if cloud:\n try:\n return os_client_config.make_client(service, cloud=cloud)\n except OpenStackConfigException:\n print('cloud config for %s not found' % cloud)\n return False\n else:\n try:\n return os_client_config.make_client(service)\n except (OpenStackConfigException, MissingRequiredOptions):\n print('%s client connection failed' % service)\n return False", "def get_client(service_account_json):\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = 
'{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)", "def get_mqtt_client(self, client_id: str) -> Client:\n return Client(client_id)", "async def get_docker_client(self) -> \"DockerClient\":", "def client(\n service_name: str, version: str = \"v1\", secrets: Secrets = None\n) -> Resource:\n credentials = load_credentials(secrets=secrets)\n return build(service_name, version=version, credentials=credentials)", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def authenticate_nova_user(self, keystone, user, password, tenant):\n ep = keystone.service_catalog.url_for(service_type='identity',\n endpoint_type='publicURL')\n return nova_client.Client(username=user, api_key=password,\n project_id=tenant, auth_url=ep)", "def base_client(self):\n return self._client", "def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client", "def fetch_boto3_client(service_name: str):\n region_name = load_aws_region_name()\n cache_key = f\"{region_name}-{service_name}\"\n\n if CLIENT_CACHE.get(cache_key):\n return CLIENT_CACHE[cache_key]\n\n config = Config(\n region_name=region_name,\n signature_version=\"v4\",\n retries={\"max_attempts\": 10, \"mode\": \"standard\"},\n )\n client = boto3.client(service_name, config=config) # type: ignore\n\n CLIENT_CACHE[cache_key] = client\n\n return client", "def _getClient(self, app_token=None):\n if app_token is None:\n from . import models\n app_token = models.Aplicacion.objects.get(app_id=self.app_id).app_token\n return api.OAuthAppClient(settings.CLIENT_ID, settings.CLIENT_SECRET, self.app_id, app_token)" ]
[ "0.8543932", "0.7357674", "0.73333573", "0.72716653", "0.7151151", "0.6966708", "0.69495815", "0.6702617", "0.667086", "0.6567023", "0.6537803", "0.6491878", "0.64906466", "0.64767176", "0.6461029", "0.64514875", "0.63861746", "0.63556963", "0.63503575", "0.63300276", "0.6299703", "0.62650293", "0.62271005", "0.6176792", "0.6176792", "0.6146839", "0.61323464", "0.60742897", "0.6069157", "0.60650337", "0.60613215", "0.6025373", "0.6010524", "0.598835", "0.5952971", "0.59460986", "0.59367174", "0.5932773", "0.59224784", "0.5920284", "0.5897889", "0.58885807", "0.58835113", "0.5881879", "0.58807296", "0.58708966", "0.5869076", "0.5857281", "0.5855981", "0.585168", "0.5823186", "0.5806518", "0.5805834", "0.57892257", "0.57813144", "0.5720146", "0.5720087", "0.5718537", "0.5706593", "0.5701717", "0.5691123", "0.56871414", "0.5678771", "0.5674654", "0.56678", "0.56508416", "0.56498784", "0.56484115", "0.56424236", "0.5637487", "0.56301343", "0.5628892", "0.56268334", "0.5619962", "0.5619425", "0.5616812", "0.56008565", "0.55981797", "0.5592277", "0.559", "0.55899155", "0.55691135", "0.55621034", "0.55460995", "0.55393165", "0.5537805", "0.5528755", "0.55156565", "0.55024844", "0.54972816", "0.54865104", "0.5470676", "0.5464916", "0.54584754", "0.54553586", "0.54476386", "0.5437502", "0.54345113", "0.543096", "0.5430091" ]
0.76938033
1
Get a neutron client instance.
def get_neutron(self, version='2'):
    if self.neutron is None:
        self.neutron = neutronclient.Client(session=self.get_session())
    return self.neutron
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def client():\n\n client = Client()\n return client", "def get_client(self):\n return self.client", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def get_client():\n return Client(__address, authkey='strumamor')", "def get_nova(self, version='2.1'):\n if self.nova is None:\n self.nova = novaclient.Client(version, session=self.get_session())\n return self.nova", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n 
tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client", "def get(self, id: int) -> Client:\n\n return self.__clients[id]", "def Client(self):\n return self._client", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def get_cinder(self, version='2'):\n if self.cinder is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.cinder = cinderclient.Client(version,\n session=self.get_session(),\n interface=iface)\n return self.cinder", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def client(self):\n return self._client", "def client():\n return Client(**common_data.AUTH_ARGS)", "def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def compute_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ComputeManagementClient)\n return self.client", "def get_client(host, port=None, username=None,\n password=None, tenant=None,\n 
auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)", "def get_keystone(self, version='3'):\n if self.keystone is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.keystone = keystoneclient.Client(\n version=version,\n session=self.get_session(),\n interface=iface)\n return self.keystone", "def get_client():\n\n client = Elasticsearch(host=HOST, port=PORT, timeout=300)\n\n # wait for yellow status\n for _ in range(100):\n time.sleep(.1)\n try:\n # This errors because of decorators. Silence it.\n # pylint: disable=E1123\n client.cluster.health(wait_for_status='yellow')\n return client\n except ConnectionError:\n continue\n else:\n # timeout\n raise SkipTest(\"Elasticsearch failed to start.\")", "def get_client():\n\n return MongoClientManager().client", "def client(self):\n\n return self._client", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def get_client(args):\n if args.auth_strategy == 'userpass':\n creds = {'username': args.username, 'password': args.password}\n else:\n creds = None\n\n try:\n client = Client(rucio_host=args.host, auth_host=args.auth_host,\n account=args.account,\n auth_type=args.auth_strategy, creds=creds,\n ca_cert=args.ca_certificate, timeout=args.timeout)\n except CannotAuthenticate, error:\n logger.error(error)\n if not args.auth_strategy:\n if 'RUCIO_AUTH_TYPE' in os.environ:\n auth_type = os.environ['RUCIO_AUTH_TYPE']\n else:\n try:\n auth_type = config_get('client', 'auth_type')\n except (NoOptionError, NoSectionError):\n logger.error('Cannot get AUTH_TYPE')\n sys.exit(FAILURE)\n if auth_type == 'x509_proxy':\n logger.error('Please verify that your proxy is still valid and renew it if needed.')\n sys.exit(FAILURE)\n return client", "def get_mqtt_client(self, client_id: str) -> Client:\n return Client(client_id)", "def client(self) -> mqtt.Client:\n return self._client", "def get_arango_client() -> ArangoClient:\n return ArangoClient(hosts=ARANGO_HOST)", "def client(self):\n client = VMwareClient(self.host, verify_ssl=False)\n client.login(self.settings.username, self.settings.password)\n return client", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def get_conn(self) -> ServiceBusAdministrationClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusAdministrationClient.from_connection_string(connection_string)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = 
self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = DefaultAzureCredential()\n client = ServiceBusAdministrationClient(\n fully_qualified_namespace=fully_qualified_namespace,\n credential=credential, # type: ignore[arg-type]\n )\n self.log.info(\"Create and returns ServiceBusAdministrationClient\")\n return client", "def _get_client(self, region_name):\n if region_name not in self._clients[self.SERVICE_NAME]:\n self._clients[self.SERVICE_NAME][region_name] = self._create_client()\n\n return self._clients[self.SERVICE_NAME][region_name]", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def resource_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ResourceManagementClient)\n return self.client", "def client(self) -> 'BaseClient':\n return self", "def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def _get_monasca_client(self):\n\n monasca_url = self.token_helper.get_service_endpoint('monitoring')\n keystone_url = self.token_helper.get_service_endpoint('identity') + 'v3'\n # All monasca data is stored in the admin project, so get a token\n # to that project\n token = self.token_helper.get_token_for_project('admin')\n\n return client.Client(api_version=api_version,\n endpoint=monasca_url,\n token=token,\n auth_url=keystone_url,\n project_name='admin',\n project_domain_name='Default',\n insecure=get_conf(\"insecure\"),\n user_agent=api.USER_AGENT)", "def internal_keystoneclient(request):\n token = cache.get(CACHE_CLIENT, None)\n old_client = cache.get(CACHE_TOKEN, None)\n if not token:\n #LOG.debug('There is no token cached -> New Password Session')\n 
idm_password_session = _password_session(request)\n keystoneclient = client.Client(session=idm_password_session)\n cache.set(CACHE_CLIENT, keystoneclient.session.get_token(), INTERNAL_CLIENT_CACHE_TIME)\n cache.set(CACHE_TOKEN, keystoneclient, INTERNAL_CLIENT_CACHE_TIME)\n #LOG.debug('Saved token: %s',keystoneclient.session.get_token())\n else:\n #LOG.debug('There is a cached token! (%s)',token)\n old_client._auth_token = token\n keystoneclient = old_client\n\n #LOG.debug('Using token: %s',keystoneclient.session.get_token())\n return keystoneclient", "def base_client(self):\n return self._client", "def get_client(host, port=9200, url_prefix=None, http_auth=None, use_ssl=False,\n master_only=False, timeout=30):\n kwargs = compact_dict({\n 'hosts': [host], 'port': port, 'http_auth': http_auth,\n 'url_prefix': url_prefix, 'use_ssl': use_ssl,\n 'timeout': timeout\n })\n logger.debug(\"ES client kwargs = %s\", kwargs)\n try:\n client = elasticsearch.Elasticsearch(**kwargs)\n # Verify the version is acceptable.\n check_version(client)\n # Verify \"master_only\" status, if applicable\n check_master(client, master_only=master_only)\n return client\n except Exception as e: # noqa\n print(\"ERROR: Connection failure: {0}\".format(e.message))\n sys.exit(1)", "def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)", "def get_client(client_mgr):\n manager = getattr(client_mgr, 'manager', client_mgr)\n net_client = getattr(manager, 'networks_client')\n try:\n _params = manager.default_params_with_timeout_values.copy()\n except Exception:\n _params = {}\n client = LoadBalancersClient(net_client.auth_provider,\n net_client.service,\n net_client.region,\n net_client.endpoint_type,\n **_params)\n return client", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def get_client():\n return storage.Client(project=project_id)", "def get_client(profile_name, region_name, svc, boto_client_params={}):\n session = get_session(profile_name, region_name)\n client = session.client(svc, **boto_client_params)\n return client", "def client(websession: aiohttp.ClientSession) -> RenaultClient:\n return RenaultClient(\n session=get_logged_in_session(websession),\n )", "def operations_client(self) -> operations_v1.OperationsAsyncClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client", "def operations_client(self) -> operations_v1.OperationsAsyncClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client", "def getClient(self):\n authDict = Configuration().read_auth_data_from_config()\n client_key = authDict['client_key']\n client_secret = authDict['client_secret']\n token = authDict['token']\n token_secret = authDict['token_secret']\n\n authorize_OAuth_ob = authorizeOAuth.AuthorizeOAuth(client_key,\n client_secret,\n token,\n token_secret,\n Configuration().read_board_id_config())\n\n trello_client_wrapper = authorize_OAuth_ob.getClient()\n self.set_list(trello_client_wrapper)\n return trello_client_wrapper", "def 
operations_client(self) -> operations_v1.OperationsClient:\n # Sanity check: Only create a new client if we do not already have one.\n if \"operations_client\" not in self.__dict__:\n self.__dict__[\"operations_client\"] = operations_v1.OperationsClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self.__dict__[\"operations_client\"]", "def _get_keystone_client(self, auth_creds):\n discover = keystone_discover.Discover(**auth_creds)\n\n for version_data in discover.version_data():\n version = version_data[\"version\"][0]\n if version <= 2:\n return keystone_client_v2.Client(insecure=True, **auth_creds)\n elif version == 3:\n return keystone_client_v3.Client(insecure=True, **auth_creds)\n\n raise Exception(\"Failed to discover keystone version \"\n \"for auth_url {0}\".format(\n auth_creds.get(\"auth_url\"))\n )", "def _get_es_client(conf):\n return elasticsearch_client(conf)", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def operations_client(self) -> operations_v1.OperationsClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsClient(self.grpc_channel)\n\n # Return the client from cache.\n return self._operations_client", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def get_conn(self) -> WebClient:\n return self.client", "def nova(self, obj):\n\n if self._novaclient is not None:\n return self._novaclient\n params = self._build_conn_params(obj.user, obj.project)\n self._novaclient = driver_base.SenlinDriver().compute(params)\n return self._novaclient", "def get_client(httpx_settings: Optional[dict] = None) -> \"PrefectClient\":\n ctx = prefect.context.get_settings_context()\n api = PREFECT_API_URL.value()\n\n if not api:\n # create an ephemeral API if none was provided\n from prefect.server.api.server import create_app\n\n api = create_app(ctx.settings, ephemeral=True)\n\n return PrefectClient(\n api,\n api_key=PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )", "def get_client(self, ip_address):\n\n self.cur.execute(\n 'select * from authenticated_clients where ip_address=%s',\n (ip_address, )\n )\n return self.cur.fetchone()", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_SNS_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('sns', endpoint_url=endpoint_url)\n return _client", "def get_conn(self) -> ServiceBusClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusClient.from_connection_string(connection_string, logging_enable=True)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = DefaultAzureCredential()\n client = ServiceBusClient(\n fully_qualified_namespace=fully_qualified_namespace,\n 
credential=credential, # type: ignore[arg-type]\n )\n\n self.log.info(\"Create and returns ServiceBusClient\")\n return client", "def client(self):\n return self._thread._client", "def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")", "def lookup_client(self, ip_addr: str):\n try:\n conn_obj = self.client_list[ip_addr]\n except KeyError:\n raise Networking.Host.ClientNotFoundException\n\n if conn_obj is not None:\n return conn_obj\n else:\n raise Networking.Host.ClientNotFoundException", "def get(host, port=None, version=None):\n port = 8081 if port is None else port\n version = \"v1\" if version is None else version\n\n if version not in VERSIONS.keys():\n raise RestException(f\"Unknown REST API version: {version}\")\n api_client_cls = VERSIONS[version]\n return api_client_cls(host=host, port=port)", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def get_client(self):\n if self._client_state is not None:\n # Copy client state because `from_json_snapshot` modifies it...\n client = AxClient.from_json_snapshot(copy.deepcopy(self._client_state))\n else:\n client = AxClient(\n random_seed=self.seed,\n enforce_sequential_optimization=False,\n verbose_logging=False,\n )\n\n client.create_experiment(\n parameters=orion_space_to_axoptimizer_space(self.space),\n choose_generation_strategy_kwargs={\n \"num_initialization_trials\": self.n_initial_trials,\n \"max_parallelism_override\": self.max_trials,\n },\n objectives={\n \"objective\": ObjectiveProperties(minimize=True),\n **{\n o: ObjectiveProperties(minimize=True)\n for o in self.extra_objectives\n },\n },\n outcome_constraints=self.constraints,\n )\n\n yield client\n\n self._client_state = client.to_json_snapshot()", "def api_client() -> APIClient:\n return APIClient()", "def get(node_instance_id, logger, client, tenant_name):\n if tenant_name:\n logger.info('Explicitly using tenant `{0}`'.format(tenant_name))\n logger.info('Retrieving node instance {0}'.format(node_instance_id))\n try:\n node_instance = client.node_instances.get(node_instance_id)\n except CloudifyClientError as e:\n if e.status_code != 404:\n raise\n raise CloudifyCliError('Node instance {0} not found'.format(\n node_instance_id))\n\n print_data(NODE_INSTANCE_COLUMNS, node_instance, 'Node-instance:', 50)\n\n # print node instance runtime properties\n logger.info('Instance runtime properties:')\n for prop_name, prop_value in utils.decode_dict(\n 
node_instance.runtime_properties).iteritems():\n logger.info('\\t{0}: {1}'.format(prop_name, prop_value))\n logger.info('')", "def get_client(public_key: str, secret_key: str, **_):\n razorpay_client = razorpay.Client(auth=(public_key, secret_key))\n return razorpay_client", "def client(self, id):\n return self.query(Client).filter(Client.id == id).one()", "def _client(self):\n\n if self._suds_client is None:\n self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return response.json()", "def get_client(conn):\n # No database indicates a cluster connection\n if not conn.get('db', None):\n conn.pop('db', None)\n return connect_redis_cluster(conn)\n\n # Otherwise it's a regular redis connection\n return connect_redis(conn)", "def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def get_client(service_account_json):\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = '{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def raw_client(self):\r\n warnings.warn(\"raw_client is deprecated. use self.client.get_client instead\",\r\n DeprecationWarning, stacklevel=2)\r\n return self.client.get_client(write=True)", "def api_client() -> APIClient:\n\n return APIClient()", "def get_client(ip, port=8088, activity_callback=None):\n func = harmony_client.create_and_connect_client(ip, port, activity_callback)\n return run_in_loop_now('get_client', func)", "def _client():\n nonlocal client\n if not client:\n client = twitter.Twitter(authenticator)\n return client" ]
[ "0.83219314", "0.76559037", "0.7583974", "0.7498433", "0.7013949", "0.70020646", "0.69358927", "0.6753864", "0.6684781", "0.668257", "0.66047627", "0.65611005", "0.65462434", "0.65265024", "0.6525972", "0.6487087", "0.6480219", "0.64514047", "0.6430272", "0.63588655", "0.6354947", "0.63256377", "0.6315954", "0.62932724", "0.6275589", "0.6269347", "0.62648684", "0.62354976", "0.6217764", "0.62044597", "0.61740327", "0.6155584", "0.61382365", "0.61301327", "0.6123461", "0.6114563", "0.60738385", "0.6069977", "0.60424733", "0.60424733", "0.60119355", "0.5980806", "0.5960852", "0.5958094", "0.5949131", "0.59448403", "0.59390116", "0.5927686", "0.59249955", "0.5917323", "0.5905947", "0.58993375", "0.58863854", "0.5884792", "0.5883774", "0.5881139", "0.58778423", "0.5840491", "0.58229506", "0.58045334", "0.5802243", "0.5800419", "0.5792941", "0.5783483", "0.5774686", "0.5774686", "0.57728326", "0.5751257", "0.5744465", "0.57314664", "0.5728483", "0.572471", "0.57237256", "0.5717828", "0.5708637", "0.57069296", "0.5702195", "0.57012486", "0.56978655", "0.5692158", "0.5690392", "0.5673763", "0.5662936", "0.56591123", "0.5637542", "0.56369716", "0.56276846", "0.5626892", "0.5621746", "0.561444", "0.5608744", "0.560296", "0.55958533", "0.5594313", "0.55911267", "0.55903906", "0.558759", "0.55855644", "0.5574558", "0.55719674" ]
0.7441862
4
Get a glance client instance.
def get_glance(self, version='2'):
    if self.glance is None:
        self.glance = glanceclient(version, session=self.get_session())
    return self.glance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def client():\n\n client = Client()\n return client", "def get_client(self):\n return self.client", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def get_client():\n return Client(__address, authkey='strumamor')", "def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def getClient(self):\n authDict = Configuration().read_auth_data_from_config()\n client_key = authDict['client_key']\n client_secret = authDict['client_secret']\n token = authDict['token']\n token_secret = authDict['token_secret']\n\n authorize_OAuth_ob = authorizeOAuth.AuthorizeOAuth(client_key,\n client_secret,\n token,\n token_secret,\n Configuration().read_board_id_config())\n\n trello_client_wrapper = authorize_OAuth_ob.getClient()\n self.set_list(trello_client_wrapper)\n return trello_client_wrapper", "def client():\n return Client(**common_data.AUTH_ARGS)", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def get_client():\n return storage.Client(project=project_id)", "def Client(self):\n return self._client", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def get(self, id: int) -> Client:\n\n return self.__clients[id]", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n 
region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def client(self):\n return self._client", "def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)", "def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client", "def client(self):\n\n return self._client", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def client(websession: aiohttp.ClientSession) -> RenaultClient:\n return RenaultClient(\n session=get_logged_in_session(websession),\n )", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def get_client(httpx_settings: Optional[dict] = None) -> \"PrefectClient\":\n ctx = prefect.context.get_settings_context()\n api = PREFECT_API_URL.value()\n\n if not api:\n # create an ephemeral API if none was provided\n from prefect.server.api.server import create_app\n\n api = create_app(ctx.settings, ephemeral=True)\n\n return PrefectClient(\n api,\n api_key=PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )", "def get_client():\n\n return MongoClientManager().client", "def client(self) -> 'BaseClient':\n return self", "def get_client(args):\n if args.auth_strategy == 'userpass':\n creds = {'username': args.username, 'password': args.password}\n else:\n creds = None\n\n try:\n client = Client(rucio_host=args.host, auth_host=args.auth_host,\n account=args.account,\n auth_type=args.auth_strategy, creds=creds,\n ca_cert=args.ca_certificate, timeout=args.timeout)\n except CannotAuthenticate, error:\n logger.error(error)\n if not args.auth_strategy:\n if 'RUCIO_AUTH_TYPE' in os.environ:\n auth_type = os.environ['RUCIO_AUTH_TYPE']\n else:\n try:\n auth_type = config_get('client', 'auth_type')\n except (NoOptionError, NoSectionError):\n logger.error('Cannot get AUTH_TYPE')\n sys.exit(FAILURE)\n if auth_type == 'x509_proxy':\n logger.error('Please verify that your proxy is still valid and renew it if needed.')\n sys.exit(FAILURE)\n return client", "def GetClient(release_track):\n return apis.GetClientInstance(_API_NAME, GetApiVersion(release_track))", "def get_client(self, name):\n 
return self.get_clients(as_dict=True).get(name)", "def _client():\n nonlocal client\n if not client:\n client = twitter.Twitter(authenticator)\n return client", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def get_client_instance(cls, session, client_config, create=False):\n client = None\n if cls.SESSION_ID_KEY in session:\n client = session[cls.SESSION_ID_KEY]\n log.debug(\"Found OAuth client in session.\")\n if client is None and create:\n client = cls(client_config)\n session[cls.SESSION_ID_KEY] = client\n session.save()\n log.debug(\"No OAuth client in session - created new one.\")\n return client", "def get_api_client():\n\n global _API_CLIENT_HANDLE\n\n if not _API_CLIENT_HANDLE:\n context = get_context()\n server_config = context.get_server_config()\n\n pc_ip = server_config.get(\"pc_ip\")\n pc_port = server_config.get(\"pc_port\")\n username = server_config.get(\"pc_username\")\n password = server_config.get(\"pc_password\")\n\n update_api_client(host=pc_ip, port=pc_port, auth=(username, password))\n\n return _API_CLIENT_HANDLE", "def get_client(host, port=None, username=None,\n password=None, tenant=None,\n auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)", "def getClient(self):\r\n client = SpreadsheetsService()\r\n\r\n try:\r\n client.GetWorksheetsFeed(self.spreadsheet_key, visibility='public',\r\n projection='basic')\r\n except gaierror:\r\n client = None\r\n\r\n return client", "def get_contribs_client():\n from mpcontribs.client import Client\n\n headers = get_consumer()\n\n if is_localhost():\n return Client(apikey=get_user_api_key())\n else:\n return Client(headers=headers)", "def get_client(self):\n if self._client_state is not None:\n # Copy client state because `from_json_snapshot` modifies it...\n client = AxClient.from_json_snapshot(copy.deepcopy(self._client_state))\n else:\n client = AxClient(\n random_seed=self.seed,\n enforce_sequential_optimization=False,\n verbose_logging=False,\n )\n\n client.create_experiment(\n parameters=orion_space_to_axoptimizer_space(self.space),\n choose_generation_strategy_kwargs={\n \"num_initialization_trials\": self.n_initial_trials,\n \"max_parallelism_override\": self.max_trials,\n },\n objectives={\n \"objective\": ObjectiveProperties(minimize=True),\n **{\n o: ObjectiveProperties(minimize=True)\n for o in self.extra_objectives\n },\n },\n outcome_constraints=self.constraints,\n )\n\n yield client\n\n self._client_state = client.to_json_snapshot()", "def get_client(profile_name, region_name, svc, boto_client_params={}):\n session = get_session(profile_name, region_name)\n client = session.client(svc, **boto_client_params)\n return client", "def api_client() -> APIClient:\n return 
APIClient()", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def get_client(service_account_json):\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = '{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)", "def api_client() -> APIClient:\n\n return APIClient()", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "async def get_client(\n self,\n request: Request,\n client_id: str,\n client_secret: Optional[str] = None,\n ) -> Optional[OAuth2Client]:\n\n client_record = await self._db.query_one(\n Client.select(*OAuth2Client._fields, filters=\".id = <uuid>$id\"),\n id=client_id,\n )\n client_record = Client.from_obj(client_record)\n\n if client_record is not None:\n return OAuth2Client(\n client_id=client_record.client_id,\n client_secret=client_record.client_secret,\n grant_types=client_record.grant_types,\n response_types=client_record.response_types,\n redirect_uris=client_record.redirect_uris,\n scope=client_record.scope,\n )", "def _client(self) -> httpx.Client:\n return httpx.Client(\n base_url=self._base_url,\n headers=self._authorization_headers,\n proxies=self._proxies,\n )", "def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return response.json()", "def _client(self):\n\n if self._suds_client is None:\n self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def base_client(self):\n return self._client", "def get_conn(self) -> WebClient:\n return self.client", "def get_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = GetClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def get_gql_client(settings):\n endpoint = settings['endpoint']\n token = settings['token']\n\n headers = dict()\n headers['Authorization'] = f'Bearer {token}'\n transport = RequestsHTTPTransport(\n url=endpoint,\n headers=headers,\n verify=True,\n retries=3,\n timeout=30)\n try:\n client = Client(transport=transport, fetch_schema_from_transport=True)\n except exceptions.ConnectTimeout as e:\n logger.error(e)\n raise\n return client", "def get_httpx_client() -> httpx.Client:\n return httpx.Client(**CLIENT_PARAMETERS) # type: ignore", "def get_http_client():\n store = file.Storage(TOKEN_STORE_FILE)\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_ID_FILE, SCOPES)\n creds = tools.run_flow(flow, store)\n return creds.authorize(Http())", "def 
_get_client(wsdl_url, cache_duration=(\"default\",)):\n global _suds_client\n\n print(wsdl_url)\n # Handle new or changed client request (create new client)\n if _suds_client is None or _suds_client.wsdl.url != wsdl_url:\n _suds_client = Client(wsdl_url)\n if cache_duration is None:\n _suds_client.set_options(cache=None)\n else:\n cache = _suds_client.options.cache\n # could add some error catching ...\n if cache_duration[0] == \"default\":\n cache.setduration(days=1)\n else:\n # noinspection PyTypeChecker\n cache.setduration(**dict([cache_duration]))\n\n return _suds_client", "def get_client(public_key: str, secret_key: str, **_):\n razorpay_client = razorpay.Client(auth=(public_key, secret_key))\n return razorpay_client", "def _get_client(self, region_name):\n if region_name not in self._clients[self.SERVICE_NAME]:\n self._clients[self.SERVICE_NAME][region_name] = self._create_client()\n\n return self._clients[self.SERVICE_NAME][region_name]", "def client(self) -> mqtt.Client:\n return self._client", "def _get_client(self):\n return Github(\n base_url=github_trigger.api_endpoint(),\n login_or_token=self.auth_token if self.auth_token else github_trigger.client_id(),\n password=None if self.auth_token else github_trigger.client_secret(),\n timeout=5,\n )", "def CreateClient():\n client = gdata.docs.client.DocsClient(source=SampleConfig.APP_NAME)\n client.http_client.debug = SampleConfig.DEBUG\n # Authenticate the user with CLientLogin, OAuth, or AuthSub.\n try:\n gdata.sample_util.authorize_client(\n client,\n service=client.auth_service,\n source=client.source,\n scopes=client.auth_scopes\n )\n except gdata.client.BadAuthentication:\n exit('Invalid user credentials given.')\n except gdata.client.Error:\n exit('Login Error')\n return client", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def operations_client(self) -> operations_v1.OperationsClient:\n # Sanity check: Only create a new client if we do not already have one.\n if \"operations_client\" not in self.__dict__:\n self.__dict__[\"operations_client\"] = operations_v1.OperationsClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self.__dict__[\"operations_client\"]", "def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return 
cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)", "def raw_client(self):\r\n warnings.warn(\"raw_client is deprecated. use self.client.get_client instead\",\r\n DeprecationWarning, stacklevel=2)\r\n return self.client.get_client(write=True)", "def GetClientFromFlags():\n log_request = FLAGS.log_request or FLAGS.log_request_response\n log_response = FLAGS.log_response or FLAGS.log_request_response\n api_endpoint = apitools_base.NormalizeApiEndpoint(FLAGS.api_endpoint)\n additional_http_headers = dict(x.split('=', 1) for x in FLAGS.add_header)\n credentials_args = {\n 'service_account_json_keyfile': os.path.expanduser(FLAGS.service_account_json_keyfile)\n }\n try:\n client = client_lib.IamV1(\n api_endpoint, log_request=log_request,\n log_response=log_response,\n credentials_args=credentials_args,\n additional_http_headers=additional_http_headers)\n except apitools_base.CredentialsError as e:\n print 'Error creating credentials: %s' % e\n sys.exit(1)\n return client", "def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. 
authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise", "def client(self):\n return self._dxl_client", "def get_client() -> RabbitmqClient:\n # Replace the parameters with proper values for host, port, login and password\n # Change the value of exchange if needed.\n #\n # For any parameter that is not given here, the client tries to use a value from an environment variable\n # and most of the parameters also have a default value that is used if neither the constructor parameter\n # nor the environmental variable exist.\n # See tools/clients.py for details about the environmental variables and the default values.\n return RabbitmqClient(\n host=\"\",\n port=0,\n login=\"\",\n password=\"\",\n exchange=\"procem.examples_testing\",\n ssl=True,\n ssl_version=\"PROTOCOL_TLS\",\n exchange_autodelete=True,\n exchange_durable=False\n )", "def get_dropbox_client():\n return dropbox.client.DropboxClient(settings.DROPBOX_TOKEN)", "def get_api_ironic_client(get_session):\n def _get_api_ironic_client(version, is_api):\n if version == '1':\n if is_api:\n return api_clients.IronicApiClientV1(session=get_session())\n else:\n return client_v1.get_client(api_version=version,\n session=get_session())\n\n raise ValueError(\"Unexpected ironic version: {!r}\".format(version))\n\n return _get_api_ironic_client", "def client(self):\n app.testing = True\n client = app.test_client()\n\n with app.app_context():\n yield client", "def _get_auth_client(self, request):\n if self._auth_client is None:\n # Use PyFxa defaults if not specified\n server_url = fxa_conf(request, 'oauth_uri')\n auth_cache = self._get_cache(request)\n self._auth_client = OAuthClient(server_url=server_url, cache=auth_cache)\n\n return self._auth_client", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def _get_user_client(self):\n return api.OAuthClient(settings.CLIENT_ID, settings.CLIENT_SECRET, 
settings.USER, settings.PASSWORD)", "def client(self) -> WebClient:\n return WebClient(**self._get_conn_params())", "def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def get_client(brand: Optional[str] = None,\n client_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClientResult:\n __args__ = dict()\n __args__['brand'] = brand\n __args__['clientId'] = client_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('gcp:iap/getClient:getClient', __args__, opts=opts, typ=GetClientResult).value\n\n return AwaitableGetClientResult(\n brand=pulumi.get(__ret__, 'brand'),\n client_id=pulumi.get(__ret__, 'client_id'),\n display_name=pulumi.get(__ret__, 'display_name'),\n id=pulumi.get(__ret__, 'id'),\n secret=pulumi.get(__ret__, 'secret'))", "def operations_client(self) -> operations_v1.OperationsClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsClient(self.grpc_channel)\n\n # Return the client from cache.\n return self._operations_client", "def pure_client():\n return VoximplantClient(\n host='https://api.host.com',\n account_id='100500',\n api_key='secret',\n )", "def __get_client(self):\n flow = flow_from_clientsecrets(self.secrets_file,\n message=self.MISSING_SECRETS_MSG,\n scope=self.YOUTUBE_READ_WRITE_SCOPE)\n\n # TODO: Fix this! Came with boilerplate\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\n credentials = storage.get()\n\n if credentials is None or credentials.invalid:\n flags = argparser.parse_args()\n credentials = run_flow(flow, storage, flags)\n\n youtube = build(self.YOUTUBE_API_SERVICE_NAME,\n self.YOUTUBE_API_VERSION,\n http=credentials.authorize(httplib2.Http()))\n return youtube", "def operations_client(self) -> operations_v1.OperationsAsyncClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client", "def operations_client(self) -> operations_v1.OperationsAsyncClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client", "def get_client(ip, port=8088, activity_callback=None):\n func = harmony_client.create_and_connect_client(ip, port, activity_callback)\n return run_in_loop_now('get_client', func)", "def django_client() -> Client:\n\n return Client()", "def get_cinder(self, version='2'):\n if self.cinder is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.cinder = cinderclient.Client(version,\n session=self.get_session(),\n interface=iface)\n return self.cinder", "def _http_client(self):\n\n self.__enforce_connected()\n return self.collection._http_client", "def GetApiClient(creds, api_service_name=None, api_version=None):\n if api_service_name is None:\n api_service_name = DEFAULT_API_SERVICE_NAME\n if api_version is None:\n api_version = 
DEFAULT_API_VERSION\n\n base_http_client = httplib2.Http()\n auth_http_client = creds.authorize(base_http_client)\n ab_client = apiclient.discovery.build(api_service_name, api_version,\n http=auth_http_client)\n return ab_client", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def client(self):\n return self._thread._client", "def typesense_client():\n client = typesense.Client({\n 'nodes': [{\n 'host': settings.TYPESENSE_HOST,\n 'port': settings.TYPESENSE_PORT,\n 'protocol': settings.TYPESENSE_PROTOCOL,\n }],\n 'api_key': settings.TYPESENSE_API_KEY,\n 'connection_timeout_seconds': settings.TYPESENSE_CONN_TIMEOUT,\n })\n return client" ]
[ "0.8825718", "0.7007617", "0.69371164", "0.6927475", "0.6900914", "0.68201137", "0.6812655", "0.6797418", "0.6797418", "0.6663279", "0.6625562", "0.6587854", "0.65376866", "0.6535913", "0.65322053", "0.6514193", "0.64772695", "0.64697844", "0.6460001", "0.64531165", "0.64016825", "0.6366114", "0.63543135", "0.63537127", "0.6349735", "0.634593", "0.6321513", "0.6312319", "0.6306634", "0.6287351", "0.6282323", "0.6274147", "0.6273042", "0.62480485", "0.61743873", "0.6161696", "0.61546624", "0.6137277", "0.6118562", "0.61092687", "0.6092889", "0.6081891", "0.60818076", "0.6016784", "0.6005252", "0.59917074", "0.5991363", "0.5982342", "0.5975139", "0.5969926", "0.5962039", "0.59588546", "0.59257287", "0.5916146", "0.5912942", "0.59037745", "0.58994263", "0.5893384", "0.5892108", "0.58899516", "0.5885425", "0.58675456", "0.5852548", "0.58509815", "0.58498704", "0.5835383", "0.5793565", "0.5784692", "0.57810515", "0.57788867", "0.57728285", "0.5768833", "0.5765682", "0.5738639", "0.5737487", "0.5736156", "0.5734189", "0.5723358", "0.5719278", "0.570171", "0.56916076", "0.56875455", "0.5687011", "0.56865203", "0.56664604", "0.5666457", "0.5663011", "0.56588715", "0.56489235", "0.5635183", "0.5632024", "0.5632024", "0.56204224", "0.5617851", "0.5609859", "0.5608046", "0.5603673", "0.5597206", "0.55875653", "0.5579513" ]
0.7610261
1
Get a cinder client instance.
def get_cinder(self, version='2'):
    if self.cinder is None:
        iface = os.getenv('OS_ENDPOINT_TYPE', "public")
        self.cinder = cinderclient.Client(version,
                                          session=self.get_session(),
                                          interface=iface)
    return self.cinder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def get_client(self):\n return self.client", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def get_client():\n return Client(__address, authkey='strumamor')", "def Client(self):\n return self._client", "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def client(self):\n return self._client", "def get_client():\n\n return MongoClientManager().client", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def client():\n\n client = Client()\n return client", "def client(self):\n\n return self._client", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def client():\n return Client(**common_data.AUTH_ARGS)", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client", "def client(self) -> 'BaseClient':\n return self", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n 
ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)", "def get_client():\n return storage.Client(project=project_id)", "def _client():\n nonlocal client\n if not client:\n client = twitter.Twitter(authenticator)\n return client", "def _get_client(self, requester_name: str) -> Any:\n return self.datastore.get_client_for_requester(requester_name)", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")", "def base_client(self):\n return self._client", "def get_vc3_client():\n c = SafeConfigParser()\n c.readfp(open(app.config['VC3_CLIENT_CONFIG']))\n\n try:\n client_api = client.VC3ClientAPI(c)\n return client_api\n except Exception as e:\n app.logger.error(\"Couldn't get vc3 client: {0}\".format(e))\n raise", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def get_conn(self) -> WebClient:\n return self.client", "async def get_client(\n self,\n request: Request,\n client_id: str,\n client_secret: Optional[str] = None,\n ) -> Optional[OAuth2Client]:\n\n client_record = await self._db.query_one(\n Client.select(*OAuth2Client._fields, filters=\".id = <uuid>$id\"),\n id=client_id,\n )\n client_record = Client.from_obj(client_record)\n\n if client_record is not None:\n return OAuth2Client(\n client_id=client_record.client_id,\n client_secret=client_record.client_secret,\n grant_types=client_record.grant_types,\n response_types=client_record.response_types,\n redirect_uris=client_record.redirect_uris,\n scope=client_record.scope,\n )", "def service_client(self):\n\n return self._service_client", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client", "def _client(self):\n\n if self._suds_client is None:\n self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return response.json()", "async def get_docker_client(self) -> \"DockerClient\":", "def django_client() -> 
Client:\n\n return Client()", "def client(self) -> mqtt.Client:\n return self._client", "def get_client(args):\n if args.auth_strategy == 'userpass':\n creds = {'username': args.username, 'password': args.password}\n else:\n creds = None\n\n try:\n client = Client(rucio_host=args.host, auth_host=args.auth_host,\n account=args.account,\n auth_type=args.auth_strategy, creds=creds,\n ca_cert=args.ca_certificate, timeout=args.timeout)\n except CannotAuthenticate, error:\n logger.error(error)\n if not args.auth_strategy:\n if 'RUCIO_AUTH_TYPE' in os.environ:\n auth_type = os.environ['RUCIO_AUTH_TYPE']\n else:\n try:\n auth_type = config_get('client', 'auth_type')\n except (NoOptionError, NoSectionError):\n logger.error('Cannot get AUTH_TYPE')\n sys.exit(FAILURE)\n if auth_type == 'x509_proxy':\n logger.error('Please verify that your proxy is still valid and renew it if needed.')\n sys.exit(FAILURE)\n return client", "def get(self, id: int) -> Client:\n\n return self.__clients[id]", "def client(self) -> WebClient:\n return WebClient(**self._get_conn_params())", "def getClient(self):\n authDict = Configuration().read_auth_data_from_config()\n client_key = authDict['client_key']\n client_secret = authDict['client_secret']\n token = authDict['token']\n token_secret = authDict['token_secret']\n\n authorize_OAuth_ob = authorizeOAuth.AuthorizeOAuth(client_key,\n client_secret,\n token,\n token_secret,\n Configuration().read_board_id_config())\n\n trello_client_wrapper = authorize_OAuth_ob.getClient()\n self.set_list(trello_client_wrapper)\n return trello_client_wrapper", "def client(self):\n return self._thread._client", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def _get_client(self, region_name):\n if region_name not in self._clients[self.SERVICE_NAME]:\n self._clients[self.SERVICE_NAME][region_name] = self._create_client()\n\n return self._clients[self.SERVICE_NAME][region_name]", "def client(self):\n client = VMwareClient(self.host, verify_ssl=False)\n client.login(self.settings.username, self.settings.password)\n return client", "def get_api_client():\n\n global _API_CLIENT_HANDLE\n\n if not _API_CLIENT_HANDLE:\n context = get_context()\n server_config = context.get_server_config()\n\n pc_ip = server_config.get(\"pc_ip\")\n pc_port = server_config.get(\"pc_port\")\n username = server_config.get(\"pc_username\")\n password = server_config.get(\"pc_password\")\n\n update_api_client(host=pc_ip, port=pc_port, auth=(username, password))\n\n return _API_CLIENT_HANDLE", "def decapod_client(get_decapod_client):\n return get_decapod_client()", "def resource_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ResourceManagementClient)\n return self.client", "def api_client() -> APIClient:\n return APIClient()", "def get_client_instance(cls, session, client_config, create=False):\n client = None\n if cls.SESSION_ID_KEY in session:\n client = session[cls.SESSION_ID_KEY]\n log.debug(\"Found OAuth client in session.\")\n if client is None and create:\n client = cls(client_config)\n session[cls.SESSION_ID_KEY] = client\n session.save()\n log.debug(\"No OAuth client in session - created new one.\")\n return 
client", "def api_client() -> APIClient:\n\n return APIClient()", "def get_client(service_account_json):\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = '{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)", "def compute_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ComputeManagementClient)\n return self.client", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def _http_client(self):\n\n self.__enforce_connected()\n return self.collection._http_client", "def get_dropbox_client():\n return dropbox.client.DropboxClient(settings.DROPBOX_TOKEN)", "def client(websession: aiohttp.ClientSession) -> RenaultClient:\n return RenaultClient(\n session=get_logged_in_session(websession),\n )", "def get_decapod_client():\n def _get_decapod_client():\n return decapodclient.V1Client(url=config.DECAPOD_URL,\n login=config.DECAPOD_LOGIN,\n password=config.DECAPOD_PASSWORD)\n\n return _get_decapod_client", "def _get_route_reflector_client(self):\n return self.__route_reflector_client", "def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)", "def load_ckanclient(self):\n user = get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})\n api_url = urlparse.urljoin(config.get('ckan.site_url'), 'api')\n ckan = ckanclient.CkanClient(\n base_location=api_url,\n api_key=user.get('apikey'),\n is_verbose=True,\n )\n\n return ckan", "def get_client(public_key: str, secret_key: str, **_):\n razorpay_client = razorpay.Client(auth=(public_key, secret_key))\n return razorpay_client", "def make_rest_client(\n 
service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def get_handler(cls):\n if cls.__instance is None:\n cls.__instance = AliceBlueApi()\n return cls.__instance", "def get_client_adapter(self, is_controller=True, is_cli=False):\n from datrium.rpc.client.impl.http_remote_method_invoker import HTTPRemoteMethodInvoker\n return HTTPRemoteMethodInvoker(is_controller=is_controller, is_cli=is_cli)", "def get_client(httpx_settings: Optional[dict] = None) -> \"PrefectClient\":\n ctx = prefect.context.get_settings_context()\n api = PREFECT_API_URL.value()\n\n if not api:\n # create an ephemeral API if none was provided\n from prefect.server.api.server import create_app\n\n api = create_app(ctx.settings, ephemeral=True)\n\n return PrefectClient(\n api,\n api_key=PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )", "def get_client(profile_name, region_name, svc, boto_client_params={}):\n session = get_session(profile_name, region_name)\n client = session.client(svc, **boto_client_params)\n return client", "def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)", "def _get_mongo_client():\n return pymongo.MongoClient(mongo_uri)", "def _get_client(self):\n return Github(\n base_url=github_trigger.api_endpoint(),\n login_or_token=self.auth_token if self.auth_token else github_trigger.client_id(),\n password=None if self.auth_token else github_trigger.client_secret(),\n timeout=5,\n )", "def get_client_for_requester(self, requester_name: str) -> ProlificClient:\n return self.get_session_for_requester(requester_name)", "def client(self):\n return self._dxl_client", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. 
authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise", "def _get_auth_client(self, request):\n if self._auth_client is None:\n # Use PyFxa defaults if not specified\n server_url = fxa_conf(request, 'oauth_uri')\n auth_cache = self._get_cache(request)\n self._auth_client = OAuthClient(server_url=server_url, cache=auth_cache)\n\n return self._auth_client", "def fetch_boto3_client(service_name: str):\n region_name = load_aws_region_name()\n cache_key = f\"{region_name}-{service_name}\"\n\n if CLIENT_CACHE.get(cache_key):\n return CLIENT_CACHE[cache_key]\n\n config = Config(\n region_name=region_name,\n signature_version=\"v4\",\n retries={\"max_attempts\": 10, \"mode\": \"standard\"},\n )\n client = boto3.client(service_name, config=config) # type: ignore\n\n CLIENT_CACHE[cache_key] = client\n\n return client", "def get_client(host, port=None, username=None,\n password=None, tenant=None,\n auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)", "def get_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = GetClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def client(self):\n app.testing = True\n client = app.test_client()\n\n with app.app_context():\n yield client", "def redis_client(self) -> Redis:\n if self._redis_client is None:\n redis_client = Redis(connection_pool=self.redis_conn_pool)\n\n self._redis_client = redis_client\n\n self._logger.debug(\n \"[%s]: Initialized Redis client: %s\", self.__name__, self._redis_client\n )\n\n return self._redis_client", "def operations_client(self) -> operations_v1.OperationsClient:\n # Sanity check: Only create a new client if we do not already have one.\n if \"operations_client\" not in self.__dict__:\n self.__dict__[\"operations_client\"] = operations_v1.OperationsClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self.__dict__[\"operations_client\"]", "def _getClient(self, app_token=None):\n if app_token is None:\n from . 
import models\n app_token = models.Aplicacion.objects.get(app_id=self.app_id).app_token\n return api.OAuthAppClient(settings.CLIENT_ID, settings.CLIENT_SECRET, self.app_id, app_token)", "def _get_user_client(self):\n return api.OAuthClient(settings.CLIENT_ID, settings.CLIENT_SECRET, settings.USER, settings.PASSWORD)", "def getInstance():\n if Car.inst is None: Car.inst = Car()\n return Car.inst", "def operations_client(self) -> operations_v1.OperationsAsyncClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client", "def operations_client(self) -> operations_v1.OperationsAsyncClient:\n # Quick check: Only create a new client if we do not already have one.\n if self._operations_client is None:\n self._operations_client = operations_v1.OperationsAsyncClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self._operations_client", "def get_client(self, service):\n try:\n return boto3.client(service, region_name=self.region, config=self.proxy_config)\n except ClientError as e:\n fail(\"AWS %s service failed with exception: %s\" % (service, e))", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)" ]
[ "0.73013985", "0.72929657", "0.6969015", "0.6965939", "0.6891236", "0.6887506", "0.6877181", "0.68472075", "0.67757213", "0.6775586", "0.67501026", "0.6738668", "0.6736952", "0.6705865", "0.6692368", "0.6692368", "0.6643166", "0.66352206", "0.66271734", "0.659036", "0.6577383", "0.6570557", "0.6537029", "0.6479422", "0.6460731", "0.64103746", "0.63697803", "0.63651067", "0.630351", "0.627443", "0.6261896", "0.6252973", "0.625175", "0.6234725", "0.62333035", "0.6227105", "0.6190839", "0.61789876", "0.61542165", "0.6103414", "0.6100714", "0.60721475", "0.6065914", "0.6060051", "0.6053513", "0.6035415", "0.60342705", "0.60309404", "0.5999695", "0.59968066", "0.59923095", "0.5988702", "0.5978299", "0.59687036", "0.59421676", "0.59384674", "0.5932278", "0.5921993", "0.5916509", "0.58975565", "0.588477", "0.5882175", "0.588208", "0.5878701", "0.5839597", "0.5832287", "0.58222824", "0.582187", "0.58129734", "0.5808456", "0.5796144", "0.5791118", "0.57884645", "0.57818735", "0.57702863", "0.5756057", "0.575373", "0.57470304", "0.5734509", "0.57251835", "0.57038885", "0.57030296", "0.5700515", "0.5696694", "0.5691291", "0.5689002", "0.5680519", "0.5676583", "0.5673659", "0.5659796", "0.5653885", "0.5652598", "0.56501925", "0.5642539", "0.56419", "0.5641525", "0.5641525", "0.56392074", "0.5637972", "0.5626993" ]
0.7038478
2
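The get_cinder accessor in the record above lazily builds and caches a python-cinderclient Client on top of a keystone session. The sketch below shows one way the surrounding class might be wired up; the OpenStackClients name, the auth_kwargs fields, and the keystoneauth1 session plumbing are assumptions for illustration, not part of the record.

import os

from cinderclient import client as cinderclient
from keystoneauth1 import loading, session


class OpenStackClients:
    # Hypothetical owner class for the accessor; only get_cinder mirrors the record.
    def __init__(self, auth_kwargs):
        # auth_kwargs is assumed to hold auth_url, username, password, project_name.
        self.auth_kwargs = auth_kwargs
        self.cinder = None

    def get_session(self):
        # Assumed helper: build a keystone session from password credentials.
        loader = loading.get_plugin_loader('password')
        auth = loader.load_from_options(**self.auth_kwargs)
        return session.Session(auth=auth)

    def get_cinder(self, version='2'):
        # Same lazy-caching pattern as the document field above.
        if self.cinder is None:
            iface = os.getenv('OS_ENDPOINT_TYPE', "public")
            self.cinder = cinderclient.Client(version,
                                              session=self.get_session(),
                                              interface=iface)
        return self.cinder

A caller would then obtain the client with, for example, OpenStackClients(creds).get_cinder().volumes.list(), reusing the cached instance on subsequent calls.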
Get a swift client.Connection instance.
def get_swift(self):
    if self.swift is None:
        self.swift = swiftclient.Connection(
            auth_version='3',
            authurl=self.auth_kwargs["auth_url"],
            user=self.auth_kwargs["username"],
            key=self.auth_kwargs["password"],
            tenant_name=self.auth_kwargs["project_name"]
        )
    return self.swift
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_connection(self):\n c = httplib.HTTPConnection(self.server)\n return c", "def get_swiftclient():\n swift_conn = swiftclient.client.Connection(\n authurl=os.environ.get(\"OS_AUTH_URL\"),\n user=os.environ.get(\"OS_USERNAME\"),\n key=os.environ.get(\"OS_PASSWORD\"),\n tenant_name=os.environ.get(\"OS_TENANT_NAME\"),\n auth_version=\"2.0\",\n )\n return swift_conn", "def get_connection(self):\n from pymongo.connection import Connection\n \n if self._connection is None:\n self._connection = Connection(self.host, self.port)\n return self._connection", "def __get_connection(self) -> HTTPConnection:\n return HTTPConnection(self.__host, self.__port)", "def _make_swift_connection(self, auth_url, user, key):\n snet = self.snet\n logger.debug(_(\"Creating Swift connection with \"\n \"(auth_address=%(auth_url)s, user=%(user)s, \"\n \"snet=%(snet)s)\") % locals())\n return swift_client.Connection(\n authurl=auth_url, user=user, key=key, snet=snet)", "def __GetConnection(self):\n\n self.conn = httplib.HTTPConnection(BLIP_API_URL)\n return self.conn", "def connection(self) -> \"Connection[Any]\":\n return self._conn", "def get_connection(self):\n if self.conn is None or self.conn.closed != 0:\n self._connect()\n logger.debug(f'The connection object is: {self.conn}.')\n return self.conn", "def connect():\n return connection.Connection(username=api_user,\n api_key=api_key,\n region=api_region)", "def connection(self) -> Connection:\n if not self._connection:\n self._connection = self.engine.connect()\n\n return self._connection", "def get(self, conn_alias: str) -> \"BaseDBAsyncClient\":\n storage: Dict[str, \"BaseDBAsyncClient\"] = self._get_storage()\n try:\n return storage[conn_alias]\n except KeyError:\n connection: BaseDBAsyncClient = self._create_connection(conn_alias)\n storage[conn_alias] = connection\n return connection", "def get_conn(self) -> ServiceBusClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusClient.from_connection_string(connection_string, logging_enable=True)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = DefaultAzureCredential()\n client = ServiceBusClient(\n fully_qualified_namespace=fully_qualified_namespace,\n credential=credential, # type: ignore[arg-type]\n )\n\n self.log.info(\"Create and returns ServiceBusClient\")\n return client", "def get_connection(self):\n return self._connection", "def get_connection(self):\n return self._connection", "def get_connection(self):\n if self.__connection is None:\n from pymongo import MongoClient\n from ir_config import IRConfig\n self.__connection = MongoClient(\n IRConfig.get_instance().get('db_host', self.__default_host), \n IRConfig.get_instance().get_int('db_port', self.__default_port))\n return self.__connection", "def get_connection(self):\n return self.connection", "def get_connection(self):\n return self.connection", "def connection(self, connection=None):\n if connection is None:\n return self.engine.acquire()\n return ConnectionProxy(connection=connection)", "def get_connection(self):\n return self.application.get_connection()", "def connection(self):\n try:\n con = self.thread.connection\n except AttributeError:\n con = self.steady_connection()\n self.thread.connection = con\n return con", "def 
get_connection(self, timeout=None):\n timeout = timeout if timeout else self.timeout\n if self.ssl:\n return HTTPSConnection(self.hostname, self.port, timeout=timeout)\n return HTTPConnection(self.hostname, self.port, timeout=timeout)", "def _get_connection(self, conf):\n return get_session()", "def get_conn(self) -> WebClient:\n return self.client", "def connection():\n return _MockConnection()", "def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]", "def get_connection(hostname, logger):\n return Connection(\n hostname,\n logger=logger,\n sudo=needs_sudo(),\n )", "def get_conn(self):\n conn = self.get_connection(self.conn_id)\n service_options = conn.extra_dejson\n return BlockBlobService(account_name=conn.login,\n account_key=conn.password, **service_options)", "def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(self.query(f'{CONNECTION_URL}/{conn_id}'))", "def get_connection(self):\n\n\t\treturn dbapi.connect(credentials.SERVER,\\\n\t\t\t\t\t\t\t credentials.PORT,\\\n\t\t\t\t\t\t\t credentials.USER,\\\n\t\t\t\t\t\t\t credentials.PASSWORD)", "async def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(await self.query(f'{CONNECTION_URL}/{conn_id}'))", "def getConnection(server, port):\n connectionUrl = 'http://%s:%s/' % (server, port)\n\n conn = Connection(connectionUrl, 'v0')\n return conn", "def get_connection(self):\n\t\tfrom pymongo import MongoClient\n\n\t\tif self._connection is None:\n\t\t\tself._connection = MongoClient(host=self.url, max_pool_size=10)\n\n\t\treturn self._connection", "def getConnection(self):\n\n return self._connection", "def getconnection(self):\n # If we were able to create the shim_tcpsocket, then we attempt to call\n # getconnection() on the shim tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. 
The reason for this is to ensure that even\n # if the shimstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['shim_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['shim_tcpsocket'].getconnection()\n return (rip, rport, ShimSocket(sockobj, self.shim_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()", "def connection(self):\n return self.get_connection()", "def get_connection(self, timeout=None):\n return BlockingConnection(\n ConnectionParameters(host=self.host,\n credentials=self.get_credentials(),\n connection_attempts=1,\n retry_delay=0,\n socket_timeout=timeout))", "def get_conn(self, *args, **kwargs):\n connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)\n\n if len(connections) == 1:\n return connections[0]\n else:\n return connections", "async def get(self):\n if self._connect_kwargs == None:\n raise IllegalAccessError(\"DB connection parameters not set yet\")\n\n if not hasattr(self._tl, \"conn\"):\n self._tl.conn = await r.connect(**self._connect_kwargs)\n\n return self._tl.conn", "def _connection(self) -> dropbox.Dropbox:\n if not hasattr(self._per_thread, \"connection\"):\n self._per_thread.connection = self._token.connect()\n return self._per_thread.connection", "async def get_connection(self, username: str, password: str) -> asyncssh.connect:\n conn = await asyncssh.connect(self.ip, known_hosts=None, username=username, password=password,\n server_host_key_algs=['ssh-rsa'])\n # return created connection\n return conn", "def get_connection():\n\n return MongoClientManager().client.__getattr__(MONGODB_SETTINGS['db'])", "def get_backend():\n return Connection()", "def get_client(conn):\n # No database indicates a cluster connection\n if not conn.get('db', None):\n conn.pop('db', None)\n return connect_redis_cluster(conn)\n\n # Otherwise it's a regular redis connection\n return connect_redis(conn)", "def get_conn(self):\n self.conn = self.get_client_type('sns')\n return self.conn", "def get_conn(args):\n\n # connect this thing\n from pyVmomi import vim\n from pyVim.connect import SmartConnect, Disconnect\n import atexit\n try:\n si = SmartConnect(host=args.host, port=args.port, user=args.user, pwd=args.password)\n except Exception as exc:\n if isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg:\n try:\n import ssl\n default_context = ssl._create_default_https_context\n ssl._create_default_https_context = ssl._create_unverified_context\n si = SmartConnect(\n host=args.host,\n port=args.port,\n user=args.user,\n pwd=args.password,\n )\n ssl._create_default_https_context = default_context\n except Exception as exc1:\n raise Exception(exc1)\n else:\n import ssl\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n context.verify_mode = ssl.CERT_NONE\n si = SmartConnect(\n host=args.host,\n port=args.port,\n user=args.user,\n pwd=args.password,\n sslContext=context)\n atexit.register(Disconnect, si)\n return si", "def connection(self) -> Connection:\n\n if not self.is_connected:\n state = current_app.extensions[\"zodb\"]\n connection = _app_ctx_stack.top.zodb_connection = state.db.open()\n _app_ctx_stack.top.zodb_transfers = connection.getTransferCounts()\n connection_opened.send()\n transaction.begin()\n return _app_ctx_stack.top.zodb_connection", "def get_conn(cls):\n\n if not cls.conn or not cls.conn.open:\n cls.connect()\n\n try:\n cls.conn.ping() # ping to test if the current 
conn is working\n except MySQLdb.OperationalError:\n cls.connect()\n\n return cls.conn", "def get_conn(self):\n http_authorized = self._authorize()\n return build('storage', 'v1', http=http_authorized)", "def _get_connection(self, request_host, request_port, is_ssl):\n connection_host = request_host\n connection_port = request_port\n system_proxy = self._get_system_proxy(is_ssl)\n if system_proxy:\n connection_host = system_proxy.host\n connection_port = system_proxy.port\n\n # Use an IP address because WPR may override DNS settings.\n if self._real_dns_lookup:\n connection_ip = self._real_dns_lookup(connection_host)\n if not connection_ip:\n logging.critical(\n 'Unable to find IP for host name: %s', connection_host)\n return None\n connection_host = connection_ip\n\n if is_ssl:\n connection = DetailedHTTPSConnection(connection_host, connection_port)\n if system_proxy:\n connection.set_tunnel(request_host, request_port)\n else:\n connection = DetailedHTTPConnection(connection_host, connection_port)\n return connection", "def connection(self):\n return self._connection", "def connection(self):\n return self._connection", "def connection(self):\n return self._connection", "def _unthreadsafe_get_connection(self):\n return PooledDBConnection(self, self._queue.get())", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def connect(self, **kw_params):\r\n if self.connection_cls:\r\n return self.connection_cls(region=self, **kw_params)", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def retrieve(self, connectionId) :\n conn = None\n\n try :\n conn = self.remoteConnections[connectionId]\n except :\n print 'Error retrieving connection with id ' + connectionId\n\n return conn", "def __make_connection(self):\n return self.euca.make_connection()", "def get_http_connection(host, is_secure):\n return self.conn.new_http_connection(host, is_secure)", "def buildProtocol(self, addr):\n return ClientConnection()", "def get_connection(self, account_id, datacenter=None, network=None):\r\n if not self.client.auth \\\r\n or not getattr(self.client.auth, 'username', None) \\\r\n or not getattr(self.client.auth, 'api_key', None):\r\n raise SoftLayerError(\r\n 'Client instance auth must be BasicAuthentication.')\r\n\r\n client = MessagingConnection(\r\n account_id, endpoint=self.get_endpoint(datacenter, network))\r\n client.authenticate(self.client.auth.username,\r\n self.client.auth.api_key)\r\n return client", "def get_vc_connection(self):\n\n if self._vc_connection:\n # if connected return the connection.\n if self._vc_connection.is_connected(5) and \\\n self._vc_connection.is_peer_connected(5):\n _log.debug('Returning current connection')\n return self._vc_connection\n\n _log.debug(\"Resetting connection as the peer wasn't responding.\")\n # reset the connection so we can try it again below.\n self._vc_connection.kill()\n self._vc_connection = None\n\n def sync_status_to_vc(status, context):\n \"\"\"\n Sync the status of the current vcp object with that of the one that\n is connected to the vc instance.\n\n :param status:\n :param context:\n \"\"\"\n conn = self._vc_connection\n conn.vip.health.set_status(status, context)\n\n self.vip.health.add_status_callback(sync_status_to_vc)\n\n def enable_connection_heartbeat():\n \"\"\"\n Start publishing the heartbeat with the status messages.\n \"\"\"\n conn = self._vc_connection\n status = 
self.vip.health.get_status()\n conn.vip.health.set_status(\n status['status'], status['context']\n )\n conn.vip.heartbeat.start()\n\n # We are going to use an identity of platform.address_hash for\n # connections to vc. This should allow us unique connection as well\n # as allowing the vc to filter on the heartbeat status of the pubsub\n # message to determine context.\n vcp_identity_on_vc = 'platform.'\n\n # First check to see if there is a peer with a volttron.central\n # identity, if there is use it as the manager of the platform.\n peers = self.vip.peerlist().get(timeout=5)\n if VOLTTRON_CENTRAL in peers:\n _log.debug('VC is a local peer, using {} as instance_id'.format(\n self._instance_id))\n self._vc_connection = build_agent(\n self.core.address,\n # peer=VOLTTRON_CENTRAL,\n publickey=self.core.publickey,\n secretkey=self.core.secretkey,\n serverkey=self._vc_serverkey,\n identity=self._instance_id,\n agent_class=VCConnection\n )\n self._vc_connection.set_main_agent(self)\n if self._vc_connection.is_connected() and \\\n self._vc_connection.is_peer_connected():\n _log.debug(\"Connection has been established to local peer.\")\n else:\n _log.error('Unable to connect to local peer!')\n if self._vc_connection.is_connected():\n enable_connection_heartbeat()\n\n return self._vc_connection\n\n if self._vc_address is None or self._vc_serverkey is None:\n _log.warn('volttron_central_address is None in config store '\n 'and volttron.central is not a peer.')\n _log.warn('Recommend adding volttron.central address or adding a '\n '\"config\" file to the config store.')\n return None\n\n self._vc_connection = build_agent(\n identity=vcp_identity_on_vc,\n # peer=VOLTTRON_CENTRAL,\n address=self._vc_address,\n serverkey=self._vc_serverkey,\n publickey=self.core.publickey,\n secretkey=self.core.secretkey,\n agent_class=VCConnection\n )\n\n self._vc_connection.set_main_agent(self)\n if not self._vc_connection.is_peer_connected():\n _log.error('Peer: {} is not connected to the external platform'\n .format(self._vc_connection.peer))\n self._vc_connection.kill()\n self._vc_connection = None\n self._registration_state = RegistrationStates.NotRegistered\n return None\n\n if self._vc_connection.is_connected():\n enable_connection_heartbeat()\n\n return self._vc_connection", "def _get_client(self):\n self.logger.info('Connecting to MySQL running at \"%s\"...',\n self._connection_params['host'])\n\n # https://dev.mysql.com/doc/connector-python\n return connector.connect(**self._connection_params)", "def connect(self, host, auth):\n return Connection(host, auth)", "def _make_connection(self, callback):\n if isinstance(callback, MethodType):\n connection = _MethodConnection(self, callback)\n else:\n connection = _DirectConnection(self, callback)\n return connection", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)", "def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))", "def client():\n\n client = Client()\n return client", "def get_connection(self, simple_rows=False):\n return self.open(simple_rows)", "def establish_connection(self):\n conninfo = self.client\n for name, default_value in items(self.default_connection_params):\n if not getattr(conninfo, name, None):\n setattr(conninfo, name, default_value)\n if conninfo.hostname == 'localhost':\n conninfo.hostname = '127.0.0.1'\n conn = 
self.Connection(host=conninfo.host,\n userid=conninfo.userid,\n password=conninfo.password,\n login_method=conninfo.login_method,\n virtual_host=conninfo.virtual_host,\n insist=conninfo.insist,\n ssl=conninfo.ssl,\n connect_timeout=conninfo.connect_timeout)\n conn.client = self.client\n return conn", "def get_connection(self, host, login, passwd, conn):\n # force all string values to unicode\n host = unicode(host)\n login = unicode(login)\n passwd = unicode(passwd) if passwd is not None else None\n\n connection = self._search_connection(host, login, passwd, conn)\n\n if (connection is None):\n self._connections_lock.acquireWrite()\n try:\n # we have to search for the connection again after aquireing the write lock\n # as the thread previously holding the write lock may have already added our connection\n connection = self._search_connection(host, login, passwd, conn)\n if (connection is None):\n # create a new connection if a matching connection does not already exist\n connection = wvmConnection(host, login, passwd, conn)\n\n # add new connection to connection dict\n if host in self._connections:\n self._connections[host].append(connection)\n else:\n self._connections[host] = [connection]\n finally:\n self._connections_lock.release()\n\n elif not connection.connected:\n # try to (re-)connect if connection is closed\n connection.connect()\n\n if connection.connected:\n # return libvirt connection object\n return connection.connection\n else:\n # raise libvirt error\n raise libvirtError(connection.last_error)", "def __new__(cls, host=None, user=None, client=None):\n cls.__check_parameters(host=host, user=user)\n if client is None:\n raise InvalidClientException(\"Integrated Client during connection creation can't be None\")\n return super(Connection, cls).__new__(cls, host=host, user=user, client=client)", "def getconnection(self):\n\n # If we were able to create the affix_tcpsocket, then we attempt to call\n # getconnection() on the affix tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. 
The reason for this is to ensure that even\n # if the affixstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['affix_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['affix_tcpsocket'].getconnection()\n return (rip, rport, AffixSocket(sockobj, self.affix_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()", "def get_db_client(self, connection_name: str) -> BaseDBAsyncClient:\n return self._db_client_map[connection_name]", "def _threadsafe_get_connection(self):\n with self._lock:\n next_con = self._nextConnection\n con = PooledDBConnection(self, self._connections[next_con])\n next_con += 1\n if next_con >= len(self._connections):\n next_con = 0\n self._nextConnection = next_con\n return con", "def connection():\n global _connection\n if _connection is None:\n _connection = StrictRedis.from_url(REDIS_URL)\n return _connection", "def get_connection(connection_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n view: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:\n __args__ = dict()\n __args__['connectionId'] = connection_id\n __args__['location'] = location\n __args__['project'] = project\n __args__['view'] = view\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:connectors/v1:getConnection', __args__, opts=opts, typ=GetConnectionResult).value\n\n return AwaitableGetConnectionResult(\n auth_config=pulumi.get(__ret__, 'auth_config'),\n config_variables=pulumi.get(__ret__, 'config_variables'),\n connector_version=pulumi.get(__ret__, 'connector_version'),\n create_time=pulumi.get(__ret__, 'create_time'),\n description=pulumi.get(__ret__, 'description'),\n destination_configs=pulumi.get(__ret__, 'destination_configs'),\n envoy_image_location=pulumi.get(__ret__, 'envoy_image_location'),\n image_location=pulumi.get(__ret__, 'image_location'),\n labels=pulumi.get(__ret__, 'labels'),\n lock_config=pulumi.get(__ret__, 'lock_config'),\n log_config=pulumi.get(__ret__, 'log_config'),\n name=pulumi.get(__ret__, 'name'),\n node_config=pulumi.get(__ret__, 'node_config'),\n service_account=pulumi.get(__ret__, 'service_account'),\n service_directory=pulumi.get(__ret__, 'service_directory'),\n ssl_config=pulumi.get(__ret__, 'ssl_config'),\n status=pulumi.get(__ret__, 'status'),\n subscription_type=pulumi.get(__ret__, 'subscription_type'),\n suspended=pulumi.get(__ret__, 'suspended'),\n update_time=pulumi.get(__ret__, 'update_time'))", "def openConnection():\n connection = nj.GraphDatabase.driver(\n uri=URI, auth=nj.basic_auth(USER, PASSWORD))\n return connection", "def _get_connection(self) -> Connection:\n # TODO(101) is there a problem with having just one db connection?\n # Will this cause bugs with failed commits?\n curr_thread = threading.get_ident()\n if curr_thread not in self.conn or self.conn[curr_thread] is None:\n try:\n conn = sqlite3.connect(self.db_path)\n conn.row_factory = StringIDRow\n self.conn[curr_thread] = conn\n except sqlite3.Error as e:\n raise MephistoDBException(e)\n return self.conn[curr_thread]", "def real_conn(self) -> Connection:\n return self._real_conn", "def conn(self):\n conn = self.engine.connect()\n return conn", "def get_client():\n return Client(__address, authkey='strumamor')", "def connection(self):\n return self.session.connection", "def 
_getClientConnection(self):\n self.client = twisted_client.DivvyClient(self.host, self.port, timeout=1.0)\n return self.client.connection.deferred", "def get_connection():\n return MongoClient(\"mongodb://username:password@localhost:27017\")", "def exposed_getconn(self):\n return self._conn", "def _create_connection(self, host, port):\n return pika.BlockingConnection(pika.ConnectionParameters(host=host,\n port=port))", "def connect(self):\n if self.server is not None:\n # TODO might want to give client debug flag option\n self.client = xmpp.Client(server=self.server, port=self.port, debug=[])\n con = self.client.connect(server=(self.server, self.port))\n\n # making helper classes, order is relevant, since roster is used by the others\n self._roster = self._RosterManager(self.client)\n self.iq_handler = self._IQHandler(self._roster, self.client)\n self._pres_manager = self._PresenceManager(self._roster, self.client)\n return con", "def client(self) -> \"SparkConnectClient\":\n return self._client", "def get_connection(self, session_cls=None):\n # If this connection has to be created within an existing session,\n # ``session_cls`` will be provided as an argument.\n # Otherwise, fetch a new ``session_cls`` from ``get_session()``\n if session_cls is None:\n session_cls = self.get_session()\n\n conn = session_cls()\n conn = self._execute_database_specific_connection_statements(conn)\n\n return conn", "def get_conn(self):\n conn = sqlite3.connect(self.uri)\n conn.row_factory = sqlite3.Row\n return conn", "def get_conn(self) -> ServiceBusAdministrationClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusAdministrationClient.from_connection_string(connection_string)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = DefaultAzureCredential()\n client = ServiceBusAdministrationClient(\n fully_qualified_namespace=fully_qualified_namespace,\n credential=credential, # type: ignore[arg-type]\n )\n self.log.info(\"Create and returns ServiceBusAdministrationClient\")\n return client", "def get_socket():\n return socket.create_connection((HOST, PORT))", "def get_connection(cls):\n return cls.database.connection", "def Client(self):\n return self._client", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def connection(self):\n ctx = stack.top\n if ctx is not None:\n if not hasattr(ctx, 'simple_connection'):\n ctx.simple_connection = connect_to_region(\n self.app.config['AWS_REGION'],\n aws_access_key_id = self.app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key = self.app.config['AWS_SECRET_ACCESS_KEY'],\n )\n\n return ctx.simple_connection", "def connect(*, blocking=False, **kwargs):\n from .config import Config\n from .grpc.conn_aio import Connection as GrpcConnection\n\n if blocking:\n from .blocking._aiowrapper import ConnectionThunk\n\n return ConnectionThunk(lambda: GrpcConnection(Config.create(**kwargs)))\n else:\n cfg = Config.create(**kwargs)\n conn = GrpcConnection(cfg)\n\n return conn", "def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n if not self.connection.opened():\n logger.info(\"connection is closed\")\n return self.reconect()\n\n if self.connection.opened():\n 
return self.connection\n try:\n self.connection = connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection" ]
[ "0.72027934", "0.7086128", "0.6808257", "0.67928064", "0.6765041", "0.6715244", "0.66940707", "0.66649985", "0.6593805", "0.6565049", "0.65543133", "0.6501697", "0.64977556", "0.64977556", "0.6481265", "0.6448724", "0.6448724", "0.64482147", "0.6444177", "0.643537", "0.64260095", "0.640639", "0.6387979", "0.6386397", "0.63863015", "0.6384387", "0.637795", "0.6331377", "0.6307586", "0.63047826", "0.63004154", "0.62573373", "0.62164253", "0.6214887", "0.6207792", "0.6203459", "0.62022585", "0.6184195", "0.61717415", "0.6154511", "0.61394733", "0.61209935", "0.6084505", "0.60574734", "0.60370207", "0.60214037", "0.60071987", "0.6004653", "0.59981155", "0.5989194", "0.5989194", "0.5989194", "0.5986221", "0.5984841", "0.5982854", "0.5970417", "0.5969393", "0.5956844", "0.59536386", "0.59442604", "0.59407395", "0.5929427", "0.5921854", "0.5916758", "0.591064", "0.5910086", "0.59026253", "0.5900598", "0.5878425", "0.5874571", "0.58734244", "0.5857263", "0.5843772", "0.58350116", "0.5830042", "0.5824662", "0.58169943", "0.58100593", "0.58057606", "0.5804068", "0.5802659", "0.5800145", "0.5798276", "0.57958317", "0.57913", "0.5771743", "0.57595384", "0.5752036", "0.5744037", "0.5741932", "0.57372075", "0.5722242", "0.5711132", "0.57095176", "0.5700162", "0.56913084", "0.5690351", "0.56893575", "0.5683607", "0.5679453" ]
0.7363057
0
Get a keystone client instance.
def get_keystone(self, version='3'): if self.keystone is None: iface = os.getenv('OS_ENDPOINT_TYPE', "public") self.keystone = keystoneclient.Client( version=version, session=self.get_session(), interface=iface) return self.keystone
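A minimal usage sketch, not part of the dataset row above: the cached get_keystone accessor assumes self.get_session() returns a keystoneauth1 session. Under that assumption, an equivalent standalone construction could look like the following; the wrapper class is omitted and the environment-variable credentials are placeholders.

import os
from keystoneauth1.identity import v3
from keystoneauth1 import session as ks_session
from keystoneclient import client as keystoneclient

# Build the kind of session that get_keystone() expects from self.get_session().
auth = v3.Password(
    auth_url=os.environ['OS_AUTH_URL'],
    username=os.environ['OS_USERNAME'],
    password=os.environ['OS_PASSWORD'],
    project_name=os.environ['OS_PROJECT_NAME'],
    user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME', 'Default'),
    project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME', 'Default'),
)
sess = ks_session.Session(auth=auth)

# Same discovery-based client call as in the document above, outside the caching wrapper.
keystone = keystoneclient.Client(
    version='3',
    session=sess,
    interface=os.getenv('OS_ENDPOINT_TYPE', 'public'),
)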
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise", "def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl", "def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def internal_keystoneclient(request):\n token = cache.get(CACHE_CLIENT, None)\n old_client = cache.get(CACHE_TOKEN, None)\n if not token:\n #LOG.debug('There is no token cached -> New Password Session')\n idm_password_session = _password_session(request)\n keystoneclient = client.Client(session=idm_password_session)\n cache.set(CACHE_CLIENT, keystoneclient.session.get_token(), INTERNAL_CLIENT_CACHE_TIME)\n cache.set(CACHE_TOKEN, keystoneclient, INTERNAL_CLIENT_CACHE_TIME)\n #LOG.debug('Saved token: %s',keystoneclient.session.get_token())\n else:\n #LOG.debug('There is a cached token! 
(%s)',token)\n old_client._auth_token = token\n keystoneclient = old_client\n\n #LOG.debug('Using token: %s',keystoneclient.session.get_token())\n return keystoneclient", "def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)", "def get_client():\n return Client(__address, authkey='strumamor')", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def get_client(self):\n return self.client", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client", "def _get_keystone_client(self, auth_creds):\n discover = keystone_discover.Discover(**auth_creds)\n\n for version_data in discover.version_data():\n version = version_data[\"version\"][0]\n if version <= 2:\n return keystone_client_v2.Client(insecure=True, **auth_creds)\n elif version == 3:\n return keystone_client_v3.Client(insecure=True, **auth_creds)\n\n raise Exception(\"Failed to discover keystone version \"\n \"for auth_url {0}\".format(\n auth_creds.get(\"auth_url\"))\n )", "def client():\n\n client = Client()\n return client", "def get_client():\n\n return MongoClientManager().client", "def get_client():\n return storage.Client(project=project_id)", "def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client", "def client():\n return Client(**common_data.AUTH_ARGS)", "def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def Client(self):\n return self._client", "def get_client(host, port=None, username=None,\n password=None, tenant=None,\n auth_url=None, auth_strategy=None,\n auth_token=None, region=None,\n is_silent_upload=False, insecure=False):\n\n if auth_url:\n force_strategy = 'keystone'\n else:\n force_strategy = None\n\n creds = dict(username=username,\n password=password,\n tenant=tenant,\n auth_url=auth_url,\n strategy=force_strategy or auth_strategy,\n region=region,\n )\n\n if creds['strategy'] == 'keystone' and not creds['auth_url']:\n msg = (\"--auth_url option or OS_AUTH_URL environment variable \"\n \"required when keystone authentication strategy is enabled\\n\")\n raise exception.ClientConfigurationError(msg)\n\n use_ssl = (creds['auth_url'] is not None and\n creds['auth_url'].find('https') != -1)\n\n client = HeatClient\n\n return client(host=host,\n port=port,\n use_ssl=use_ssl,\n auth_tok=auth_token,\n creds=creds,\n insecure=insecure)", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def client(self):\n return self._client", "def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone 
client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)", "def get_mqtt_client(self, client_id: str) -> Client:\n return Client(client_id)", "def get(self, id: int) -> Client:\n\n return self.__clients[id]", "def client(self):\n\n return self._client", "def client(self) -> mqtt.Client:\n return self._client", "def get_client(public_key: str, secret_key: str, **_):\n razorpay_client = razorpay.Client(auth=(public_key, secret_key))\n return razorpay_client", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def get_client(access_key, secret_key, region='eu-west-1', service='ec2'):\n return boto3.client(\n service,\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_key,\n region_name=region\n )", "def get_esi_client(token=None):\n if token:\n return EsiClient(security=EveClient.get_esi_security(token), headers={'User-Agent': \"Krypted Platform\"})\n else:\n return EsiClient(headers={'User-Agent': \"Krypted Platform\"})", "async def get_client(\n self,\n request: Request,\n client_id: str,\n client_secret: Optional[str] = None,\n ) -> Optional[OAuth2Client]:\n\n client_record = await self._db.query_one(\n Client.select(*OAuth2Client._fields, filters=\".id = <uuid>$id\"),\n id=client_id,\n )\n client_record = Client.from_obj(client_record)\n\n if client_record is not None:\n return OAuth2Client(\n client_id=client_record.client_id,\n client_secret=client_record.client_secret,\n grant_types=client_record.grant_types,\n response_types=client_record.response_types,\n redirect_uris=client_record.redirect_uris,\n scope=client_record.scope,\n )", "def _init_keystone_client(self, username, password, tenant_id, auth_url):\n\n __logger__.debug(\"Init Keystone Client\")\n self.keystone_client = KeystoneClient(username=username, password=password, tenant_id=tenant_id,\n auth_url=auth_url)", "def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)", "def get_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = GetClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def _init_keystone_admin_client(self, api_version):\n self.keystone_sentry = self.d.sentry['keystone'][0]\n keystone_ip = self.keystone_sentry.info['public-address']\n if self._get_openstack_release() >= self.xenial_queens:\n api_version = 3\n client_class = keystone_client.Client\n if api_version == 3:\n client_class = keystone_client_v3.Client\n session, auth = u.get_keystone_session(\n keystone_ip,\n api_version=api_version,\n username='admin',\n password='openstack',\n project_name='admin',\n user_domain_name='admin_domain',\n project_domain_name='admin_domain')\n self.keystone = client_class(session=session)\n 
self.keystone.auth_ref = auth.get_access(session)", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def client(self) -> 'BaseClient':\n return self", "def get_client(httpx_settings: Optional[dict] = None) -> \"PrefectClient\":\n ctx = prefect.context.get_settings_context()\n api = PREFECT_API_URL.value()\n\n if not api:\n # create an ephemeral API if none was provided\n from prefect.server.api.server import create_app\n\n api = create_app(ctx.settings, ephemeral=True)\n\n return PrefectClient(\n api,\n api_key=PREFECT_API_KEY.value(),\n httpx_settings=httpx_settings,\n )", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def get_cinder(self, version='2'):\n if self.cinder is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.cinder = cinderclient.Client(version,\n session=self.get_session(),\n interface=iface)\n return self.cinder", "def redis_client(self) -> Redis:\n return self.app.key_value_store.redis_client", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def getClient(self):\n authDict = Configuration().read_auth_data_from_config()\n client_key = authDict['client_key']\n client_secret = authDict['client_secret']\n token = authDict['token']\n token_secret = authDict['token_secret']\n\n authorize_OAuth_ob = authorizeOAuth.AuthorizeOAuth(client_key,\n client_secret,\n token,\n token_secret,\n Configuration().read_board_id_config())\n\n trello_client_wrapper = authorize_OAuth_ob.getClient()\n self.set_list(trello_client_wrapper)\n return trello_client_wrapper", "def get_client(self):\n token = self.get_access_token()\n if self.client is None:\n credentials = AccessTokenCredentials(token, 'vetware/1.0')\n # credentials = SignedJwtAssertionCredentials(self.email, self.private_key,\n # \"https://www.googleapis.com/auth/calendar\")\n http = credentials.authorize(Http())\n self.client = build('calendar', 'v3', http=http)\n return self.client", "def _getStorageClient(app):\n \n if 
config.get(\"aws_s3_gateway\"):\n log.debug(\"_getStorageClient getting S3Client\")\n client = S3Client(app)\n else:\n log.debug(\"_getStorageClient getting MemClient\")\n client = MemClient(app)\n return client", "def base_client(self):\n return self._client", "def client(self, id):\n return self.query(Client).filter(Client.id == id).one()", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def get_client_instance(cls, session, client_config, create=False):\n client = None\n if cls.SESSION_ID_KEY in session:\n client = session[cls.SESSION_ID_KEY]\n log.debug(\"Found OAuth client in session.\")\n if client is None and create:\n client = cls(client_config)\n session[cls.SESSION_ID_KEY] = client\n session.save()\n log.debug(\"No OAuth client in session - created new one.\")\n return client", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def get_client(self, service, region, account):\n\n client = AwsApi.CLIENTS_CACHE.get((service, region, account))\n if client:\n return client # from cache\n\n if region == '*':\n eprint(\"warn: unknown region ('*'), using the default ('{}')\", self.default_region)\n region = self.default_region\n\n if account == '*':\n eprint(\"warn: unknown account ('*'), using default session\")\n client = self.session.client(\n service,\n region_name=region\n )\n elif account == self.default_account:\n client = self.session.client(\n service,\n region_name=region\n )\n elif self.args.no_input:\n eprint(\"warn: unknown account ('{}') and --no-input set, using default session\", account)\n client = self.session.client(\n service,\n region_name=region\n )\n else:\n account_config = self.config.setdefault('aws', {}).setdefault('accounts', {}).setdefault(account, {})\n if not 'profile' in account_config:\n account_config['profile'] = input(\"Enter configured AWS profile for {}: \".format(account))\n client = boto3.Session(profile_name=account_config['profile']).client(service, region_name=region)\n\n AwsApi.CLIENTS_CACHE[(service, region, account)] = client\n return client", "def get(cls, configuration: HttpClientConfiguration) -> HttpClient:\n client_type = configuration.client_type\n\n if client_type == HttpClientType.UAA:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_UAA)\n\n elif client_type == HttpClientType.CONSOLE:\n return cls._get_instance(configuration, ClientAuthType.LOGIN_PAGE)\n\n elif client_type == HttpClientType.CONSOLE_NO_AUTH:\n return cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.APPLICATION:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.CLOUD_FOUNDRY:\n return cls._get_instance(configuration, ClientAuthType.TOKEN_CF)\n\n elif client_type == HttpClientType.BROKER:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n elif client_type == HttpClientType.WEBHDFS:\n return cls._get_instance(configuration, ClientAuthType.WEBHDFS)\n \n elif client_type == HttpClientType.SERVICE_TOOL:\n return 
cls._get_instance(configuration, ClientAuthType.NO_AUTH)\n\n elif client_type == HttpClientType.CLOUDERA:\n return cls._get_instance(configuration, ClientAuthType.HTTP_BASIC)\n\n else:\n raise HttpClientFactoryInvalidClientTypeException(client_type)", "def _get_client(self, requester_name: str) -> Any:\n return self.datastore.get_client_for_requester(requester_name)", "def get_client(host, port=9200, url_prefix=None, http_auth=None, use_ssl=False,\n master_only=False, timeout=30):\n kwargs = compact_dict({\n 'hosts': [host], 'port': port, 'http_auth': http_auth,\n 'url_prefix': url_prefix, 'use_ssl': use_ssl,\n 'timeout': timeout\n })\n logger.debug(\"ES client kwargs = %s\", kwargs)\n try:\n client = elasticsearch.Elasticsearch(**kwargs)\n # Verify the version is acceptable.\n check_version(client)\n # Verify \"master_only\" status, if applicable\n check_master(client, master_only=master_only)\n return client\n except Exception as e: # noqa\n print(\"ERROR: Connection failure: {0}\".format(e.message))\n sys.exit(1)", "def client(self):\n client = VMwareClient(self.host, verify_ssl=False)\n client.login(self.settings.username, self.settings.password)\n return client", "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def _get_es_client(conf):\n return elasticsearch_client(conf)", "def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def get_swiftclient():\n swift_conn = swiftclient.client.Connection(\n authurl=os.environ.get(\"OS_AUTH_URL\"),\n user=os.environ.get(\"OS_USERNAME\"),\n key=os.environ.get(\"OS_PASSWORD\"),\n tenant_name=os.environ.get(\"OS_TENANT_NAME\"),\n auth_version=\"2.0\",\n )\n return swift_conn", "def _get_client(self, region_name):\n if region_name not in self._clients[self.SERVICE_NAME]:\n self._clients[self.SERVICE_NAME][region_name] = self._create_client()\n\n return self._clients[self.SERVICE_NAME][region_name]", "def fetch_boto3_client(service_name: str):\n region_name = load_aws_region_name()\n cache_key = f\"{region_name}-{service_name}\"\n\n if CLIENT_CACHE.get(cache_key):\n return CLIENT_CACHE[cache_key]\n\n config = Config(\n region_name=region_name,\n signature_version=\"v4\",\n retries={\"max_attempts\": 10, \"mode\": \"standard\"},\n )\n client = boto3.client(service_name, config=config) # type: ignore\n\n CLIENT_CACHE[cache_key] = client\n\n return client", "def django_client() -> Client:\n\n return Client()", "def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client", "def _client():\n nonlocal client\n if not client:\n client = twitter.Twitter(authenticator)\n return client", "def service_client(self):\n\n return self._service_client", "def _get_dask_client(client: Optional[Client]) -> Client:\n if client is None:\n return default_client()\n else:\n return client", "def compute_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(ComputeManagementClient)\n return self.client", "def _client(self):\n\n if self._suds_client is None:\n 
self._suds_client = suds.client.Client(SERVICE_WSDL_URL)\n # Add SOAP Security tokens\n self.set_security_token()\n\n return self._suds_client", "def get_client(self, ip_address):\n\n self.cur.execute(\n 'select * from authenticated_clients where ip_address=%s',\n (ip_address, )\n )\n return self.cur.fetchone()", "def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)", "def get_client(self, server_name=None, server_address=None):\n if server_name:\n for name, address in self.registry.servers.items():\n if name == server_name:\n return Client(address)\n return None\n elif server_address:\n return Client(server_address)", "def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")", "def get_conn(self) -> ServiceBusClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusClient.from_connection_string(connection_string, logging_enable=True)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = DefaultAzureCredential()\n client = ServiceBusClient(\n fully_qualified_namespace=fully_qualified_namespace,\n credential=credential, # type: ignore[arg-type]\n )\n\n self.log.info(\"Create and returns ServiceBusClient\")\n return client", "def get_client(profile_name, region_name, svc, boto_client_params={}):\n session = get_session(profile_name, region_name)\n client = session.client(svc, **boto_client_params)\n return client", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_SNS_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('sns', endpoint_url=endpoint_url)\n return _client", "def get_client():\n\n client = Elasticsearch(host=HOST, port=PORT, timeout=300)\n\n # wait for yellow status\n for _ in range(100):\n time.sleep(.1)\n try:\n # This errors because of decorators. 
Silence it.\n # pylint: disable=E1123\n client.cluster.health(wait_for_status='yellow')\n return client\n except ConnectionError:\n continue\n else:\n # timeout\n raise SkipTest(\"Elasticsearch failed to start.\")", "def get_client(self, service):\n try:\n return boto3.client(service, region_name=self.region, config=self.proxy_config)\n except ClientError as e:\n fail(\"AWS %s service failed with exception: %s\" % (service, e))", "def get_client(self, client_id, device_certificate_path,\r\n device_private_key_path):\r\n # Performing the discovery of the core belonging to the same group of\r\n # the client.\r\n try:\r\n if not self.discovery_completed():\r\n self._discover_core(\r\n client_id,\r\n device_certificate_path,\r\n device_private_key_path)\r\n\r\n # Creating the client.\r\n return edge_st_sdk.aws.aws_client.AWSClient(\r\n client_id,\r\n device_certificate_path,\r\n device_private_key_path,\r\n self._group_ca_path,\r\n self._core_info)\r\n\r\n except (EdgeSTInvalidDataException, EdgeSTInvalidOperationException) \\\r\n as e:\r\n raise e", "def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)", "def network_client(self):\n if not self.client:\n self.client = get_client_from_cli_profile(NetworkManagementClient)\n return self.client", "def redis_client(self) -> Redis:\n if self._redis_client is None:\n redis_client = Redis(connection_pool=self.redis_conn_pool)\n\n self._redis_client = redis_client\n\n self._logger.debug(\n \"[%s]: Initialized Redis client: %s\", self.__name__, self._redis_client\n )\n\n return self._redis_client", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def storage_client(self):\n if not self.client:\n resource_group = self.desired_state_definition.get(\"resource_group\")\n storage_account = self.desired_state_definition.get(\"storage_account\")\n if resource_group and storage_account:\n client = get_client_from_cli_profile(StorageManagementClient)\n storage_keys = client.storage_accounts.list_keys(resource_group, storage_account)\n storage_keys = {v.key_name: v.value for v in storage_keys.keys}\n\n self.client = CloudStorageAccount(storage_account, storage_keys['key1'])\n else:\n raise Exception(\"azure_resource.resource_group and azure_resource.storage_account must be defined\")\n return self.client", "def client(db):\n client = ClientFactory()\n db.session.commit()\n return client", "def get_client_by_id(self, client_id=None):\n # search client_id in list and return the client object\n for client in self.client_list:\n if client_id == client.client_id:\n return client.copy()\n\n # return empty client otherwise\n return Client()", "def get_client(self, service):\n if self.cfg.iam == \"\":\n return boto3.client(service, region_name=self.cfg.region)\n\n if self.cfg.credentials == {}:\n logger.info(\"assume Role: {}\".format(self.cfg.iam))\n sts_client = boto3.client(\"sts\")\n self.cfg.credentials = sts_client.assume_role(\n RoleArn=self.cfg.iam, RoleSessionName=\"ssm-run\")[\"Credentials\"]\n\n return boto3.client(\n service,\n region_name=self.cfg.region,\n aws_access_key_id=self.cfg.credentials[\"AccessKeyId\"],\n aws_secret_access_key=self.cfg.credentials[\"SecretAccessKey\"],\n aws_session_token=self.cfg.credentials[\"SessionToken\"])", "def make_client(service_key, constructor=None, options=None, **kwargs):\n cloud = 
get_config(service_key=service_key, options=options, **kwargs)\n if not constructor:\n constructor = cloud_config._get_client(service_key)\n return cloud.get_legacy_client(service_key, constructor)", "def api_client() -> APIClient:\n return APIClient()", "def client(self):\n return self._thread._client", "def affirm_client(self):\n if self.user_name and self.password and self.host:\n uri = f'mongodb://{self.user_name}:{self.password}@\\\n {self.host}/{self.db_name}'\n client = MongoClient(uri)\n elif self.host:\n client = MongoClient(self.host)\n else:\n client = MongoClient()\n return client[self.db_name]", "def api_client() -> APIClient:\n\n return APIClient()" ]
[ "0.81075597", "0.8094052", "0.7934589", "0.76208735", "0.76208735", "0.74618495", "0.7417962", "0.7401862", "0.73747593", "0.729122", "0.7265158", "0.7255715", "0.7149866", "0.7061689", "0.70608056", "0.704505", "0.70287734", "0.7017763", "0.6964742", "0.6892199", "0.6867459", "0.6832228", "0.6828826", "0.67909855", "0.67823434", "0.66901654", "0.66786575", "0.6658291", "0.6635703", "0.66225225", "0.65699565", "0.65342003", "0.6505242", "0.64883447", "0.64559484", "0.64362234", "0.6428313", "0.64138645", "0.6412533", "0.6408575", "0.63980645", "0.63866055", "0.6386347", "0.63661265", "0.6365748", "0.6357952", "0.6342555", "0.63354284", "0.6332988", "0.6324958", "0.6318564", "0.6311683", "0.6299948", "0.6277214", "0.6265434", "0.6255868", "0.62515795", "0.62422824", "0.6226325", "0.6226184", "0.62234163", "0.6211045", "0.6194838", "0.61903137", "0.618987", "0.6188192", "0.61857796", "0.61764824", "0.61730343", "0.6157125", "0.61568207", "0.6154721", "0.6136813", "0.613554", "0.612757", "0.61188906", "0.6093412", "0.6079554", "0.60791445", "0.6073983", "0.60735106", "0.6069449", "0.6066076", "0.60649276", "0.6063842", "0.6063527", "0.60533565", "0.60474235", "0.60408956", "0.60351264", "0.60350657", "0.60346836", "0.6026527", "0.6020938", "0.59909415", "0.5989719", "0.5988053", "0.598052", "0.59671545", "0.59638226" ]
0.7938663
2
'voltage' should be a dict of numpy arrays of floating-point numbers. The keys of 'voltage' are integers, 0-3. Each element of 'voltage' should start and end near zero. 'repetitions' and 'rate' should be integers.
def __init__( self, voltage={0:(0, 0)}, rate=500, repetitions=1, board_name='cDAQ1Mod1', voltage_limits=None, num_channels=7): self.board_name = board_name #Check Measurement and Automation Explorer self._taskHandle = ctypes.c_void_p(0) self.num_channels = num_channels DAQmxErrChk(api.DAQmxCreateTask("", ctypes.byref(self._taskHandle))) DAQmxErrChk(api.DAQmxCreateAOVoltageChan( self._taskHandle, self.board_name + "/ao0:%i"%(num_channels - 1), "", ctypes.c_double(-10.0), #Minimum voltage ctypes.c_double(10.0), #Maximum voltage 10348, #DAQmx_Val_Volts; don't question it! ctypes.c_void_p(0), #NULL )) self.num_points_written = ctypes.c_long(0) self._unwritten_voltages = False self._unplayed_voltages = False self.set_voltage_and_timing(voltage, rate, repetitions, voltage_limits) return None
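A small illustrative sketch, with invented waveform shapes that are not from the dataset, of a 'voltage' argument satisfying the contract stated in the query above: integer keys 0-3, numpy arrays of floats, each trace starting and ending near zero, with integer 'rate' and 'repetitions'.

import numpy as np

rate = 500          # samples per second, integer
repetitions = 2     # number of times the waveform is replayed, integer
n = rate            # one second of samples per channel

t = np.linspace(0.0, 1.0, n)
voltage = {
    0: 2.0 * np.hanning(n),                        # smooth bump, zero at both ends
    1: np.sin(2 * np.pi * 5 * t) * np.hanning(n),  # windowed sine, ~zero at the ends
    2: np.zeros(n),                                # idle channel
    3: 0.5 * np.hanning(n) ** 2,                   # smaller bump, also zero at the ends
}
# These would be passed to the constructor above as
# voltage=voltage, rate=rate, repetitions=repetitions.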
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_voltage_data(self):\n self.voltage_record = {}\n self.threshold_value = {}\n for l in self.network.layers:\n if 'v' in self.network.layers[l].__dict__:\n self.voltage_record[l] = self.network.monitors['{:}_voltages'.format(l)].get('v')\n if 'thresh' in self.network.layers[l].__dict__:\n self.threshold_value[l] = self.network.layers[l].thresh", "def str_voltages(self, key, bypass_voltage):\r\n # If we already have the key, we're done\r\n if key in self.string_keys:\r\n return self.string_keys[key]\r\n model = key[0] # unpack the key\r\n pattern = key[1:]\r\n index = len(self.string_voltages)\r\n self.string_keys[key] = index\r\n # compute the combined voltage array\r\n try:\r\n cindex, multiple = pattern[0]\r\n svoltages = self.cell_voltages[cindex] * multiple\r\n for cindex, multiple in pattern[1:]:\r\n svoltages += self.cell_voltages[cindex] * multiple\r\n except:\r\n svoltages = self.cell_voltages[pattern[0]] * pattern[1]\r\n\r\n if bypass_voltage > 0:\r\n bypassed = svoltages < -bypass_voltage\r\n svoltages[bypassed] = -bypass_voltage\r\n self.string_voltages.append({\r\n 'voltages': svoltages,\r\n 'bypass': bypassed,\r\n })\r\n else:\r\n self.string_voltages.append({\r\n 'voltages': svoltages,\r\n 'bypass': None,\r\n })\r\n logger.debug(f'[{index:04d}] SV {pattern}')\r\n return index", "def update_fft(data):\n if data is None or data['rate'] is None:\n raise PreventUpdate\n x = np.fft.rfftfreq(len(data['val_list']), d=data['rate'])[10:]\n y = np.abs(np.fft.rfft(data['val_list']))[10:]\n return {'x': [x], 'y': [y]}, [0], len(y)", "def voltage_conversion(self):\r\n\t\tvoltage = ((self.data[0] * 256 + self.data[1]) / 65536.0) * 5.0\r\n\t\t\r\n\t\treturn {'v' : voltage}", "def __init__(self, parent):\n \n #60 32 bit integers are recorded for the amplifier sample time index \n self.sample_time_index = []\n for i in range(60):\n sample_time = np.int32(struct.unpack('i', parent.rhd.read(4)))[0]\n self.sample_time_index.append(sample_time)\n\n #Amplifier voltages for each channel\n self.electrode_traces = {}#key: channel name value: voltage trce\n for amp in parent._AMPLIFIER_CHANNELS:\n electrode_voltage_trace = []\n #60 samples per channel, int16\n for i in range(60):\n electrode_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n electrode_voltage_trace.append(electrode_voltage)\n self.electrode_traces[amp] = electrode_voltage_trace \n\n #Get voltage from Aux input channels\n self.auxilary_traces = {}\n for aux in parent._AUX_CHANNELS:\n aux_voltage_trace = []\n #15 samples per channel, int16\n for i in range(15):\n aux_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n aux_voltage_trace.append(aux_voltage)\n self.auxilary_traces[aux] = aux_voltage_trace \n\n #get voltage from supply voltage channels\n self.supply_voltages = {}\n for sup in parent._SUPPLY_VOLTAGE_CHANNELS:\n sup_voltage_list = []\n for i in range(1):\n sup_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n sup_voltage_list.append(sup_voltage)\n self.supply_voltages[sup] = sup_voltage_list \n\n #get voltage from temerature sensor channels\n self.temerature_sensor_readings = {}\n for n in range(parent._TEMP_SENSORS):\n temp_list = []\n for i in range(1):\n temperature = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n temp_list.append(temperature)\n self.temerature_sensor_readings[n] = temp_list \n\n #Get voltage ADC inputs\n self.board_adc_input_voltages = {}\n for adc in parent._ADC_INPUT_CHANNELS:\n adc_input_list = []\n for i in range(60):\n adc_input = 
np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n adc_input_list.append(adc_input)\n self.board_adc_input_voltages[adc] = adc_input_list \n\n #Get digital input values\n self.board_digital_inputs = {}\n for dig in parent._DIGITAL_INPUT_CHANNELS :\n digital_input_list = []\n for i in range(60):\n digital_input = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n digital_input_list.append(digital_input)\n self.board_digital_inputs[dig.native_channel_name] = digital_input_list", "def s4_1min_1freq(powerData1,timevec,elevaData,azitmData):\n\t#TODO : empezar desde el primer minuto del tiempo, no desde el inicio del dictionaries\n\t#TODO : calcular s4 para l1 y l2 al mismo tiempo, deberia reducir la mitad del tiempo\n\t#TODO : dont take into account snr2 if this comes with a lot of 0000000 zeros\n\ts4_values1=[]\n\n\ts4_times=[]\n\ts4_avgSNR1 = [] #\n\ts4_avgSNR2 = [] #\n\ts4_points1 = [] # s4_points_per_minute\n\n\ts4_timesr=[]\n\ts4_elev=[]\n\ts4_azit=[]\n\n\tfor eachminute in range(0,1440):\n\t\ts4_times.append(eachminute/60.0)\n\n\ttmp_amplitudes1 = []\n\ttmp_amplitudesdB1=[]\n\ttmp_elevations = []\n\ttmp_azimuths = []\n\n\tinit_index=0\n\n\tarr=np.array(timevec)+np.ones([len(timevec)])*(18.0/3600.0) # #SEPTENTRIO USES DATA from 1 MINUTE GPS TIME\n\t########################\n\tfor eachminute in s4_times:\n\t\tidxarray = (arr >= eachminute) & (arr < (eachminute+(1/60.0)) )# bool array\n\t\ttmp_amplitudesdB1 = powerData1[idxarray]\n\t\ttmp_elevations = elevaData[idxarray]\n\t\ttmp_azimuths = azitmData[idxarray]\n\t\ttmp_amplitudes1=list(map(pow10,tmp_amplitudesdB1))#use numpy.power\n\n\n\t\tif len(tmp_amplitudes1)>0:\n\t\t\ts4_1 = np.std(tmp_amplitudes1,ddof=1) / np.mean(tmp_amplitudes1)\n\t\telse:\n\t\t\ts4_1 = float(\"nan\")\n\n\t\ts4_values1.append(s4_1)\n\t\ts4_avgSNR1.append(np.mean(tmp_amplitudesdB1))\n\t\ts4_timesr.append(eachminute+1/60.0) #Septentrio has the timestamp 1 min in advance\n\t\ts4_points1.append(len(tmp_amplitudes1))\n\t\ts4_elev.append(np.mean(tmp_elevations))\n\t\ts4_azit.append(np.mean(tmp_azimuths))\n\n\treturn s4_values1,s4_timesr,s4_points1,s4_elev,s4_azit,s4_avgSNR1", "def incremental_rv(\n wavelength: ndarray,\n flux: ndarray,\n *,\n mask: Optional[Union[Quantity, ndarray]] = None,\n percent: float = 10,\n **kwargs,\n) -> Tuple[ndarray, ndarray]:\n positions = log_chunks(wavelength, percent)\n velocities = []\n for pos1, pos2 in zip(positions[:-1], positions[1:]):\n pos_mask = (wavelength >= pos1) & (wavelength < pos2)\n if np.sum(pos_mask) <= 1:\n # 1 or less points in this section\n continue\n\n x = wavelength[pos_mask]\n y = flux[pos_mask]\n if mask is not None:\n z = mask[pos_mask]\n else:\n z = mask # None\n try:\n rv_calc = rv_precision(x, y, mask=z, **kwargs).value\n except:\n rv_calc = np.nan\n velocities.append([np.nanmean(x), rv_calc])\n\n x, rv = np.asarray(velocities).T\n return x, rv", "def get_voltage_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? (.*?) .*? .*? .*? 
\\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating", "def get_ratio_metrics(\n ratio_metric_specs: Dict[iter8id, RatioMetricSpec], \n counter_metric_specs: Dict[iter8id, CounterMetricSpec], \n counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]], \n versions: Iterable[Version],\n start_time: datetime) -> Dict[iter8id, Dict[iter8id, RatioDataPoint]]:\n rmd = {version.id: {} for version in versions} # initialize rmd\n\n # populate rmd\n for ratio_metric_spec in ratio_metric_specs.values():\n query_spec = RatioQuerySpec(\n version_label_keys = versions[0].version_labels.keys(),\n numerator_template = counter_metric_specs[ratio_metric_spec.numerator].query_template,\n denominator_template = counter_metric_specs[ratio_metric_spec.denominator].query_template,\n start_time = start_time\n )\n prmq = PrometheusRatioMetricQuery(query_spec, versions)\n current_time = datetime.now(timezone.utc)\n rmd_from_prom = prmq.query_from_spec(current_time)\n\n for version in versions:\n if version.id in rmd_from_prom:\n rmd[version.id][ratio_metric_spec.id] = rmd_from_prom[version.id]\n else:\n if version.id in counter_metrics and counter_metrics[version.id][ratio_metric_spec.denominator].value:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = 0,\n timestamp = current_time,\n status = StatusEnum.zeroed_ratio\n )\n else:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = None,\n timestamp = current_time,\n status = StatusEnum.absent_version_in_prom_response\n )\n \"\"\"if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise.\n \"\"\"\n\n return rmd", "def test_get_voltage_maps(self):\n pass", "def parsesetting(conf, rate, loopnum):\n global numpy, math, funcchoose\n cp = numpy.array([float(val)/1000 for val in conf[0] if val != ''])\n ncp = len(cp)\n ct = numpy.array([float(val)/1000 for val in conf[1][:ncp]])\n cv = numpy.array([float(val) for val in conf[2][:ncp]])\n \n dcp = numpy.array([float(val)/1000 for val in conf[3][:ncp]]) \n dct = numpy.array([float(val)/1000 for val in conf[4][:ncp]]) \n dcv = numpy.array([float(val)/1000 for val in conf[5][:ncp]]) \n\n special = numpy.array([int(val) for val in conf[6][:ncp]])\n reserve = [val for val in conf[7][:ncp]]\n for i in range(len(reserve)):\n reserve[i] = [float(part) for part in reserve[i].split(';')]\n\n cp += loopnum * dcp\n totalt = cp[-1] + ct[-1] # the last interval plus change\n\n changes = []\n for i in range(ncp):\n vprev = cv[i-1] + loopnum * dcv[i-1]\n vthis = cv[i] + loopnum * dcv[i]\n timescale = ct[i] + loopnum * dct[i]\n if timescale == 0:\n changes += [[vthis]]\n else:\n intervals = int(timescale * rate) # implicit rounding down\n tsteps = numpy.linspace(0, intervals/rate, intervals + 1)\n\n try:\n funcshape = funcchoose[special[i]]\n except KeyError:\n raise NotImplementedError(\"Time dependence: %d\" %special[i])\n\n if funcshape == 'adiabatic':\n A, B = numpy.power([vprev, vthis], -0.5)\n a = (A - B) / timescale\n vals = 1 / (A - a * tsteps)**2\n elif funcshape == 'exponential':\n timeconstant = reserve[i][0] / 1000 # it is in ms\n if vthis < vprev:\n vals = numpy.max([vprev * numpy.exp(-tsteps/timeconstant), [vthis] * (intervals+1)], axis=0)\n else:\n vals = numpy.min([vprev * numpy.exp(tsteps/timeconstant), [vthis] * len(tsteps)], axis=0)\n elif funcshape == 'sine':\n params = reserve[i]\n\n deltaamp = params[2]\n deltafreq = params[3]\n 
amplitude = params[0] + loopnum * deltaamp\n freq = params[1] + loopnum * deltafreq\n\n vals = 0.5 * amplitude * numpy.sin(2 * numpy.pi * tsteps * freq) + vthis\n elif funcshape == 'linear':\n vals = (vthis - vprev) * tsteps / timescale + vprev\n else:\n raise ValueError\n\n if tsteps[-1] < timescale:\n vals = numpy.append(vals, vthis)\n vals = numpy.append(vals, vthis)\n changes += [list(vals)]\n\n intervals = int(math.ceil(totalt * rate))\n tlist = numpy.linspace(0, intervals/rate, intervals+1)\n\n icp = 0\n counter = 0\n values = []\n for t in tlist:\n if icp < (ncp-1) and t >= cp[icp + 1]:\n icp += 1\n counter = 0\n\n if counter == 0:\n nvals = len(changes[icp])\n\n if counter < nvals:\n newval = changes[icp][counter]\n counter += 1\n else:\n newval = changes[icp][-1]\n values += [newval]\n return numpy.array(values)", "def addRateParams(spec, data_card, channels, modifiers):\n measurements = [\n measurement[\"config\"][\"poi\"] for measurement in spec[\"measurements\"]\n ]\n signal_mods = [modifier[0] for modifier in modifiers if modifier[0] in measurements]\n\n for idxc, channel in enumerate(channels):\n for idxs, sample in enumerate((spec[\"channels\"][idxc][\"samples\"])):\n is_signal = any(mod[\"name\"] in signal_mods for mod in sample[\"modifiers\"])\n if not is_signal:\n for mod in spec[\"channels\"][idxc][\"samples\"][idxs][\"modifiers\"]:\n # normfactor or shapefactor\n if \"normfactor\" in mod[\"type\"] or \"shapefactor\" in mod[\"type\"]:\n for measurement in spec[\"measurements\"]:\n for param in measurement[\"config\"][\"parameters\"]:\n data_card.rateParams.update(\n {f\"{channel}AND\" + sample[\"name\"]: []}\n )\n if mod[\"name\"] == param[\"name\"]:\n data_card.rateParams[\n f\"{channel}AND\" + sample[\"name\"]\n ].append([[mod[\"name\"], 1, 0, param[\"bounds\"]], \"\"])\n else:\n data_card.rateParams[\n f\"{channel}AND\" + sample[\"name\"]\n ].append([[mod[\"name\"], 1, 0], \"\"])", "def increaseFreq(self, desHz):\n from scipy.interpolate import interp1d\n import time\n from numpy import linspace, floor\n from decimal import getcontext, Decimal\n\n if desHz > 1000: # set max freq here \n raise ValueError('Max Frequency is 1000 (3 decimal places)')\n now = time.asctime(time.localtime(time.time())) \n stamp = ''.join(['%% The following created by alog_manip.MOOSalog.MOOSalog.increaseFreq\\n%% ', now])\n increase_msg = ''.join(['%% Resultant Frequency: ',str(desHz),' Hz'])\n # hiHz = {}\n self.outData = {} # erase pre-existing dict\n self.outData['header'] = [stamp,increase_msg,'%%%%'] + self.srcData['header']\n\n def create_msgs():\n \"\"\" Puts interpolated data into dict outData\n Primary interpolation function for increaseFreq\n Consider using uniaxial spline --> would have one function for all of dictionary dat\n \"\"\"\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = 
int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]\n\n ## go thru and pull out dictionaries {time: value} then send to interpolation func\n for sens in self.srcData:\n if sens is not 'header':\n self.outData[sens] = {}\n for meas in self.srcData[sens]:\n self.outData[sens][meas] = {}\n dat = self.srcData[sens][meas]\n if len(dat) == 1:\n self.outData[sens][meas] = dat # only 1 data point, no interp\n else:\n create_msgs()", "def test_1d_freq():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test.ft\")\n assert data.shape == (4096,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -63789.66\n assert round(data[1],2) == -63159.88\n assert round(data[100],2) == -29308.34\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[297.92, -99.82])", "def vibrate(self, pattern):\n raise NotImplementedError", "def get_slack_voltage(self):\n first_val = self.__voltage_no_load\n second_val = self.__voltage_no_load * np.exp(-2j * np.pi/3)\n third_val = self.__voltage_no_load * np.exp(2j * np.pi/3)\n matrix = np.array(([first_val], [second_val], [third_val]), dtype=np.complex128)\n slack_voltage = np.tile(matrix, (self.get_nb_brackets()-1, 1))\n return slack_voltage", "def pressure_dict(calib, f, t):\n #array mode\n try:\n pressure = []\n \"\"\"Equation expecting pressure period in microseconds, so divide f by 1,000,000. \"\"\"\n uf = [x/1000000 for x in f]\n for f_x, t_x in zip(uf, t):\n T0 = calib['T1'] + calib['T2']*t_x + calib['T3']*math.pow(t_x,2) + calib['T4']*math.pow(t_x,3)\n w = 1-T0*T0*f_x*f_x\n temp = (0.6894759*((calib['C1']+calib['C2']*t_x+calib['C3']*t_x*t_x)*w*(1-(calib['D1']+calib['D2']*t_x)*w)-14.7))\n pressure.append(round(temp,2))\n #single mode\n except:\n T0 = calib['T1'] + calib['T2']*t + calib['T3']*math.pow(t,2) + calib['T4']*math.pow(t,3)\n w = 1-T0*T0*f*f\n pressure = (0.6894759*((calib['C1']+calib['C2']*t+calib['C3']*t*t)*w*(1-(calib['D1']+calib['D2']*t)*w)-14.7))\n return pressure", "def populateDict(self):\n for dev in self.dcDict:\n range = self.dcDict[dev]['range']\n for devChannel in self.dcDict[dev]['devChannels']:\n channel = self.dcDict[dev]['devChannels'][devChannel]['channel']\n comstring = str(channel)+'r'\n yield self.ser.write(comstring)\n encoded = yield self.ser.read(3)\n seq = int(binascii.hexlify(encoded[0:2]),16)\n voltage = round(range[0] + float(seq) / (2**16 - 1) * float(range[1]-range[0]),2)\n self.dcDict[dev]['devChannels'][devChannel]['value'] = voltage", "def _volumetric_flux(recarray, modeltime, extrapolate_kper=False):\n pd = import_optional_dependency(\n \"pandas\",\n error_message=\"ZoneBudget._volumetric_flux() requires pandas.\",\n )\n\n nper = len(modeltime.nstp)\n volumetric_data = {}\n zones = np.unique(recarray[\"zone\"])\n\n for key in recarray.dtype.names:\n volumetric_data[key] = []\n\n if extrapolate_kper:\n volumetric_data.pop(\"kstp\")\n perlen = modeltime.perlen\n totim = np.add.accumulate(perlen)\n for per in range(nper):\n idx = np.where(recarray[\"kper\"] == per)[0]\n\n if len(idx) == 0:\n continue\n\n temp = recarray[idx]\n\n for zone in zones:\n if zone == 0:\n continue\n\n zix = np.where(temp[\"zone\"] == zone)[0]\n\n if len(zix) == 0:\n raise Exception\n\n for key in recarray.dtype.names:\n if key == \"totim\":\n volumetric_data[key].append(totim[per])\n\n elif key == \"tslen\":\n volumetric_data[\"perlen\"].append(perlen[per])\n\n elif key == \"kstp\":\n continue\n\n elif key == \"kper\":\n volumetric_data[key].append(per)\n\n elif key 
== \"zone\":\n volumetric_data[key].append(zone)\n\n else:\n tmp = np.nanmean(temp[zix][key])\n vol = tmp * perlen[per]\n volumetric_data[key].append(vol)\n\n else:\n n = 0\n tslen = {}\n dtotim = {}\n totim = modeltime.totim\n for ix, nstp in enumerate(modeltime.nstp):\n for stp in range(nstp):\n idx = np.where(\n (recarray[\"kper\"] == ix) & (recarray[\"kstp\"] == stp)\n )\n if len(idx[0]) == 0:\n continue\n elif n == 0:\n tslen[(stp, ix)] = totim[n]\n else:\n tslen[(stp, ix)] = totim[n] - totim[n - 1]\n dtotim[(stp, ix)] = totim[n]\n n += 1\n\n ltslen = [tslen[(rec[\"kstp\"], rec[\"kper\"])] for rec in recarray]\n if len(np.unique(recarray[\"totim\"])) == 1:\n ltotim = [dtotim[(rec[\"kstp\"], rec[\"kper\"])] for rec in recarray]\n recarray[\"totim\"] = ltotim\n\n for name in recarray.dtype.names:\n if name in (\"zone\", \"kstp\", \"kper\", \"tslen\", \"totim\"):\n volumetric_data[name] = recarray[name]\n else:\n volumetric_data[name] = recarray[name] * ltslen\n\n return pd.DataFrame.from_dict(volumetric_data)", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_51B.pack(_x.temp_1_curr, _x.temp_1_min, _x.temp_1_max, _x.temp_2_curr, _x.temp_2_min, _x.temp_2_max, _x.temp_3_curr, _x.temp_3_min, _x.temp_3_max, _x.temp_4_curr, _x.temp_4_min, _x.temp_4_max, _x.temp_5_curr, _x.temp_5_min, _x.temp_5_max, _x.temp_6_curr, _x.temp_6_min, _x.temp_6_max, _x.akku_voltage_curr, _x.akku_voltage_min, _x.akku_voltage_max, _x.hals_motor_voltage_curr, _x.hals_motor_voltage_min, _x.hals_motor_voltage_max, _x.hals_logik_voltage_curr, _x.hals_logik_voltage_min, _x.hals_logik_voltage_max, _x.tablett_logik_voltage_curr, _x.tablett_logik_voltage_min, _x.tablett_logik_voltage_max, _x.arm_logik_voltage_curr, _x.arm_logik_voltage_min, _x.arm_logik_voltage_max, _x.tablett_motor_voltage_curr, _x.tablett_motor_voltage_min, _x.tablett_motor_voltage_max, _x.hals_motor_current_curr, _x.hals_motor_current_min, _x.hals_motor_current_max, _x.hals_logik_current_curr, _x.hals_logik_current_min, _x.hals_logik_current_max, _x.tablett_logik_current_curr, _x.tablett_logik_current_min, _x.tablett_logik_current_max, _x.arm_logik_current_curr, _x.arm_logik_current_min, _x.arm_logik_current_max, _x.tablett_motor_current_curr, _x.tablett_motor_current_min, _x.tablett_motor_current_max))\n except struct.error, se: self._check_types(se)\n except TypeError, te: self._check_types(te)", "def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 
= U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el", "def calibrate_decide(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage < 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n else:\n\n # get rho using full range coeffs\n XKw = coeffs['path_len'] * coeffs['Kwf']\n logV0 = np.log(coeffs['V0f'])\n rho_temp = (np.log(voltage) - logV0) / XKw\n\n # determine new coeffs based on the \"temporary\" values\n if np.mean(rho_temp) > 9:\n if verbose:\n print('high')\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n else:\n if verbose:\n print('low')\n XKw = coeffs['path_len'] * coeffs['Kwl']\n logV0 = np.log(coeffs['V0l'])\n # re-calculate rho with these coefficients\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def update_recruiting(self, rate):\n self.recruit = int(np.ceil(self.INITIAL_POPULATION*rate))", "def __init__(self):\n\n\n self.dtype = np.dtype([\n ('fault_flags', np.uint32),\n ('raw_x', np.int16),\n ('raw_y', np.int16),\n ('raw_z', np.int16),\n ('accel_x', np.float32),\n ('accel_y', np.float32),\n ('accel_z', np.float32), \n ('pitch', np.float32),\n ('roll', np.float32), \n ])\n \n self._accel_indices = [0, 4, 5, 6]\n \n self.data = np.array([(0, 0.1, 12, 1234, 0.12345678901234, 0.12345678901234, 0.12345678901234, 0.12345678901234, 0.12345678901234)], dtype=self.dtype)\n self.data['fault_flags'] = 0\n self.data['raw_x'] = 0.1\n self.data['raw_y'] = 12\n self.data['raw_z'] = 31929\n self.data['accel_x'] = 0.12345678901234\n self.data['accel_y'] = 0.23456789012345\n self.data['accel_z'] = 0.34567890123456\n self.data['pitch'] = 0.1000\n self.data['roll'] = 0.2000\n\n\n #print len(self.data.tostring(order=\"C\"))", "def test_RV():\n\n spec = IGRINSSpectrum(file=file)\n\n assert spec.uncertainty is not None\n assert hasattr(spec, \"barycentric_correct\")\n\n correction_velocity = spec.estimate_barycorr()\n\n assert isinstance(spec.RA, astropy.units.quantity.Quantity)\n assert isinstance(spec.DEC, astropy.units.quantity.Quantity)\n assert correction_velocity is not None\n assert isinstance(correction_velocity, astropy.units.quantity.Quantity)\n\n new_spec = spec.barycentric_correct()\n assert new_spec is not None\n assert isinstance(new_spec, Spectrum1D)", "def test_get_engVoltage(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ENG_VOLTAGE_IDX, ENG_VOLTAGE_SUB)\n param_obj = self.__dict__[servo_type]._get_engVoltage()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in engVoltage...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def case():\r\n #ppc = {\"version\": '2'}\r\n 
ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 
16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc", "def gt_voltages(self, key):\r\n # If we already have the (model, G, T) key, we're done\r\n if key in self.cell_keys:\r\n return self.cell_keys[key]\r\n model, insolation, temperature = key # unpack the key\r\n index = len(self.cell_voltages)\r\n self.cell_keys[key] = index\r\n self.cell_voltages.append(\r\n model.voltage([(i, insolation, temperature) for i in self.currents]))\r\n logger.debug(f'[{index:04d}] CV {insolation:.1f} {temperature:.1f}{DEG}C')\r\n return index", "def setValues(\n self,\n frameRate: int = None,\n timeScale: int = None,\n vpDecorations: Boolean = ON,\n vpBackground: Boolean = OFF,\n compass: Boolean = OFF,\n ):\n pass", "def get_voltages(self):\n if self.v is None or self.dirty is True:\n v = self.simulator.get_voltages()\n n_compartments = self.neuron_collection.total_compartments()\n self.v = np.array(v).reshape([len(v) / n_compartments, n_compartments])\n\n self.dirty = False\n t = int(self.T / self.dt)\n return self.v[:t, :]", "def meas_voltage(instrument, v_range=10, resolution=0.003):\n return float(instrument.query('MEAS:VOLTage:DC? 
%s,%s' % (v_range, resolution)))", "def test_get_voltage_map_item(self):\n pass", "def generate_subs(speed, source_speed, source_subs):\r\n if speed == source_speed:\r\n return source_subs\r\n\r\n coefficient = 1.0 * speed / source_speed\r\n subs = {\r\n 'start': [\r\n int(round(timestamp * coefficient)) for\r\n timestamp in source_subs['start']\r\n ],\r\n 'end': [\r\n int(round(timestamp * coefficient)) for\r\n timestamp in source_subs['end']\r\n ],\r\n 'text': source_subs['text']}\r\n return subs", "def lin_decoded(voltage, sample_interval, lin_version):\n decoded_lin = []\n for i in range(len(voltage) / sample_interval):\n decoded_lin.append(\n int(most_common(voltage[i * sample_interval:i * sample_interval + sample_interval])))\n results = []\n sync_break = decoded_lin[0:13]\n sync_field = decoded_lin[15:22]\n sync_field_to_hex = \"\"\n for i in range(len(sync_field)):\n sync_field_to_hex += str(sync_field[i])\n sync_field_hex = \"{0:0>2X}\".format(int(sync_field_to_hex, 2))\n rest_of_lin = decoded_lin[25:len(decoded_lin)]\n id_field = \"\"\n parity_bits = \"\"\n data_field = []\n id_field = \"{0:0>2X}\".format(\n int(\"\".join(map(str, decoded_lin[25:33][::-1])), 2))\n if lin_version == \"-e\":\n pid = \"{0:0>2X}\".format(\n int(\"\".join(map(str, decoded_lin[25:31][::-1])), 2))\n else:\n pid = \"{0:0>2X}\".format(\n int(\"\".join(map(str, decoded_lin[25:29][::-1])), 2))\n parity_bits = \"{0:b}\".format(\n int(\"\".join(map(str, decoded_lin[25:33][::-1])), 2))[0:2]\n length = 0\n if int(pid, 16) >= 0 and int(pid, 16) < 31:\n length = 2\n elif int(pid, 16) >= 32 and int(pid, 16) < 47:\n length = 4\n else:\n length = 8\n for x in range(length):\n data_field.append(\"{0:0>2X}\".format(\n int(\"\".join(map(str, decoded_lin[(35 + (x * 10)):(43 + (x * 10))][::-1])), 2)))\n checksum = \"{0:0>2X}\".format(\n int(\"\".join(map(str, decoded_lin[(35 + (length * 10)):(43 + (length * 10))][::-1])), 2))\n return id_field, int(parity_bits), data_field, checksum", "def calc(self,index,counter_values):\n gr = self.grSign * self.grPitch['Value'].value\n m = self.mSign * self.mPitch['Value'].value\n \n offsetG,offsetM = self.checkOffset()\n beta = self.toRadians(gr) - (math.pi/2.0) - offsetG\n theta = (math.pi/2.0) - (self.toRadians(m)) - offsetM\n alpha = (2.0*theta) + beta\n numerator = (math.sin(alpha) + math.sin(beta))\n denominator = (self.DiffrOrder * self.look_at_grx())\n wavelength = numerator / denominator\n \n if wavelength == 0.0:\n energy_physicalmot = 0.0\n else:\n energy_physicalmot = self.hc / wavelength\n #if self.FixedM2Pit: \n Cff = math.cos(beta)/math.cos(alpha)\n if energy_physicalmot < 0 :\n #warning: wavelength se vuelve negativo ... 
??????\n energy_physicalmot = energy_physicalmot *(-1) \n \n # Real Energy is equal to the energy calculated by the encoders\n # minus an offset that depends on the same energy calculated by the \n # encoders:\n # E_physicalmot = Ereal + offset\n # with offset = a*Ereal + b\n # This implies that: Ereal = (Ephysicalmot - b)/(1+a) \n a_coeff = self.EnergyDP.a_offset_coeff\n b_coeff = self.EnergyDP.b_offset_coeff\n numerator = energy_physicalmot - b_coeff\n denominator = 1 + a_coeff\n energy = numerator / denominator\n \n if index == 1:\n return energy\n elif index == 2:\n return Cff", "def interpret_parameters(self) :\n\n if hasattr(self,'exposure_schedule') and self.exposure_schedule is not None :\n if isinstance(self.exposure_schedule,float) :\n self.exposure_schedule = [np.repeat(self.exposure_schedule,24)]\n\n elif isinstance(self.exposure_schedule,int) :\n temp = self.exposure_schedule\n self.exposure_schedule = [np.zeros(24)]\n self.exposure_schedule[0][temp] = 1\n\n elif isinstance(self.exposure_schedule,dict) :\n temp = self.exposure_schedule\n self.exposure_schedule = [np.zeros(24)]\n for x in temp.items() :\n self.exposure_schedule[0][int(x[0])] = x[1] \n\n elif isinstance(self.exposure_schedule,np.ndarray) :\n if len(np.shape(self.exposure_schedule)) == 1 and np.shape(self.exposure_schedule)[0] == 24 :\n self.exposure_schedule = [self.exposure_schedule]\n elif len(np.shape(self.exposure_schedule)) == 2 and np.shape(self.exposure_schedule)[1] == 24 :\n # split an array of multiple schedules into a list of single schedule arrays\n self.exposure_schedule = np.split(self.exposure_schedule,np.shape(self.exposure_schedule)[0])\n else :\n raise ValueError(\"Exposure schedule not a comprehensible numpy array, \" +\n \"must be length 24 in first or second dimension\")\n\n elif isinstance(self.exposure_schedule,list) :\n if len(self.exposure_schedule) == 24 and all(isinstance(x,(int,float)) for x in self.exposure_schedule) :\n self.exposure_schedule = [np.array(self.exposure_schedule)]\n \n for i in range(len(self.exposure_schedule)) :\n if isinstance(self.exposure_schedule[i],float) :\n self.exposure_schedule[i] = np.repeat(self.exposure_schedule[i],24)\n\n elif isinstance(self.exposure_schedule[i],int) :\n temp = self.exposure_schedule[i]\n self.exposure_schedule[i] = np.zeros(24)\n self.exposure_schedule[i][temp] = 1\n\n elif isinstance(self.exposure_schedule[i],dict) :\n temp = self.exposure_schedule[i]\n self.exposure_schedule[i] = np.zeros(24)\n for x in temp.items() :\n self.exposure_schedule[i][int(x[0])] = x[1] \n\n elif isinstance(self.exposure_schedule[i],np.ndarray) :\n if not (len(np.shape(self.exposure_schedule[i])) == 1 \n and np.shape(self.exposure_schedule[i])[0] == 24 ):\n raise ValueError(\"Exposure schedule list contains an incomprehensible entry, \" + \n \"a numpy array that is not length 24\")\n \n elif isinstance(self.exposure_schedule[i],list) :\n if len(self.exposure_schedule[i]) == 24 :\n self.exposure_schedule[i] = np.array(self.exposure_schedule[i])\n else :\n raise ValueError(\"Exposure schedule list contains an incomprehensible entry, \" + \n \"a list that is not length 24\")\n \n else :\n raise TypeError(\"Exposure schedule list contains an incomprehensible entry\")\n\n else :\n raise TypeError(\"Exposure schedule must be a list of length-24 numpy arrays or similar\")\n ###################################################################################################### \n if hasattr(self,'year_selection') and self.year_selection is not None :\n if 
isinstance(self.year_selection,int) :\n if self.year_selection==0:\n self.year_selection = [np.array([x]) for x in self.dataset_years]\n else:\n self.year_selection = [np.array([self.year_selection])]\n elif isinstance(self.year_selection,np.ndarray) :\n if len(np.shape(self.year_selection)) == 1 :\n self.year_selection = [self.year_selection]\n else :\n raise ValueError(\"Year selection should be a list of numpy arrays, \" +\n \"provided numpy array has incomprehensible shape\")\n elif isinstance(self.year_selection,list) :\n if all([isinstance(x,int) for x in self.year_selection]) and all(x!=0 for x in self.year_selection) :\n self.year_selection = [np.array(self.year_selection)]\n else :\n i=0\n for k in range(len(self.year_selection)) :\n if isinstance(self.year_selection[i],int) :\n if self.year_selection[i] == 0 :\n temp = self.year_selection[0:i] + [np.array([x]) for x in self.dataset_years]\n if i != len(self.year_selection)-1 : \n temp = temp + self.year_selection[i+1:]\n self.year_selection = temp\n i = i + len(self.dataset_years) - 1\n else :\n self.year_selection[i] = np.array([self.year_selection[i]])\n elif isinstance(self.year_selection[i],list) :\n self.year_selection[i] = np.array(self.year_selection[i])\n elif not isinstance(self.year_selection[i],np.ndarray) :\n raise TypeError(\"Year selection list must contain ints, lists, or numpy arrays\")\n i=i+1\n else :\n raise TypeError(\"Year selection must be an int, numpy array, or list of numpy arrays\")\n\n for i in range(len(self.year_selection)) :\n if all(self.year_selection[i] == 0) :\n self.year_selection[i] = np.array(self.dataset_years)\n #####################################################################################################\n if hasattr(self,'units') and self.units is not None :\n if isinstance(self.units,str) :\n self.units = [self.units]\n elif isinstance(self.units,list) :\n if not all(isinstance(x,str) for x in self.units) :\n raise TypeError(\"Units input must be a list of strings\")\n else :\n raise TypeError(\"Units input must be a list of strings\")\n\n for i in range(len(self.units)) :\n if not isinstance(self.units[i],str) :\n raise TypeError(\"Units input must be a list of strings\")\n if self.units[i] not in [\"SED\",\"UVIh\",\"UVI\",\"J m-2\",\"W m-2\",\"mW m-2\"] :\n raise ValueError(\"Units input must be list of accepted unit strings, \" +\n \"those being SED, UVIh, J m-2, UVI, W m-2, or mW m-2\")\n\n\n if hasattr(self,'bin_width') :\n if self.bin_width is None :\n self.bin_width = []\n for unit in self.units :\n self.bin_width.append({\n \"SED\" : 0.1, \n \"J m-2\" : 10, \n \"UVI\" : 0.1, \n \"W m-2\" : 0.0025, \n \"mW m-2\" : 2.5\n }[unit])\n elif isinstance(self.bin_width,(int,float)) :\n self.bin_width = [self.bin_width]\n\n\n return self", "def get_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. (.*?) .*? .*? .*? .*? . .*? .*? . . . 
.*?'\n voltage = float(re.findall(pattern,summary).pop())\n return voltage", "def degradation_due_to_frame_rate_reduction(deg_cod_v, deg_scal_v, framerate):\n t1 = 30.98\n t2 = 1.29\n t3 = 64.65\n deg_frame_rate_v = 0\n if framerate < 24:\n deg_frame_rate_v = (100 - deg_cod_v - deg_scal_v) * (t1 - t2 * framerate) / (t3 + framerate)\n deg_frame_rate_v = utils.constrain(deg_frame_rate_v, 0.0, 100.0)\n return deg_frame_rate_v", "def test_post_voltage_maps(self):\n pass", "def get_spectrum_data():\n from resistics.spectra.data import SpectrumData\n import numpy as np\n\n # add some data\n startTime = \"2020-01-01 00:00:00.000000\"\n stopTime = \"2020-01-01 00:00:00.062500\"\n data = {}\n data[\"Ex\"] = np.array([1 + 3j, -2 + 5j, 7 - 6j, 3 + 2j, 4 + 8j])\n data[\"Ey\"] = np.array([12 - 4j, -6 + 2j, 2 + 6j, -4 - 2j, -6 - 6j])\n data[\"Hx\"] = np.array([-3 + 3j, -11 + 7j, 4 - 1j, 1 + 9j, 2 + 2j])\n data[\"Hy\"] = np.array([2 + 9j, 9 + 1j, 8 + 8j, 6 + 2j, 5 + 2j])\n specData = SpectrumData(8, 5, 128, startTime, stopTime, data)\n evalfreq = np.array([24, 40])\n return specData, evalfreq", "def _prevalent_freq(self, data, framerate):\n if not(np.std(data) == 0):\n data = (data-np.mean(data))/np.std(data)\n transform = np.fft.rfft(data)\n freqs = np.fft.rfftfreq(len(data), 1.0/framerate) \n freqs = 60*freqs\n band_pass = np.where((freqs < 40) | (freqs > 240) )[0]\n transform[band_pass] = 0\n transform = np.abs(transform)**2\n sos = scipy.signal.butter(3, 0.2, output='sos')\n transform = scipy.signal.sosfilt(sos, transform)\n powers = np.argsort(-1*transform)\n hr, power = self._respiration_rejection([freqs[powers[0]], freqs[powers[1]]],[transform[powers[0]], transform[powers[1]]])\n return hr, power", "def _build_parsed_values(self):\n match = SAMPLE_REGEX.match(self.raw_data)\n \n if not match:\n raise SampleException(\"No regex match of parsed sample data: [%s]\" %\n self.decoded_raw)\n \n log.trace(\"Matching sample [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s], [%s]\",\n match.group(1),match.group(2),match.group(3),match.group(4),match.group(5),\n match.group(6),match.group(7),match.group(8),match.group(9),match.group(10),\n match.group(11),match.group(12))\n res_5 = float(match.group(1))\n res_x1 = float(match.group(2))\n res_x5 = float(match.group(3))\n h_5 = float(match.group(4))\n h_x1 = float(match.group(5))\n h_x5 = float(match.group(6))\n eh = float(match.group(7))\n ref_temp_v = float(match.group(8))\n ref_temp_c = float(match.group(9))\n res_temp_v = float(match.group(10))\n res_temp_c = float(match.group(11))\n batt_v = float(match.group(12))\n \n \n result = [{DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_5,\n DataParticleKey.VALUE: res_5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_X1,\n DataParticleKey.VALUE: res_x1},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_X5,\n DataParticleKey.VALUE: res_x5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_5,\n DataParticleKey.VALUE: h_5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_X1,\n DataParticleKey.VALUE: h_x1},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.HYDROGEN_X5,\n DataParticleKey.VALUE: h_x5},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.EH_SENSOR,\n DataParticleKey.VALUE: eh},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.REFERENCE_TEMP_VOLTS,\n DataParticleKey.VALUE: ref_temp_v},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.REFERENCE_TEMP_DEG_C,\n DataParticleKey.VALUE: ref_temp_c},\n {DataParticleKey.VALUE_ID: 
BarsDataParticleKey.RESISTIVITY_TEMP_VOLTS,\n DataParticleKey.VALUE: res_temp_v},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.RESISTIVITY_TEMP_DEG_C,\n DataParticleKey.VALUE: res_temp_c},\n {DataParticleKey.VALUE_ID: BarsDataParticleKey.BATTERY_VOLTAGE,\n DataParticleKey.VALUE: batt_v}\n ]\n \n return result", "def setvoltages(self):\n pass", "def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios", "def get_voltage(self):\n status = self.get_status_response()\n volts = status[20] + (status[21] * 0x100) + (status[22] * 0x10000) + (status[23] * 0x1000000)\n volts = float(volts)\n volts /= (1000.0 * 1000.0)\n return volts\n #end get_voltage", "def test_2d_freq():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test.ft2\")\n assert data.shape == (2048, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == 1601.83\n assert round(data[10,22],2) == 3079.44\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[174.84, 65.21])\n check_ppm_limits(dic,data,1,[253.90, -143.80])", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = 
Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def __init__(self):\n self.counts = [0] * 10\n self.values = [2000] * 10\n self.epsilon = 0.1", "def structured_spectra(pivData, d=25*0.33, **kwargs):\n \n es = corrLib.energy_spectrum(pivData, d=d)\n es = es.loc[es.k>0] # make sure the 1/es.k step won't encounter error\n \n x, y = xy_bin(es.k, es.E, **kwargs)\n y *= 2 * np.pi * x\n x = (2 * np.pi / x) ** 2 / 9\n spectra = pd.DataFrame({'l_r': x, 'E': y}).set_index('l_r').sort_index()\n \n return spectra", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def test_frequency(self):\n self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)", "def test_frequency(self):\n self.assertAlmostEqual(self.tunneling.frequency.value_si, self.frequency, 4)", "def test_double_ended_two_matching_sections_and_two_asym_atts():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 5\n time = np.arange(nt)\n nx_per_sec = 4\n nx = nx_per_sec * 9\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = 4.0 + np.cos(time) * 4\n ts_warm = 20.0 + -np.sin(time) * 4\n ts_ground = np.linspace(1, 9, num=nx_per_sec)\n\n C_p = 1324 # 1/2 * E0 * v * K_+/lam_+^4\n eta_pf = np.cos(time) / 10 + 1 # eta_+ (gain factor forward channel)\n eta_pb = np.sin(time) / 10 + 1 # eta_- (gain factor backward channel)\n C_m = 5000.0\n eta_mf = np.cos(time + np.pi / 8) / 10 + 1\n eta_mb = np.sin(time + np.pi / 8) / 10 + 1\n dalpha_r = 0.005284\n dalpha_m = 0.004961\n dalpha_p = 0.005607\n gamma = 482.6\n talph_fw = 0.95\n talph_bw = 0.85\n\n temp_real_kelvin = np.zeros((len(x), nt)) + 273.15\n temp_real_kelvin[:nx_per_sec] += ts_cold[None]\n temp_real_kelvin[nx_per_sec : 2 * nx_per_sec] += ts_warm[None]\n temp_real_kelvin[2 * nx_per_sec : 3 * nx_per_sec] += ts_ground[:, None]\n temp_real_kelvin[3 * nx_per_sec : 4 * nx_per_sec] += ts_ground[::-1, None]\n temp_real_kelvin[5 * nx_per_sec : 6 * nx_per_sec] += ts_ground[:, None] + 5\n temp_real_kelvin[6 * nx_per_sec : 7 * nx_per_sec] += ts_ground[:, None] + 5\n temp_real_kelvin[7 * nx_per_sec : 8 * nx_per_sec] += ts_warm[None]\n temp_real_kelvin[8 * nx_per_sec : 9 * nx_per_sec] += ts_cold[None]\n\n temp_real_celsius = temp_real_kelvin - 273.15\n\n st = (\n eta_pf[None]\n * C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n st[3 * nx_per_sec :] *= talph_fw\n st[6 * nx_per_sec :] *= talph_fw\n ast = (\n eta_mf[None]\n * C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst = (\n eta_pb[None]\n * C_p\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_p * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst[: 3 * nx_per_sec] *= talph_bw\n rst[: 6 * nx_per_sec] *= talph_bw\n rast = (\n eta_mb[None]\n * C_m\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_m * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n\n ds = DataStore(\n {\n \"TMPR\": ([\"x\", \"time\"], temp_real_celsius),\n \"st\": ([\"x\", 
\"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, x[nx_per_sec - 1])],\n \"warm\": [slice(x[nx_per_sec], x[2 * nx_per_sec - 1])],\n }\n ms = [\n (\n slice(x[2 * nx_per_sec], x[3 * nx_per_sec - 1]),\n slice(x[3 * nx_per_sec], x[4 * nx_per_sec - 1]),\n True,\n ),\n (\n slice(x[5 * nx_per_sec], x[6 * nx_per_sec - 1]),\n slice(x[6 * nx_per_sec], x[7 * nx_per_sec - 1]),\n False,\n ),\n ]\n\n ds.calibration_double_ended(\n sections=sections,\n st_var=0.5,\n ast_var=0.5,\n rst_var=0.1,\n rast_var=0.1,\n method=\"wls\",\n solver=\"sparse\",\n trans_att=[x[3 * nx_per_sec], x[6 * nx_per_sec]],\n matching_sections=ms,\n )\n\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=7)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=7)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=7)\n pass", "def calibrate_high(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage <= 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n # if enough values are okay:\n else:\n # get \"high range\" coefficients\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n # calculate density\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho", "def get_scaneagle_input_rv_statistics(rv_dict):\n mu = np.zeros(len(rv_dict))\n std_dev = np.eye(len(rv_dict))\n i = 0\n for rvs in rv_dict:\n if rvs == 'Mach_number':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'CT':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'W0':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'mrho':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'R':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'load_factor':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'E':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'G':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n elif rvs == 'altitude':\n mu[i] = rv_dict[rvs]['mean']\n std_dev[i,i] = rv_dict[rvs]['std_dev']\n i += 1\n\n return mu, std_dev", "def calc_out_voltage(self, input_photocurrent_file):\n pass", "def supported_vertical_rates(self):\n supp_code = self._block[2] & 0x1F\n return tools.DictFilter(self._ref_rates, supp_code)", "def benthos_psa916_dict(calib, signal):\n\n #array mode\n try:\n altitude = []\n for signal_x in signal:\n temp = (300 * signal_x / calib['ScaleFactor']) + calib['Offset']\n altitude.append(temp)\n #single mode\n except:\n altitude = (300 * signal / 
calib['ScaleFactor']) + calib['Offset']\n return altitude", "def generate_data(freq, params, model):\n # Generate limit spectrum\n limit_spectrum = model(params)\n \n # Create data with noise from equation (A1) of Anderson (1990)\n data = -limit_spectrum * np.log(np.random.rand(len(freq)))\n return data, limit_spectrum", "def estimated_voltage(clocks, clock_threshold, voltage_scale):\n return [1 + ((clock > clock_threshold) * (1e-3 * voltage_scale * (clock-clock_threshold))) for clock in clocks]", "def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1):\n if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24):\n raise ValueError(\n \"Unsupported beat resolution. Only support 4, 6, 8 ,9, 12, 16, 18 and 42.\"\n )\n\n def _drum_pattern_mask(res, tol):\n \"\"\"Return a drum pattern mask with the given tolerance.\"\"\"\n if res == 24:\n drum_pattern_mask = np.tile([1.0, tol, 0.0, 0.0, 0.0, tol], pianoroll.shape[0]//6)\n elif res == 12:\n drum_pattern_mask = np.tile([1.0, tol, tol], 4)\n elif res == 6:\n drum_pattern_mask = np.tile([1.0, tol, tol], 2)\n elif res == 18:\n drum_pattern_mask = np.tile([1.0, tol, 0.0, 0.0, 0.0, tol], 3)\n elif res == 9:\n drum_pattern_mask = np.tile([1.0, tol, tol], 3)\n elif res == 16:\n drum_pattern_mask = np.tile([1.0, tol, 0.0, tol], 4)\n elif res == 8:\n drum_pattern_mask = np.tile([1.0, tol], 4)\n elif res == 4:\n drum_pattern_mask = np.tile([1.0, tol], 2)\n return drum_pattern_mask\n\n drum_pattern_mask = _drum_pattern_mask(beat_resolution, tolerance)\n n_in_pattern = np.sum(drum_pattern_mask * np.count_nonzero(pianoroll, 1))\n return n_in_pattern / np.count_nonzero(pianoroll)", "def single_freq_time(params):\n f = params[0]\n t = params[1]\n Ref_param = params[2] \n Ref = pp.Reflectometer_Output(Ref_param.file_path, [f], [t],\n Ref_param.n_cross_section,\n Ref_param.FWR_dimension, True, \n Ref_param.receiver_file_name)\n return Ref.E_out", "def set_r_1k_volt(self,volt):\n self.spi_disable()\n self.spi_enable()\n data1=volt*256//self.vref\n data1=int(data1)\n temp0=AD5601_mode[\"r_1k\"]|((data1>>2)&0x00ff)\n temp1=(data1<<6)&0x00ff\n data=[temp0,temp1]\n return self.spi_device.write(data)", "def fluxes_actual_to_increments(example_dict):\n\n edge_heights_m_agl = get_grid_cell_edges(example_dict[HEIGHTS_KEY])\n grid_cell_widths_metres = get_grid_cell_widths(edge_heights_m_agl)\n\n num_examples = len(example_dict[VALID_TIMES_KEY])\n num_heights = len(example_dict[HEIGHTS_KEY])\n\n grid_cell_width_matrix_metres = numpy.reshape(\n grid_cell_widths_metres, (1, num_heights)\n )\n grid_cell_width_matrix_metres = numpy.repeat(\n grid_cell_width_matrix_metres, repeats=num_examples, axis=0\n )\n\n down_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_NAME\n )\n up_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_NAME\n )\n\n down_flux_increment_matrix_w_m03 = numpy.diff(\n down_flux_matrix_w_m02, axis=1, prepend=0.\n ) / grid_cell_width_matrix_metres\n\n up_flux_increment_matrix_w_m03 = numpy.diff(\n up_flux_matrix_w_m02, axis=1, prepend=0.\n ) / grid_cell_width_matrix_metres\n\n # down_flux_increment_matrix_w_m03 = numpy.maximum(\n # down_flux_increment_matrix_w_m03, 0.\n # )\n # up_flux_increment_matrix_w_m03 = numpy.maximum(\n # up_flux_increment_matrix_w_m03, 0.\n # )\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_down_increment = SHORTWAVE_DOWN_FLUX_INC_NAME in vector_target_names\n found_up_increment = 
SHORTWAVE_UP_FLUX_INC_NAME in vector_target_names\n\n if not found_down_increment:\n vector_target_names.append(SHORTWAVE_DOWN_FLUX_INC_NAME)\n if not found_up_increment:\n vector_target_names.append(SHORTWAVE_UP_FLUX_INC_NAME)\n\n down_increment_index = vector_target_names.index(\n SHORTWAVE_DOWN_FLUX_INC_NAME\n )\n up_increment_index = vector_target_names.index(SHORTWAVE_UP_FLUX_INC_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_down_increment:\n example_dict[VECTOR_TARGET_VALS_KEY][..., down_increment_index] = (\n down_flux_increment_matrix_w_m03\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY], obj=down_increment_index,\n values=down_flux_increment_matrix_w_m03, axis=-1\n )\n\n if found_up_increment:\n example_dict[VECTOR_TARGET_VALS_KEY][..., up_increment_index] = (\n up_flux_increment_matrix_w_m03\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY], obj=up_increment_index,\n values=up_flux_increment_matrix_w_m03, axis=-1\n )\n\n return example_dict", "def test_put_voltage_map_item(self):\n pass", "def metric(data, experiment, **kwargs):\n cycles = data.get(\"cycles\")\n freq = experiment.sampler.get(\"frequency\")\n if cycles is None or freq is None:\n return None\n return cycles / freq", "def temp_advection(bufr):\r\n items = list(bufr.items())\r\n for item in items:\r\n if item[0] == 'PROF' or item[0] == b'PROF':\r\n bufr_prof = item[1]\r\n models = list(bufr_prof.keys())\r\n num_models = len(models)\r\n dates = list(bufr_prof[list(bufr_prof.keys())[0]].keys())\r\n num_dates = len(dates)\r\n num_times = len(bufr_prof[list(bufr_prof.keys())[0]][dates[0]].keys())\r\n num_features = num_models * num_times\r\n\r\n advection_array = np.zeros((num_dates, num_features))\r\n\r\n def advection_index(V1, V2):\r\n \"\"\"\r\n The advection index measures the strength of veering/backing of wind.\r\n :param V1: array wind vector at lower model level\r\n :param V2: array wind vector at higher model level\r\n :return: index of projection of (V2 - V1) onto V1\r\n \"\"\"\r\n proj = V2 - np.dot(V1, V2) * V1 / np.linalg.norm(V1)\r\n diff = V1 - V2\r\n sign = np.sign(np.arctan2(diff[1], diff[0]))\r\n return sign * np.linalg.norm(proj)\r\n\r\n # Here comes the giant ugly loop.\r\n sample = 0\r\n for date in dates:\r\n feature = 0\r\n for model in models:\r\n try:\r\n for eval_date in bufr_prof[model][date].keys():\r\n items = bufr_prof[model][date][eval_date].items()\r\n for item in items:\r\n if item[0] == 'UWND' or item[0] == b'UWND':\r\n u = item[1]\r\n if item[0] == 'VWND' or item[0] == b'VWND':\r\n v = item[1]\r\n try:\r\n V1 = np.array([u[0], v[0]])\r\n V2 = np.array([u[1], v[1]])\r\n except IndexError:\r\n print('Not enough wind levels available for advection calculation; omitting...')\r\n return\r\n advection_array[sample, feature] = advection_index(V1, V2)\r\n feature += 1\r\n except KeyError: #date doesn't exist\r\n pass\r\n sample += 1\r\n\r\n return advection_array", "def correct_voltage_offset_per_power(\n power: np.ndarray,\n current: np.ndarray,\n voltage: np.ndarray,\n frequency: Union[float, np.ndarray],\n n_peak_width: int,\n n_std_as_bin: int,\n debug: bool = False,\n):\n # Copy the data to preserve the original\n new_voltage = np.copy(voltage)\n\n # Iterate on the extra dimensions if any\n it = np.nditer(power[..., 0, 0], [\"multi_index\"])\n\n for b in it:\n index = it.multi_index\n\n # Compute the value of the Shapiro step\n step = 
shapiro_step(\n frequency if isinstance(frequency, float) else frequency[index]\n )\n\n # Those arrays are guaranteed to be 2D\n p = power[index]\n c = current[index]\n v = new_voltage[index]\n\n # Determine the noise on the data by looking at the zero resistance state\n # of the lowest measurement power\n lpower_index = np.argmin(p[:, 0])\n _, std = compute_voltage_offset(\n c[lpower_index, :], v[lpower_index, :], n_peak_width\n )\n\n # Compute the step fraction to use when binning to get a high resolution\n # histogram\n step_fraction = n_std_as_bin * std / step\n\n # Compute the histogram of the steps and get the voltage in unit of shapiro steps\n # As a consequence steps are an interger value\n volt_1d, histo = bin_power_shapiro_steps(p, c, v, frequency, step_fraction)\n\n # Iterate over the line of the histo and find the peaks (ie Shapiro steps)\n for j, h in enumerate(histo):\n\n # Enforce that the peaks are at least of about 1 (ignore fractional steps)\n peaks, _ = find_peaks(h, distance=0.95 / step_fraction, height=max(h) / 2)\n\n # Calculate deviation of each peak and average\n dev = np.average([volt_1d[i] - round(volt_1d[i]) for i in peaks])\n\n # Subctract the offset of each line\n v[j] -= dev * step\n\n return new_voltage", "def __init__(self, \n frequencyR, frequencyG, frequencyB,\n phaseR, phaseG, phaseB,\n widthR=127, widthG=127, widthB=127,\n minR=127, minG=127, minB=127, \n step=np.radians(30)):\n self.frequencyR = frequencyR\n self.frequencyG = frequencyG\n self.frequencyB = frequencyB\n self.phaseR = phaseR\n self.phaseG = phaseG\n self.phaseB = phaseB\n self.x = 0\n self.step = step\n self.widthR = widthR\n self.widthG = widthG\n self.widthB = widthB\n self.minR = minR\n self.minG = minG\n self.minB = minB", "def fluxes_increments_to_actual(example_dict):\n\n edge_heights_m_agl = get_grid_cell_edges(example_dict[HEIGHTS_KEY])\n grid_cell_widths_metres = get_grid_cell_widths(edge_heights_m_agl)\n\n num_examples = len(example_dict[VALID_TIMES_KEY])\n num_heights = len(example_dict[HEIGHTS_KEY])\n\n grid_cell_width_matrix_metres = numpy.reshape(\n grid_cell_widths_metres, (1, num_heights)\n )\n grid_cell_width_matrix_metres = numpy.repeat(\n grid_cell_width_matrix_metres, repeats=num_examples, axis=0\n )\n\n down_flux_increment_matrix_w_m03 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_INC_NAME\n )\n up_flux_increment_matrix_w_m03 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_INC_NAME\n )\n\n down_flux_matrix_w_m02 = numpy.cumsum(\n down_flux_increment_matrix_w_m03 * grid_cell_width_matrix_metres,\n axis=1\n )\n up_flux_matrix_w_m02 = numpy.cumsum(\n up_flux_increment_matrix_w_m03 * grid_cell_width_matrix_metres,\n axis=1\n )\n\n down_flux_matrix_w_m02 = numpy.maximum(down_flux_matrix_w_m02, 0.)\n up_flux_matrix_w_m02 = numpy.maximum(up_flux_matrix_w_m02, 0.)\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_down_flux = SHORTWAVE_DOWN_FLUX_NAME in vector_target_names\n found_up_flux = SHORTWAVE_UP_FLUX_NAME in vector_target_names\n\n if not found_down_flux:\n vector_target_names.append(SHORTWAVE_DOWN_FLUX_NAME)\n if not found_up_flux:\n vector_target_names.append(SHORTWAVE_UP_FLUX_NAME)\n\n down_flux_index = vector_target_names.index(SHORTWAVE_DOWN_FLUX_NAME)\n up_flux_index = vector_target_names.index(SHORTWAVE_UP_FLUX_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_down_flux:\n example_dict[VECTOR_TARGET_VALS_KEY][..., down_flux_index] = (\n 
down_flux_matrix_w_m02\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=down_flux_index, values=down_flux_matrix_w_m02, axis=-1\n )\n\n if found_up_flux:\n example_dict[VECTOR_TARGET_VALS_KEY][..., up_flux_index] = (\n up_flux_matrix_w_m02\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=up_flux_index, values=up_flux_matrix_w_m02, axis=-1\n )\n\n return example_dict", "def hp34401a_read_voltage_rng_res(hp_meter , v_range, v_resolution):\n hp_meter.write(\"MEAS:VOLT:DC? \" + str(v_range) + \" , \" + str(v_resolution))\n return float(hp_meter.read())", "def test_diff_analog_in_cal_5v_loop(self):\n for g in self.l.gains:\n for s,c,e in [(5, 11, .1), (2.5, 10, .03)]:\n v = self.l.input(channels=(c,c,c,c), gains=(g,g,g,g))\n r = v[0]\n if s*g > 20:\n if s*g > 25:\n self.assertTrue(v[3],\n \"%s should be overvoltage (%g, %g)\" % (v,s,g))\n continue\n for i in r:\n self.assertTrue(abs(s-i) < e,\n \"%g is not %g, channel %g, gain %g\" % (i,s,c,g))", "def length_vs_rate_2(Lmax=200, Lmin=102.4, p=75, fmax=1e12, p1=database['K+'],\r\n p2=database['pi+'], p3=database['p+'], E=1e6, n=20,\r\n ng=50, nl=50, nf=100, delta_p=1.6e-2, just_pi=False,\r\n set_freq=5.7e9, count=True):\r\n if set_freq == None:\r\n print('Estimated time: {0}'.format(timing(n*ng*nl*nf*3.84e-05)))\r\n else:\r\n print('Estimated time: {0}'.format(timing(n*ng*nl*3.84e-05)))\r\n i = 0\r\n while i < 1e7:\r\n i += 1\r\n start = time.time()\r\n L_range = np.linspace(Lmin, Lmax, n)\r\n opt_gap, opt_freq, opt_cav, opt_disp, rate = [], [], [], [], []\r\n for L in L_range:\r\n if count == True:\r\n print(f'L={L}')\r\n opt = cavity_gap_comp(L_t=L-22.8, p=p, fmax=fmax, p1=p1, p2=p2, p3=p3,\r\n E=E, plot=False, delta_p=delta_p, n=ng, nf=nf,\r\n nl=nl, just_pi=just_pi, set_freq=set_freq,\r\n count=count)\r\n opt_freq.append(opt[0])\r\n opt_gap.append(opt[1])\r\n opt_cav.append(opt[2])\r\n opt_disp.append(opt[3])\r\n rate.append(decay_rate(L, p1, p)*1e-6)\r\n opt_freq, opt_disp, rate = np.array(opt_freq), np.array(opt_disp), np.array(rate)\r\n# ratio = (opt_disp*rate)/(opt_freq*L)\r\n# ratio *= (np.max(opt_freq)-np.min(opt_freq))/(np.max(ratio)-np.min(ratio))\r\n# ratio += ((np.min(opt_freq))-np.min(ratio))\r\n fig = plt.figure(figsize=[14, 4])\r\n host = fig.add_subplot(1, 1, 1)\r\n fig.subplots_adjust(right=0.75)\r\n par1 = host.twinx()\r\n# par2 = host.twinx()\r\n# par2.spines['right'].set_position(('axes', 1.2))\r\n# make_patch_spines_invisible(par2)\r\n# par2.spines['right'].set_visible(True) \r\n p1, = host.plot(L_range, rate, 'r', alpha=0.8, lw=2, label='decay rate in decay region')\r\n p2, = par1.plot(L_range, opt_disp, 'b', alpha=0.8, lw=2, label='min disp')\r\n# p3, = par2.plot(L_range, opt_freq, 'b', alpha=0.8, lw=2, label='frequency')\r\n# p4, = par2.plot(L_range, ratio, 'g', alpha=0.8, lw=2, label='ratio')\r\n host.set_xlim(Lmin, Lmax)\r\n host.set_xlabel('Distance between Target and Decay Region / m', fontsize=15)\r\n host.set_ylabel('Decay Rate in Decay Region / MHz', fontsize=15)\r\n par1.set_ylabel('Minimum Displacement / mm', fontsize=15)\r\n# par2.set_ylabel('Frequency / Hz', fontsize=15)\r\n host.yaxis.label.set_color(p1.get_color())\r\n par1.yaxis.label.set_color(p2.get_color())\r\n# par2.yaxis.label.set_color(p3.get_color())\r\n host.tick_params(axis='y', colors=p1.get_color())\r\n par1.tick_params(axis='y', colors=p2.get_color())\r\n# par2.tick_params(axis='y', colors=p3.get_color())\r\n# lines = 
[p1, p2, p3, p4]\r\n# lines = [p1, p2]\r\n# host.legend(lines, [l.get_label() for l in lines], loc=[0.45, 0.8], fontsize=20)\r\n host.minorticks_on()\r\n par1.minorticks_on()\r\n host.set_ylim(0)\r\n par1.set_ylim(0)\r\n host.grid()\r\n host_yticks = np.arange(0, int(np.max(rate))+2, 1)\r\n par1_yticks = np.arange(0, (int(np.max(opt_disp)/100)+1)*100, 100)\r\n host.set_yticks(host_yticks)\r\n host.set_yticklabels(host_yticks)\r\n par1.set_yticks(par1_yticks)\r\n par1.set_yticklabels(par1_yticks)\r\n host.set_title(r'Unwanted particle displacement and observed $K^+$'+' decay rate\\nas a function of target distance', fontsize=15)\r\n plt.show()\r\n fig.savefig('Length_vs_Rate_75.pdf', bbox_inches='tight')\r\n index = np.argmax(opt_disp)\r\n# index = np.argmax(ratio)\r\n print(f'{timing(time.time()-start)}')\r\n return [opt_disp[index], L_range[index], opt_gap[index], opt_cav[index], opt_freq[index], rate[index]]", "def FV(rate, nper, pmt, pv):\n if type(pmt) == int:\n pmt = np.array([pmt])\n else:\n pmt = np.array(pmt)\n if nper <= 0:\n print(\"nper needs to be greater than zero.\")\n elif nper != len(pmt) and sum(pmt) != 0:\n print(\"pmt vector length needs to match nper or be zero.\")\n else:\n pv_fv = pv * (1 + rate) ** nper\n fv_pmt = [(pmt[i - 1] * (1 + rate) ** (len(pmt) - i)) for i in np.arange(1, len(pmt) + 1, 1)]\n return(sum(fv_pmt) + pv_fv)", "def calibration_spectra(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 4*8 # Duration\n + 2*8 # Quiet time\n + 4*4 # Live time\n + 2*8 # Avg Temperature\n + 1 # Spare\n + 1 # Comp Schema accum S\n + 3 # Comp Schema accum K\n + 3 # Comp Schema accum M\n + 4*8 # Detector mask\n + 4 # Spare\n + 12 # Pixel mask\n + 1*8 # Sub spectrum mask\n + 2 # Spare\n + 8*( # 8 x \n 2 # Spare\n + 10 # Number of spectral points\n + 10 # Number of summed channels in spectral point\n + 10 # Lowest channel in sub spectrum \n )\n + 2*8 # Number of structure in packet\n )\n\n variable = (\n num_samples * (\n 4 # Spare\n + 5 # Detector ID\n + 4 # Pixel ID\n + 3 # Sub spec ID\n + 16 # Number of compressed spectral points\n + num_energies*1*8 # Compressed spectral point\n\n )\n )\n\n return fixed_header, variable", "def fluxes_to_heating_rate(example_dict):\n\n down_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_DOWN_FLUX_NAME\n )\n up_flux_matrix_w_m02 = get_field_from_dict(\n example_dict=example_dict, field_name=SHORTWAVE_UP_FLUX_NAME\n )\n pressure_matrix_pascals = get_field_from_dict(\n example_dict=example_dict, field_name=PRESSURE_NAME\n ) + 0.\n\n dummy_pressure_matrix_pascals = (\n pressure_matrix_pascals[:, [-1]] +\n (pressure_matrix_pascals[:, [-1]] - pressure_matrix_pascals[:, [-2]])\n )\n pressure_matrix_pascals = numpy.concatenate(\n (pressure_matrix_pascals, dummy_pressure_matrix_pascals), axis=1\n )\n\n net_flux_matrix_w_m02 = down_flux_matrix_w_m02 - up_flux_matrix_w_m02\n dummy_net_flux_matrix_w_m02 = (\n net_flux_matrix_w_m02[:, [-1]] +\n (net_flux_matrix_w_m02[:, [-1]] - net_flux_matrix_w_m02[:, [-2]])\n )\n net_flux_matrix_w_m02 = numpy.concatenate(\n (net_flux_matrix_w_m02, dummy_net_flux_matrix_w_m02), axis=1\n )\n\n coefficient = GRAVITY_CONSTANT_M_S02 / DRY_AIR_SPECIFIC_HEAT_J_KG01_K01\n\n # heating_rate_matrix_k_day01 = DAYS_TO_SECONDS * coefficient * (\n # numpy.gradient(net_flux_matrix_w_m02, axis=1) /\n # numpy.absolute(numpy.gradient(pressure_matrix_pascals, axis=1))\n # )\n\n heating_rate_matrix_k_day01 = DAYS_TO_SECONDS * coefficient * (\n 
numpy.diff(net_flux_matrix_w_m02, axis=1) /\n numpy.absolute(numpy.diff(pressure_matrix_pascals, axis=1))\n )\n\n error_checking.assert_is_numpy_array_without_nan(net_flux_matrix_w_m02)\n error_checking.assert_is_numpy_array_without_nan(pressure_matrix_pascals)\n heating_rate_matrix_k_day01[numpy.isnan(heating_rate_matrix_k_day01)] = 0.\n\n vector_target_names = example_dict[VECTOR_TARGET_NAMES_KEY]\n found_heating_rate = SHORTWAVE_HEATING_RATE_NAME in vector_target_names\n if not found_heating_rate:\n vector_target_names.append(SHORTWAVE_HEATING_RATE_NAME)\n\n heating_rate_index = vector_target_names.index(SHORTWAVE_HEATING_RATE_NAME)\n example_dict[VECTOR_TARGET_NAMES_KEY] = vector_target_names\n\n if found_heating_rate:\n example_dict[VECTOR_TARGET_VALS_KEY][..., heating_rate_index] = (\n heating_rate_matrix_k_day01\n )\n else:\n example_dict[VECTOR_TARGET_VALS_KEY] = numpy.insert(\n example_dict[VECTOR_TARGET_VALS_KEY],\n obj=heating_rate_index, values=heating_rate_matrix_k_day01, axis=-1\n )\n\n return example_dict", "def calc_output(line, react_cap=None, gen_res_high=225, gen_res_low=50):\n # unpack\n t, v, i = line\n t_diff = t[1] - t[0]\n # assert t_diff == 1e-9 # time scale should be 1ns.\n # values based on current measurment. Assuming voltage waveform is aligned.\n\n # validation on the real maxima/minima of current\n assert i.argmax() < i.argmin(), 'Current valley before peak, signal is inverted!'\n\n v_min = min(v)\n v_max = max(v)\n v_max_time = np.where(v == v_max)[0][0] # first value where voltage has maximum\n # v_min_time = np.where(v == v_min)[0][-1] # last value where voltage has minimum\n # assert v_max_time < v_min_time, 'Voltage valley before peak, signal inverted!'\n c_peak_time = i[0:v_max_time].argmax() # current peak is before voltage maximum\n c_max = i[c_peak_time]\n\n c_valley_time = i.argmin()\n c_min = min(i)\n assert i[c_valley_time] == c_min\n\n # some validation\n assert c_peak_time < c_valley_time, 'Current valley before peak, signal is inverted!'\n assert MAX_VOLTAGE_MIN <= v_max < MAX_VOLTAGE_MAX, 'Max voltage error (%r)' % v_max\n assert MAX_CURRENT_MIN <= c_max < MAX_CURRENT_MAX, 'Max current error (%r)' % c_max\n\n # Find the settling time of the current. Than use the time where the current is stable\n # to calculate the final pulse voltage. This pulse final voltage is then used to calculate\n # the settling time and risetime of the voltage.\n\n # all parts of current inside 10% of maximum, till end of pulse\n i_time_settling_options = [abs(x) < 0.1 * c_max for x in i[0:c_valley_time]]\n ranges = count_ranges(i_time_settling_options)\n range_before, range_pulse = find_longest_ranges(ranges, 2) # [end, length]\n end_pulse = range_pulse[0]\n i_time_settling = range_pulse[0] - range_pulse[1]\n # average of voltage during pulse when current is < 5% of max current\n v_pulse = np.mean(v[i_time_settling:end_pulse])\n # all parts of current inside 10% of maximum, till end of pulse\n v_time_settling_options = [abs(x - v_pulse) < (0.1 * v_pulse) for x in v]\n ranges = count_ranges(v_time_settling_options)\n if ranges == []: # if too much oscillations, a range cannot be found. Increase the bounds:\n # all parts of current inside 10% of maximum, till end of pulse\n v_time_settling_options = [abs(x - v_pulse) < (0.3 * v_pulse) for x in v]\n ranges = count_ranges(v_time_settling_options)\n print('Warning, voltage settling options increased from 10% to 30%!')\n assert ranges != [], \"Error! 
Line is too unstable.\"\n pulse = find_longest_ranges(ranges, 1) # pulse=[end,length] of voltage pulse stable\n settling_end = pulse[0] - pulse[1] # voltage pulse stable start\n # recalculate pulse voltage\n v_pulse_new = np.mean(v[settling_end:pulse[0]])\n if v_pulse > 13e3: # pulses for highest voltages have to be stable. Lower voltages are always less stable.\n assert abs(v_pulse-v_pulse_new)/v_pulse_new < 0.01, 'Pulse voltage unstable.'\n t_settling_end = t[settling_end] # voltage pulse stable start time\n v05 = 0.05 * v_pulse\n settling_start = np.where(v > v05)[0][0]\n t_settling_start = t[settling_start] # when v first rises above 0.05 of final\n t_settling = t_settling_end - t_settling_start\n v10 = 0.1 * v_pulse\n v90 = 0.9 * v_pulse\n t_rise_start = t[np.where(v > v10)[0][0]]\n t_rise_end = t[np.where(v > v90)[0][0]]\n t_rise = t_rise_end - t_rise_start\n rise_rate = (v90 - v10) / (t_rise)\n v_overshoot = v_max / v_pulse\n pulse_stable = int((settling_end + end_pulse) / 2) # point where the pulse is very stable\n # energy\n p = (v * i) # for this to be correct, make sure lines are aligned in b_correct_lines using offset 'v_div'\n e = integrate.cumtrapz(p, t, initial=0)\n p_rise = p[settling_start:pulse_stable]\n e_rise = e[settling_start:pulse_stable][-1]\n p_res = np.append(i[0:pulse_stable] ** 2 * gen_res_high, i[pulse_stable:] ** 2 * gen_res_low)\n # 1/2*C*V^2 is energy stored in capacitor, which is lost after discharging pulse.\n # e_cap = 1 / 2 * react_cap * v_pulse ** 2\n e_res = integrate.cumtrapz(p_res, t, initial=0)\n e_res_total = e_res[-1]\n e_plasma = e[-1] # energy to plasma is energy in positive pulse except charge on capacitor.\n\n # Correct the time axis to have 0 at the start of the pulse\n start = t[settling_start]\n t = t - start\n\n # all these values are added to the pickle and xlsx with 'output_' prepend in calc_run.py\n data = {\n 't': t,\n 'v': v,\n 'c': i,\n 'c_min': c_min,\n 'c_max': c_max,\n 'v_min': v_min,\n 'v_max': v_max,\n 'v_pulse': v_pulse,\n 't_settling': t_settling,\n 't_rise': t_rise,\n 'rise_rate': rise_rate,\n 'v_overshoot': v_overshoot,\n 'p': p,\n 'e': e,\n 'p_rise': p_rise,\n 'e_rise': e_rise,\n\n 'p_res': p_res,\n 'e_res': e_res,\n 'e_res_total': e_res_total,\n # 'e_cap': e_cap,\n 'e_plasma': e_plasma,\n\n 'start': start,\n 'end': t[end_pulse],\n # 'start_index': settling_start,\n # 'end_index': end_pulse,\n # 'test': i_time_settling\n }\n return data", "def test_get_integration_time_vals():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n test_array = test_uv.integration_time.copy()\n test_array = test_array.reshape(test_shape)\n assert np.allclose(test_array, inttime_array)", "def missing_reg(self):\n keys = []\n values = []\n count = [0] * 24\n\n for hour in self.data_file.buckets:\n for i in range(len(self.data_file.buckets[hour])):\n data_pt = self.data_file.buckets[hour][i]\n if data_pt['type'] == 'slow':\n time_before = self.data_file.buckets[hour][i - 1]['timestamp']\n time_slow = self.data_file.buckets[hour][i]['timestamp']\n if i != len(self.data_file.buckets[hour]) - 1:\n time_after = self.data_file.buckets[hour][i + 1]['timestamp']\n missing_reg_interval(keys, values, time_before, time_after, hour)\n else:\n missing_reg_interval(keys, values, time_before, time_slow, hour)\n if 
(time_slow - time_before) / float(Config.BOUNDARY) > 1:\n count[hour] += round((time_slow - time_before) / float(Config.BOUNDARY))\n missing_regular = dict(zip(keys, values))\n\n logger.info(f\"missing regular due to slow updates per hour: {count}\")\n logger.info(f\"missing regular due to slow updates: {missing_regular}\")\n logger.info(f\"total missing regular due to slow updates: {sum(count)}\")\n Config.ANALYSIS.write(\"\\n\")\n return missing_regular", "def __init__(self,\n time_step: float,\n acceleration: Callable[[float], float],\n initial_value: float = 0.0,\n initial_velocity: float = 0.0,\n num_steps: int = 50,\n collected_data: Iterable[Dict[str, float]] = [],\n time_cumulation: float = 100,\n acceleration_error_constant: float = 10.0,\n start_time: float = 0.0):\n self.acceleration = acceleration\n self._timestep = time_step\n self._start_time = start_time\n\n self._previous_values = [None] * num_steps\n self._previous_values[0] = initial_value\n self._previous_values[1] = (\n time_step * initial_velocity + initial_value)\n\n self._initial_value = initial_value\n self._initial_velocity = initial_velocity\n\n self._num_steps = num_steps\n self._max_collected_data_time: float = max(\n map(operator.itemgetter('time'), collected_data), default=0.0)\n self._collected_data = collected_data\n self.feedback = unary_linear_interpolator.UnaryLinearInterpolator(\n list(map(operator.itemgetter('time'), collected_data)),\n list(map(operator.itemgetter('altitude'), collected_data)))\n\n self._last_index = self.fill_values(self._previous_values)\n\n self.past_n_steps = math.ceil(time_cumulation / (time_step * 1000))\n self._acceleration_error_constant = acceleration_error_constant\n self._velocity_storage = []", "def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, 
capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n 
average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')", "def __init__(self):\n self.counts = [0] * 10\n self.values = [0] * 10\n self.epsilon = 0.1", "def __init__(self, drift_detector, size):\r\n self.drift_detector = drift_detector\r\n self.sample = 0\r\n self.reservoir = Reservoir(size)\r\n self.buffer = 
Buffer(size)\r\n self.confidence = 0.05\r\n self.recent_interval = []\r\n self.timestamp = 0\r\n self.vol_drift_found = False\r\n self.drift_found = False\r\n self.pre_drift_point = -1\r\n self.rolling_index = 0\r\n for i in range(size * 2 + 1):\r\n self.recent_interval.append(0.0)", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def __init__(self, *args, in_out=\"N\", rate=0):\n for arg in args:\n details=[]\n details.append(in_out)\n details.append(rate)\n self.videos[arg]=details", "def regrow(self, **kwargs):\n self.resources[self.resources >= self.min_value] += self.revive_rate\n self.resources[self.resources >= self.max_value] = self.max_value", "def _prorated_values(rate: str) -> Iterable[Tuple[str, Number]]:\n match = re.match(r'(?P<value>[\\d.]+)/(?P<period>\\w+)$', rate)\n res = cast(re.Match, match).groupdict()\n value = float(res['value'])\n value_per_second = value / get_period_seconds(res['period'])\n for period in ('minute', 'hour', 'day', 'month', 'year'):\n period_value = value_per_second * get_period_seconds(period)\n yield period, period_value", "def exotic_ratios( dividends, density, nSamples=5000 ):\n\n probabilities = prices_from_dividends(dividends)\n n = len(probabilities)\n monte_carlo = sample_exotics( dividends=dividends, density=density, nSamples=nSamples )\n win = monte_carlo['win']\n exacta = monte_carlo[\"exacta\"]\n trifecta = monte_carlo[\"trifecta\"]\n\n nTotal = nSamples\n while True:\n monte_carlo_ = sample_exotics(dividends=dividends, density=density, nSamples=nSamples)\n win_ = monte_carlo_['win']\n exacta_ = monte_carlo_[\"exacta\"]\n trifecta_ = monte_carlo_[\"trifecta\"]\n win.update(win_)\n exacta.update(exacta_)\n trifecta.update(trifecta_)\n nTotal += nSamples\n exacta_ratios = [[0.] 
* n for _ in range(n)]\n for ex in exacta:\n winner = ex[0]\n second = ex[1]\n p1 = probabilities[winner]\n p2 = probabilities[second]\n conditional_prob = (exacta[ex]/win[winner])\n harville_conditional_prob = p2/(1-p1)\n exacta_ratios[winner][second] = round(conditional_prob/harville_conditional_prob-1,3)\n pprint(exacta_ratios)\n np.savetxt(\"derby.csv\", np.array(exacta_ratios), delimiter=' & ', fmt='%2.2e', newline=' \\\\\\\\\\n')\n return {\"exacta\":exacta_ratios}", "def __init__(self, multiplier=1e-1):\r\n self.multiplier = multiplier", "def charging_current_and_voltage(self):\n done, data = self._request('GG')\n if done:\n milliamps = float(data[0])\n millivolts = float(data[1])\n return {\n 'amps': float(milliamps) / 1000 if milliamps > 0 else 0.0,\n 'volts': float(millivolts) / 1000 if millivolts > 0 else 0.0\n }\n\n raise EvseError", "def rateLaws(rxnList,formModus):\n \n eqList = dict()\n texeqList = dict()\n \n for rxn in rxnList.keys():\n subst_num = str()\n prod_num = str()\n subst_km = str()\n prod_km =str() # only needed if irreversible backwards\n \n # create first the numerator, as it is invariant over all rate laws\n for rct in rxnList[rxn]['rct'].keys():\n tID = 't_' + rct\n km = 'K_r_' + rxn + '_' + tID + rxnList[rxn]['rct'][rct]['suffix']\n #numerator\n t_num = tID+'^'+str(abs(rxnList[rxn]['rct'][rct]['stoi']))\n # process substrates\n if rxnList[rxn]['rct'][rct]['stoi'] < 0:\n if (subst_num == str()):\n subst_num = t_num\n subst_km = km + '^' + str(abs(rxnList[rxn]['rct'][rct]['stoi']))\n else:\n subst_num = subst_num + '*' + t_num\n subst_km = subst_km + '*' + km + '^' + str(abs(rxnList[rxn]['rct'][rct]['stoi']))\n else:\n if prod_num == str():\n prod_num = t_num\n prod_km = km + '^' + str(abs(rxnList[rxn]['rct'][rct]['stoi']))\n else:\n prod_num = prod_num + '*' + t_num\n prod_km = prod_km + '*' + km + '^' + str(abs(rxnList[rxn]['rct'][rct]['stoi']))\n \n # Create denominator in the different reaction form cases\n if formModus == 'rm':\n subSites = dict()\n prodSites = dict()\n denSites = dict()\n for rct in rxnList[rxn]['rct'].keys():\n tDen = '(t_' + rct + '/' +'K_r_' + rxn + '_t_' + rct + rxnList[rxn]['rct'][rct]['suffix'] + ')'\n for i in range(abs(rxnList[rxn]['rct'][rct]['stoi'])):\n if rxnList[rxn]['rct'][rct]['stoi'] < 0:\n subSites[str(rxnList[rxn]['rct'][rct]['actsit']+i*0.1)]=tDen\n else:\n prodSites[str(rxnList[rxn]['rct'][rct]['actsit']+i*0.1)]=tDen\n # sort all sites in a decreasing order\n for site in sorted(set(prodSites.keys()+subSites.keys()),reverse=1):\n if site in denSites.keys():\n 1\n elif (site in prodSites.keys()) and (site in subSites.keys()):\n denSites[site] = '(1+' + subSites[site] + '+' + prodSites[site]\n elif (site in prodSites.keys()) and float(site).is_integer():\n denSites[site] = '(1+' + prodSites[site]\n elif (site in subSites.keys()) and float(site).is_integer():\n denSites[site] = '(1+' + subSites[site]\n elif site in subSites.keys():\n intSite = str(float(site).__floordiv__(1))\n if intSite in prodSites.keys():\n denSites[intSite] = '(' + prodSites[intSite] + '+' + '(1+' + subSites[site] + ')*(1+' + subSites[intSite] +')'\n else:\n denSites[site] = '(1+' + subSites[site]\n elif site in prodSites.keys():\n intSite = str(float(site).__floordiv__(1))\n if intSite in subSites.keys():\n denSites[intSite] = '(' + subSites[intSite]+ '+' + '(1+' + prodSites[site] + ')*(1+'+ prodSites[intSite] + ')'\n else:\n denSites[site] = '(1+' + prodSites[site]\n \n # Add the inhibitors to the denominators\n for rct in 
rxnList[rxn]['inh'].keys():\n tDen = '(t_' + rct + '/' +'K_r_' + rxn + '_t_' + rct + rxnList[rxn]['inh'][rct]['suffix'] + ')'\n if rxnList[rxn]['inh'][rct]['hill'] == 0:\n tDen = tDen + '^' + 'KH_r_' + rxn + '_t_' + rct + rxnList[rxn]['inh'][rct]['suffix']\n elif rxnList[rxn]['inh'][rct]['hill'] != 1:\n tDen = tDen + '^' + str(rxnList[rxn]['inh'][rct]['hill'])\n \n site = str(rxnList[rxn]['inh'][rct]['actsit'])\n if not(site in denSites.keys()):\n site = str(float(site).__floordiv__(1))\n if rxnList[rxn]['inh'][rct]['subtype'] == 'uncompetitive': # uncompetitive inh\n denSites['i'+rct] = '(1+' + tDen\n elif rxnList[rxn]['inh'][rct]['subtype'] == 'competitive':# comp inhibition\n denSites[site] = denSites[site] + '+' + tDen\n elif rxnList[rxn]['inh'][rct]['subtype'] == 'noncompetitive':# noncomp inhibition\n denSites[site] = denSites[site] + '+' + tDen\n denSites['i'+rct] = '(1+' + tDen\n else: raise NameError('invalid inhibitor type (valid: uncompetitive,competitive,noncompetitive) in ' + rxn)\n \n # stitch the denominator together\n denTerm = ''\n for site in denSites.keys():\n denTerm = denTerm + denSites[site] + ')*'\n denTerm = denTerm[:-1]\n \n # convinience kinetics denominator\n elif formModus == 'cc':\n subSites = dict()\n prodSites = dict()\n for rct in rxnList[rxn]['rct'].keys():\n tDen = '(t_' + rct + '/' +'K_r_' + rxn + '_t_' + rct + rxnList[rxn]['rct'][rct]['suffix'] + ')'\n for i in range(abs(rxnList[rxn]['rct'][rct]['stoi'])):\n if i == 0:\n t_den = '(1'\n t_den = t_den + '+' + tDen + '^' + str(i+1)\n if rxnList[rxn]['rct'][rct]['stoi'] < 0:\n subSites[rct] = t_den\n else:\n prodSites[rct] = t_den\n \n denTerm = ''\n for site in subSites.keys():\n denTerm = denTerm + subSites[site] + ')*'\n \n denTerm = denTerm[:-1] + '+'\n for site in prodSites.keys():\n denTerm = denTerm + prodSites[site] + ')*'\n denTerm = denTerm[:-1]\n # Add the inhibitors\n for rct in rxnList[rxn]['inh']:\n tDen = '(t_' + rct + '/' +'K_r_' + rxn + '_t_' + rct + rxnList[rxn]['inh'][rct]['suffix'] + ')'\n if rxnList[rxn]['inh'][rct]['hill'] == 0:\n tDen = tDen + '^' + 'KH_r_' + rxn + '_t_' + rct + rxnList[rxn]['inh'][rct]['suffix']\n elif rxnList[rxn]['inh'][rct]['hill'] != 1:\n tDen = tDen + '^' + str(rxnList[rxn]['inh'][rct]['hill']) \n denTerm = '(' + denTerm + ')*(1+' + tDen + ')'\n \n \n # Stitch the equations together:\n # reversible:\n if rxnList[rxn]['rev'] == '0':\n eq = '1/('+subst_km +')*(' + subst_num + '-' + prod_num + '/Keqr_' + rxn + ')/(' + denTerm + ')'\n texeq = '\\\\frac{\\\\frac{1}{' + subst_km + '}*\\\\left('+ subst_num + '-' + '\\\\frac{' + prod_num + '}{Keqr_' + rxn + '}\\\\right)}{' + denTerm + '}'\n \n elif rxnList[rxn]['rev'] == '1':#irreversible\n eq = '(('+ subst_num +')/('+ subst_km +'))/(' + denTerm + ')'\n texeq = '\\\\frac{\\\\frac{'+ subst_num +'}{'+ subst_km +'}}{' + denTerm + '}'\n else: #irreversibly backwards\n eq = '-(('+ prod_num +')/('+ prod_km +'))/(' + denTerm + ')'\n texeq = '-\\\\frac{\\\\frac{'+ prod_num+'}{'+ prod_km +'}}{' + denTerm + '}'\n # Add the activator term:\n for rct in rxnList[rxn]['act']:\n \n if rxnList[rxn]['act'][rct]['subtype'] == 'mm':\n # 1 / ((K/L)^n + 1)\n tDen = '(' + 'K_r_' + rxn + '_t_' + rct + rxnList[rxn]['act'][rct]['suffix'] + '/' + 't_' + rct + ')'\n \n if rxnList[rxn]['act'][rct]['hill'] == 0:\n tDen = tDen + '^' + 'KH_r_' + rxn + '_t_' + rct + rxnList[rxn]['act'][rct]['suffix']\n elif rxnList[rxn]['act'][rct]['hill'] != 1:\n tDen = tDen + '^' + str(rxnList[rxn]['act'][rct]['hill'])\n \n eq = '(1/(' + tDen + '+1))*' + eq\n texeq = 
'(1/(' + tDen + '+1))*' + texeq\n \n elif rxnList[rxn]['act'][rct]['subtype'] == 'cc':\n # 1 + (L/K^n)\n tDen = '(t_' + rct + '/' +'K_r_' + rxn + '_t_' + rct + rxnList[rxn]['act'][rct]['suffix'] + ')'\n \n if rxnList[rxn]['act'][rct]['hill'] == 0:\n tDen = tDen + '^' + 'KH_r_' + rxn + '_t_' + rct + rxnList[rxn]['act'][rct]['suffix']\n elif rxnList[rxn]['act'][rct]['hill'] != 1:\n tDen = tDen + '^' + str(rxnList[rxn]['act'][rct]['hill'])\n eq = '(1+' + tDen + ')*' + eq\n texeq = '(1+' + tDen + ')*' + texeq\n \n else:\n raise NameError('invalid activator style (valid: mm,cc) in ' + rxn)\n \n eq = re.sub('\\^1(?![0-9\\.])','',eq)\n eq = re.sub('\\\\)\\\\(',')*(',eq)\n texeq = re.sub('\\^1(?![0-9\\.])','',texeq)\n texeq = re.sub('\\\\)\\\\(',')*(',texeq)\n \n # do some latex editing\n texeq = re.sub('\\((?=t\\_.{4}\\/K.{14}\\))',r'\\\\frac{',texeq)\n texeq = re.sub('\\/(?=K.{14}\\))','}{',texeq)\n texeq = re.sub('(?<=}{K.{14})\\)','}',texeq)\n texeq = re.sub('(?<!left)\\(',r'\\\\left(',texeq)\n texeq = re.sub('(?<!right)\\)',r'\\\\right)',texeq)\n texeq = texeq.replace('_','\\_')\n texeq = re.sub(r'\\\\',r'\\\\\\\\',texeq)\n eqList[rxn] = eq\n texeqList[rxn] = re.sub('\\\\\\\\+', '\\\\\\\\', texeq)\n \n return(dict(eqList = eqList,texeqList = texeqList))", "def generate_exptime_table(self, ):\n\n # Perform calculation for all stars in biased sample\n Ndraw = self.NBIAS\n\n np.random.seed(seed=None)\n\n # Allocate memory for exposure times\n t_tots = np.zeros(Ndraw)\n tpbpcs = []\n pct_obs_iwas = []\n lammax_obs_iwas = []\n specs = []\n\n \"\"\"\n Calculate the exposure times and spectra in each bandpass for each\n star in biased sample\n \"\"\"\n\n # Loop over stars in this sample\n for i in range(Ndraw):\n #print(\"HIP %i, %.2f pc, %s \" %(hip[i], dist[i], stype[i]))\n\n # Set system parameters for this star\n self.prep_ith_star(i)\n\n # Calculate the time to observe the complete spectrum\n t_tots[i], tpbpc, spectrum, iwa = self.complete_spectrum_time()\n\n tpbpcs.append(tpbpc)\n pct_obs_iwas.append(iwa[0])\n specs.append(spectrum)\n\n # Calculate channel widths\n deltas = []\n for channel in CHANNELS:\n l = default_luvoir(channel=channel)\n deltas.append(l.lammax - l.lammin)\n self.deltas = np.array(deltas)\n\n # Calculate channel fractional completeness\n self.channel_weights = (self.deltas / np.sum(self.deltas))\n\n # Calculate completeness for each star in sample\n self.completeness = np.sum(np.array(pct_obs_iwas) * self.channel_weights, axis = 1)\n\n \"\"\"\n Make a Lookup Table of Exposure times for each star in sample\n \"\"\"\n\n tpbpcs_rect = [] # Time per bandpass\n tpcs_rect = [] # Time per channel\n\n # Loop over all the stars in sample\n for idrew in range(self.NBIAS):\n\n tpbpcs_rect.append([])\n tpcs_rect.append([])\n bp_names = []\n bp_chan = []\n\n # Loop over all the LUVOIR channels\n for ichan in range(len(CHANNELS)):\n\n tpcs_rect[idrew].append(0.0)\n\n # Loop over all the bands in this channel\n for iband in range(len(tpbpcs[0][ichan])):\n\n bp_names.append(\"%s %i\" %(CHANNELS[ichan], iband+1))\n bp_chan.append(ichan)\n tpbpcs_rect[idrew].append(tpbpcs[idrew][ichan][iband])\n tpcs_rect[idrew][ichan] += tpbpcs[idrew][ichan][iband]\n\n # Make np arrays\n tpbpcs_rect = np.array(tpbpcs_rect)\n tpcs_rect = np.array(tpcs_rect)\n bp_names = np.array(bp_names)\n bp_chan = np.array(bp_chan)\n\n # Make infs --> nans\n infmask = ~np.isfinite(tpbpcs_rect)\n tpbpcs_rect[infmask] = np.nan\n infmask = ~np.isfinite(tpcs_rect)\n tpcs_rect[infmask] = np.nan\n\n # Set attributes\n 
self.tpbpcs_rect = tpbpcs_rect\n self.tpcs_rect = tpcs_rect\n self.bp_names = bp_names\n self.bp_chan = bp_chan\n\n \"\"\"\n New completeness calculations\n \"\"\"\n\n bandpasses = []\n\n # Loop over telescope channels\n for j, channel in enumerate(CHANNELS):\n\n # Channel dependent bandwidth?\n if type(self.bandwidth) is float:\n bandwidth = self.bandwidth\n else:\n assert len(self.bandwidth) == len(CHANNELS)\n bandwidth = self.bandwidth[j]\n\n # Get the channel specific telescope parameters\n luvoir = default_luvoir(channel=channel)\n self.cn.telescope = luvoir\n\n # Calculate the bandpass edges\n edges = calculate_bandpass_edges(luvoir.lammin, luvoir.lammax, bandwidth = bandwidth)\n\n # Calculate the number of bandpasses\n Nbands = len(edges) - 1\n\n # Loop over bandpasses\n for i in range(Nbands):\n\n # Get the max, min, and middle wavelenths for this bandpass\n lammin = edges[i]\n lammax = edges[i+1]\n\n bandpasses.append([lammin, lammax])\n\n bandpasses = np.array(bandpasses)\n lmin, lmax = np.min(np.hstack(bandpasses)), np.max(np.hstack(bandpasses))\n\n # Fractional completeness of each bandpass\n bp_frac = ((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin)) / np.sum((bandpasses[:,1] - bandpasses[:,0]) / (lmax - lmin))\n\n # Completeness by target\n tot_completeness = np.sum(np.isfinite(self.tpbpcs_rect) * bp_frac, axis=1)\n\n # Fraction of stars in biased sample that can completely observe each bandpass\n frac_bias_bp = np.sum(np.isfinite(tpbpcs_rect)*1.0, axis=0) / self.NBIAS\n\n # Set attributes\n self.bandpasses = bandpasses\n self.bp_frac = bp_frac\n self.tot_completeness = tot_completeness\n self.frac_bias_bp = frac_bias_bp\n\n self._make_pandas_table()\n\n return", "def __init__(self, freq=None, **kwargs):\n if freq is not None:\n assert isinstance(freq, np.ndarray)\n self.freq = freq\n for key, val in kwargs.items():\n setattr(self, key, val)", "def PV(rate, nper, pmt, fv):\n if type(pmt) == int:\n pmt = np.array([pmt])\n else:\n pmt = np.array(pmt)\n if nper <= 0:\n print(\"nper needs to be greater than zero.\")\n elif nper != len(pmt) and sum(pmt) != 0:\n print(\"pmt vector length needs to match nper or be zero.\")\n else:\n pv_fv = fv / (1 + rate) ** nper\n fv_pmt = [(pmt[i - 1] / (1 + rate) ** i) for i in np.arange(1, len(pmt) + 1, 1)]\n return(sum(fv_pmt) + pv_fv)", "def VIntervals(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_VIntervals(self, *args)", "def frequency(self):\n return self.reference_clock_speed / 4096 / self.prescale_reg", "def refraction(el, temp, press, hum, freq=150.0):\n\n fvec = np.vectorize(qp.qp_refraction, [np.double])\n delta = fvec(el, temp, press, hum, freq)\n if delta.shape == ():\n return delta[()]\n return delta", "def voltage(self):\n\t\treturn self._voltage", "def format_pitch_data(pd):\n\n\tfor t in pd.keys():\n\t\tpd[t] = pd[t]['Pitch'] \t # make each value just the pitch, instead of a sub-dict\n\t\tif pd[t] == 0:\n\t\t\tdel pd[t]\t\t # if pitch is 0, remove from dictionary\n\n\t# now, pd is dict where each key is time (x value) and each value is pitch (y value)\n\t# to format for graph input, make list of dicts containing x-y pairs\n\tdatapoints_list = []\n\tfor t in pd.keys():\n\t\tdatapoint = {}\n\t\tdatapoint[\"x\"] = t\n\t\tdatapoint[\"y\"] = pd[t]\n\t\tdatapoints_list.append(datapoint)\n\n\t# sort the list by the value of \"x\"\n\tdatapoints_sorted = sorted(datapoints_list, key=itemgetter(\"x\"))\n\n\t# with this sorted list, do some data smoothing\n\t# pull out every nth item\n\ti = 0\n\tdatapoints_keep = []\n\twhile i < 
len(datapoints_sorted):\n\t\tdatapoints_keep.append(datapoints_sorted[i])\n\t\ti += 50\n\t# make sure last item is included so length of curve isn't lost\n\tdatapoints_keep.append(datapoints_sorted[-1])\n\n\t# print \"num of datapoints:\", len(datapoints_keep)\n\t# print datapoints_keep[:100]\n\n\treturn json.dumps(datapoints_keep, sort_keys=True)", "def test_peak_statistics_with_derivatives(RE):\n x = \"motor\"\n y = \"det\"\n num_points = 100\n ps = PeakStats(x, y, calc_derivative_and_stats=True)\n RE.subscribe(ps)\n RE(scan([det], motor, -5, 5, num_points))\n\n assert hasattr(ps, \"derivative_stats\")\n der_fields = [\"x\", \"y\", \"min\", \"max\", \"com\", \"cen\", \"crossings\", \"fwhm\", \"lin_bkg\"]\n for field in der_fields:\n assert hasattr(ps.derivative_stats, field), f\"{field} is not an attribute of ps.der\"\n\n assert type(ps.derivative_stats.x) is np.ndarray\n assert type(ps.derivative_stats.y) is np.ndarray\n assert type(ps.derivative_stats.min) is tuple\n assert type(ps.derivative_stats.max) is tuple\n assert type(ps.derivative_stats.com) is np.float64\n assert type(ps.derivative_stats.cen) is np.float64\n assert type(ps.derivative_stats.crossings) is np.ndarray\n if len(ps.derivative_stats.crossings) >= 2:\n assert type(ps.derivative_stats.fwhm) is float\n else:\n assert ps.derivative_stats.fwhm is None\n assert len(ps.derivative_stats.x) == num_points - 1\n assert len(ps.derivative_stats.y) == num_points - 1\n assert np.allclose(np.diff(ps.y_data), ps.derivative_stats.y, atol=1e-10)" ]
[ "0.5384202", "0.53538734", "0.5352361", "0.529449", "0.5280764", "0.5153001", "0.51174885", "0.5016881", "0.4978907", "0.494465", "0.49282992", "0.48579437", "0.48414347", "0.48238522", "0.48228803", "0.4811493", "0.48023042", "0.47906768", "0.4784123", "0.47702235", "0.47668195", "0.47523436", "0.4744751", "0.47311082", "0.46993333", "0.46937576", "0.4684164", "0.46839955", "0.46802992", "0.4666011", "0.46657655", "0.46656844", "0.4664771", "0.46623403", "0.46622065", "0.46615285", "0.4652443", "0.46512824", "0.4650716", "0.4650018", "0.4645726", "0.46362936", "0.46326092", "0.4632066", "0.46277177", "0.46224588", "0.46040237", "0.45922688", "0.4590224", "0.45898008", "0.45883372", "0.45883372", "0.45841423", "0.4583698", "0.45734164", "0.4572633", "0.45642802", "0.45608968", "0.45570153", "0.4556946", "0.45564565", "0.45557985", "0.45525447", "0.4550317", "0.4543322", "0.4518897", "0.45163485", "0.4513047", "0.45067874", "0.45066768", "0.45010784", "0.44992313", "0.44960526", "0.44949278", "0.44844446", "0.44811276", "0.44803578", "0.4472687", "0.4469025", "0.44634467", "0.44623235", "0.44611928", "0.44514012", "0.4440114", "0.44379905", "0.44358277", "0.4433245", "0.44206008", "0.44199413", "0.44142586", "0.4410703", "0.44080323", "0.44066802", "0.44057843", "0.44017068", "0.44006136", "0.43995562", "0.43952912", "0.43931904", "0.43920046" ]
0.55513275
0
You shouldn't have to call this yourself unless you're trying to optimize performance. This should get called automatically, as needed, by 'scan'.
def write_voltage(self):
    if self._unplayed_voltages:
        raise UserWarning(
            "After you write voltages to the DAQ card, you have to scan\n" +
            "the voltage at least once before you can set the voltage\n" +
            "again.")
    DAQmxErrChk(api.DAQmxWriteAnalogF64(
        self._taskHandle,
        len(self.voltage) // self.num_channels,
        0,
        ctypes.c_double(10.0), #Timeout for writing. Hope it ain't this slow.
        1, #DAQmx_Val_GroupByScanNumber (interleaved)
        numpy.ctypeslib.as_ctypes(self.voltage),
        ctypes.byref(self.num_points_written),
        ctypes.c_void_p(0)
        ))
    print self.num_points_written.value,
    print "points written to each DAQ channel."
    self._unwritten_voltages = False
    self._unplayed_voltages = True
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _scan(self): # pragma: no cover\n raise NotImplementedError()", "def scan(self):\n return", "def scan(self) -> List[int]:", "def scan(self) -> List[int]:", "def scan(self) -> list[int]:", "def scan(self) -> Collection[int]:\n ...", "def test_get_scans(self):\n pass", "def on_scanner_finish(self, scanner):", "def scan(self, mask):", "def scan(this):\n\t\treturn this._SCAN", "def test_get_scan(self):\n pass", "def on_scanner_start(self, scanner):", "def get_scan(self):\n pass", "def scan(src):\n walker = Walker()\n walker.scan(src)\n return", "def test_scan_file(self):\n self.run_scan(self.filename, 1)", "def __init__(self, scan=None):\n super().__init__(scan=scan)", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def splitbyscans(self,scans):\n ind=np.where(np.array([val in scans for dum,val in enumerate(self.scan)]))[0]\n self.splitbyind(ind)", "def fast_scan(self):\n self._scanned = True\n return self._scanner.scan(self._ips, self._ports)", "def next():", "def next():", "def _handle_scan(self, data):\n self.lidar_data = data.ranges", "def test_filtered_scan(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount, ext=\".txt\")", "def test_start_scan(self):\n pass", "def scan_item(barcode):\n return scanner.scan(barcode)", "def scan(self):\n for angle in range(self.MIDPOINT-400, self.MIDPOINT+401, 100):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()\n #sort the scan data for easier analysis\n self.scan_data = OrderedDict(sorted(self.scan_data.items()))", "def customScanStackOverFlow():\n\n\t# data\n\tperl_count = 0\n\tc_count = 0\n\tcsharp_count = 0\n\tcplus_count = 0\n\tjava_count = 0\n\tbash_count = 0\n\tpython_count = 0\n\truby_count = 0\n\thtml_count = 0\n\tphp_count = 0\n\tsql_count = 0\n\tjavascript_count = 0\n\n\t# dataFile\n\tperlUrl_file = \"DATA/perlUrl.data\"\n\tcUrl_file = \"DATA/cUrl.data\"\n\tcsharpUrl_file = \"DATA/csharpUrl.data\"\n\tcplusUrl_file = \"DATA/cplusUrl.data\"\n\tjavaUrl_file = \"DATA/javaUrl.data\"\n\tbashUrl_file = \"DATA/bashUrl.data\"\n\tpythonUrl_file = \"DATA/pythonUrl.data\"\n\trubyUrl_file = \"DATA/rubyUrl.data\"\n\thtmlUrl_file = \"DATA/htmlUrl.data\"\n\tphpUrl_file = \"DATA/phpUrl.data\"\n\tsqlUrl_file = \"DATA/sqlUrl.data\"\n\tjavascriptUrl_file = \"DATA/javascriptUrl.data\"\n\t\n\n\t# look for existing log file\n\tstartNumber = 1\n\tif os.path.isfile(\"DATA/log/customScan.log\"):\n\t\tlogFile = open(\"DATA/log/customScan.log\", \"r\")\n\t\tfor line in logFile:\n\t\t\tlineWithoutBackN = line.replace(\"\\n\", \"\")\n\t\t\tlineInArray = lineWithoutBackN.split(\",\")\n\t\t\t\n\t\t\tif lineInArray[0] == \"Current topic\":\n\t\t\t\tstartNumber = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"perl\":\n\t\t\t\tperl_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"c\":\n\t\t\t\tc_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"c++\":\n\t\t\t\tcplus_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"c#\":\n\t\t\t\tcsharp_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"Java\":\n\t\t\t\tjava_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"Bash\":\n\t\t\t\tbash_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"Python\":\n\t\t\t\tpython_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"Ruby\":\n\t\t\t\truby_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"Html\":\n\t\t\t\thtml_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"Php\":\n\t\t\t\tphp_count = 
int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"SQL\":\n\t\t\t\tsql_count = int(lineInArray[1])\n\t\t\telif lineInArray[0] == \"JavaScript\":\n\t\t\t\tjavascript_count = int(lineInArray[1])\n\t\t\t\t\n\t\tlogFile.close()\n\t\t\t\n\n\t# Look for existing data file\n\tif not os.path.isfile(perlUrl_file):\n\t\tfileToInit = open(perlUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(cUrl_file):\n\t\tfileToInit = open(cUrl_file, \"w\") \n\t\tfileToInit.close()\n\tif not os.path.isfile(cplusUrl_file):\n\t\tfileToInit = open(cplusUrl_file, \"w\") \n\t\tfileToInit.close()\n\tif not os.path.isfile(csharpUrl_file):\n\t\tfileToInit = open(csharpUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(javaUrl_file):\n\t\tfileToInit = open(javaUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(bashUrl_file):\n\t\tfileToInit = open(bashUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(pythonUrl_file):\n\t\tfileToInit = open(pythonUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(rubyUrl_file):\n\t\tfileToInit = open(rubyUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(htmlUrl_file):\n\t\tfileToInit = open(htmlUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(phpUrl_file):\n\t\tfileToInit = open(phpUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(sqlUrl_file):\n\t\tfileToInit = open(sqlUrl_file, \"w\")\n\t\tfileToInit.close()\n\tif not os.path.isfile(javascriptUrl_file):\n\t\tfileToInit = open(javascriptUrl_file, \"w\")\n\t\tfileToInit.close()\t\n\n\t# Url Generation\n\tprefix = \"http://stackoverflow.com/questions/\"\n\tfor number in range(startNumber, 200):\n\t\tgeneratedUrl = prefix + str(number) + \"/\"\n\t\t\n\t\t# Get title of the page\n\t\tpage = requests.get(generatedUrl)\n\t\ttree = html.fromstring(page.content)\n\t\ttitle = tree.xpath('//title/text()')\n\t\ttitleInArray = title[0].split('-')\n\t\tsleep(1)\n\n\t\t# Write data in log file\n\t\tlogFile = open(\"DATA/log/customScan.log\", \"w\")\n\t\tlogFile.write(\"Current topic,\"+str(number)+\"\\n\")\n\t\tlogFile.write(\"perl,\" +str(perl_count) +\"\\n\")\n\t\tlogFile.write(\"c,\" +str(c_count) +\"\\n\")\n\t\tlogFile.write(\"c++,\" +str(cplus_count) +\"\\n\")\n\t\tlogFile.write(\"c#,\" +str(csharp_count) +\"\\n\")\n\t\tlogFile.write(\"Java,\" +str(java_count) +\"\\n\")\n\t\tlogFile.write(\"Bash,\" +str(bash_count) +\"\\n\")\n\t\tlogFile.write(\"Python,\"+str(python_count) +\"\\n\")\n\t\tlogFile.write(\"Ruby,\"+str(ruby_count) +\"\\n\")\n\t\tlogFile.write(\"Html,\"+str(html_count) +\"\\n\")\n\t\tlogFile.write(\"Php,\"+str(php_count) +\"\\n\")\n\t\tlogFile.write(\"SQL,\"+str(sql_count) +\"\\n\")\n\t\tlogFile.write(\"JavaScript,\"+str(javascript_count) +\"\\n\")\n\t\tlogFile.close()\n\t\t\n\t\t# Scan Subject\n\t\tif titleInArray[0] != \"Page Not Found \":\n\t\t\tmainSubject = titleInArray[0]\n\t\t\tprecision = titleInArray[1]\n\n\t\t\tprint \"[\"+str(number)+\"] \"+title[0]\n\n\t\t\tfor mesh in mainSubject.split(\" \"):\n\t\t\t\tif mesh.lower() == \"perl\":\n\t\t\t\t\tperl_count = perl_count + 1\n\t\t\t\t\tfileToAdd = open(perlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"c\":\n\t\t\t\t\tc_count = c_count + 1\n\t\t\t\t\tfileToAdd = open(cUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"c#\":\n\t\t\t\t\tcsharp_count = csharp_count + 1\n\t\t\t\t\tfileToAdd = 
open(csharpUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"c++\":\n\t\t\t\t\tcplus_count = cplus_count + 1\n\t\t\t\t\tfileToAdd = open(cplusUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"java\":\n\t\t\t\t\tjava_cout = java_count + 1\n\t\t\t\t\tfileToAdd = open(javaUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"bash\":\n\t\t\t\t\tbash_count = bash_count + 1\n\t\t\t\t\tfileToAdd = open(bashUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"python\":\n\t\t\t\t\tpython_count = python_count + 1\n\t\t\t\t\tfileToAdd = open(pythonUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"ruby\":\n\t\t\t\t\truby_count = ruby_count + 1\n\t\t\t\t\tfileToAdd = open(rubyUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"html\":\n\t\t\t\t\thtml_count = html_count + 1\n\t\t\t\t\tfileToAdd = open(htmlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"php\":\n\t\t\t\t\tphp_count = php_count + 1\n\t\t\t\t\tfileToAdd = open(phpUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"sql\":\n\t\t\t\t\tsql_count = sql_count + 1\n\t\t\t\t\tfileToAdd = open(sqlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"mysql\":\n\t\t\t\t\tsql_count = sql_count + 1\n\t\t\t\t\tfileToAdd = open(sqlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"javascript\":\n\t\t\t\t\tjavascript_count = javascript_count + 1\n\t\t\t\t\tfileToAdd = open(javascriptUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\n\t\t\tfor mesh in precision.split(\" \"):\n\t\t\t\tif mesh.lower() == \"perl\": \n\t\t\t\t\tperl_count = perl_count + 1\n\t\t\t\t\tfileToAdd = open(perlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"c\": \n\t\t\t\t\tc_count = c_count + 1\n\t\t\t\t\tfileToAdd = open(cUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"c#\":\n\t\t\t\t\tcsharp_count = csharp_count + 1 \n\t\t\t\t\tfileToAdd = open(csharpUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close() \n\t\t\t\telif mesh.lower() == \"c++\":\n\t\t\t\t\tcplus_count = cplus_count + 1\n\t\t\t\t\tfileToAdd = open(cplusUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"java\": \n\t\t\t\t\tjava_cout = java_count + 1\n\t\t\t\t\tfileToAdd = open(javaUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"bash\": \n\t\t\t\t\tbash_count = bash_count + 1\n\t\t\t\t\tfileToAdd = open(bashUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"python\":\n\t\t\t\t\tpython_count = python_count 
+ 1\n\t\t\t\t\tfileToAdd = open(pythonUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"ruby\":\n\t\t\t\t\truby_count = ruby_count + 1\n\t\t\t\t\tfileToAdd = open(rubyUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"html\":\n\t\t\t\t\thtml_count = html_count + 1\n\t\t\t\t\tfileToAdd = open(htmlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"php\":\n\t\t\t\t\tphp_count = php_count + 1\n\t\t\t\t\tfileToAdd = open(phpUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"sql\":\n\t\t\t\t\tsql_count = sql_count + 1\n\t\t\t\t\tfileToAdd = open(sqlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"mysql\":\n\t\t\t\t\tsql_count = sql_count + 1\n\t\t\t\t\tfileToAdd = open(sqlUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\t\t\t\telif mesh.lower() == \"javascript\":\n\t\t\t\t\tjavascript_count = javascript_count + 1\n\t\t\t\t\tfileToAdd = open(javascriptUrl_file, \"a\")\n\t\t\t\t\tfileToAdd.write(generatedUrl+\"\\n\")\n\t\t\t\t\tfileToAdd.close()\n\n\n\tprint \"perl: \" +str(perl_count) +\"\\n\"\n\tprint \"c: \" +str(c_count) +\"\\n\"\n\tprint \"c++: \" +str(cplus_count) +\"\\n\"\n\tprint \"c#: \" +str(csharp_count) +\"\\n\"\n\tprint \"Java: \" +str(java_count) +\"\\n\"\n\tprint \"Bash: \" +str(bash_count) +\"\\n\"\t\n\tprint \"Python: \"+str(python_count) +\"\\n\"\n\tprint \"Ruby: \"+str(ruby_count) +\"\\n\"\n\tprint \"Html: \"+str(html_count) +\"\\n\"\n\tprint \"Php: \"+str(php_count) +\"\\n\"\n\tprint \"SQL: \"+str(sql_count) +\"\\n\"\n\tprint \"JavaScript: \"+str(javascript_count) +\"\\n\"\n\n\n\tfileLog = open(\"DATA/CustomScan.csv\", \"w\")\n\tfileLog.write(\"perl,\" +str(perl_count) +\"\\n\")\n\tfileLog.write(\"c,\" +str(c_count) +\"\\n\")\n\tfileLog.write(\"c++,\" +str(cplus_count) +\"\\n\")\n\tfileLog.write(\"c#,\" +str(csharp_count) +\"\\n\")\n\tfileLog.write(\"Java,\" +str(java_count) +\"\\n\")\n\tfileLog.write(\"Bash,\" +str(bash_count) +\"\\n\")\n\tfileLog.write(\"Python,\"+str(python_count) +\"\\n\")\n\tfileLog.write(\"Ruby,\"+str(ruby_count) +\"\\n\")\n\tfileLog.write(\"Html,\"+str(html_count) +\"\\n\")\n\tfileLog.write(\"Php,\"+str(php_count) +\"\\n\")\n\tfileLog.write(\"SQL,\"+str(sql_count) +\"\\n\")\n\tfileLog.write(\"JavaScript,\"+str(javascript_count) +\"\\n\")\n\tfileLog.close()\n\n\n\treturn 0", "def Scan(self, plugin):\n raise 'Method not implemented'", "def do_scan(self, arg):\n results = self._table.scan()\n for item in results[\"Items\"]:\n self._print(_pretty(item))", "def scan(self):\n for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 35):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()\n #sort the scan data for easier analysis\n self.scan_data = OrderedDict(sorted(self.scan_data.items()))", "def process_scan(self, msg):\n print(msg)\n self.last_scan = msg", "def scan(self, param):\n\t\tself.left(355)", "def test_run_a_scan_on_sdp_subarray_in_low():", "def collect_scans(table_type, items):\n table = get_table(table_type)\n lst = []\n for key, value in items:\n table.insert(key, value)\n lst.append(key)\n\n scans = []\n for key in lst:\n val, scan = table.lookup(key)\n scans.append(scan)\n\n ave_scan = sum(scans) / 
float(len(items))\n return ave_scan", "def scanr(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanr must be callable\")\n itr = iter(itr)\n \n return _scanr(func, start, itr)", "def probe(self):", "def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def process_raw_filelist():\n\n scan_list = []\n curr_scan = \"\"\n\n tools = generate_tools_list()\n\n print(\"Beginning scan of %s\" % os.path.join(sites_dir, \"ua-mac/raw_data/stereoTop\"))\n dates = sorted(os.listdir(os.path.join(sites_dir, \"ua-mac/raw_data/stereoTop\")))\n for date in dates:\n if date not in limit_dates:\n continue\n date_dir = os.path.join(os.path.join(sites_dir, \"ua-mac/raw_data/stereoTop\"), date)\n print(\"Scanning %s\" % date_dir)\n\n timestamps = sorted(os.listdir(date_dir))\n for ts in timestamps:\n ts_dir = os.path.join(date_dir, ts)\n\n meta, lbin, rbin = None, None, None\n\n files = os.listdir(ts_dir)\n for fname in files:\n fpath = os.path.join(ts_dir, fname)\n if fname.endswith(\"metadata.json\"):\n meta = fpath\n if fname.endswith(\"left.bin\"):\n lbin = fpath\n if fname.endswith(\"right.bin\"):\n rbin = fpath\n\n # TODO: More logging\n if meta and lbin and rbin:\n scan = get_scan_from_metadata(meta)\n\n if scan and scan != curr_scan:\n if len(scan_list) > 0:\n print(\"%s - [%s] %s datasets\" % (date, curr_scan, len(scan_list)))\n create_scan_dax(date, curr_scan, scan_list, tools)\n # TODO: Temporary\n return\n\n scan_list = []\n curr_scan = scan\n\n elif len(scan_list) > scan_size_limit and scan_size_limit > 0:\n print(\"%s - [%s] %s datasets\" % (date, curr_scan, len(scan_list)))\n create_scan_dax(date, curr_scan, scan_list, tools)\n return\n\n # TODO: What do we do if there is no scan in the metadata? 
\"unknown_scan_{date}\"?\n scan_list.append({\"left\": lbin, \"right\": rbin, \"metadata\": meta})\n\n if len(scan_list) > 0:\n print(\"%s - [%s] %s datasets\" % (date, curr_scan, len(scan_list)))\n create_scan_dax(date, curr_scan, scan_list, tools)", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def _collect_all(self):", "def full_scan(self):\n self._scanned = True\n return self._scanner.scan(self._ips, self._ports, arguments='-sV -sS -T4')", "def test_run_a_scan_on_sdp_subarray_in_mid():", "def scan(self):\n for angle in range(self.MIDPOINT-350, self.MIDPOINT+350, 50):\n self.servo(angle)\n self.scan_data[angle] = self.read_distance()", "def map_read(array, read, res=1.0):\n start, stop = read\n array[int(ceil(start/res)):int(ceil(stop/res))] += 1", "def skip(self) -> \"Scanner\":\n raise NotImplementedError", "def handle_scan_results():\n global scan_results\n\n if scan_results.qsize() > 0:\n # Grab the latest detections from the scan result queue.\n _scan_data = scan_results.get()\n for _sonde in _scan_data:\n # Extract frequency & type info\n _freq = _sonde[0]\n _type = _sonde[1]\n\n if _freq in autorx.task_list:\n # Already decoding this sonde, continue.\n continue\n else:\n logging.info(\"Detected new %s sonde on %.3f MHz!\" % (_type, _freq/1e6))\n\n # Break if we don't support this sonde type.\n if (_type not in VALID_SONDE_TYPES):\n logging.error(\"Unsupported sonde type: %s\" % _type)\n continue\n\n if allocate_sdr(check_only=True) is not None :\n # There is a SDR free! 
Start the decoder on that SDR\n start_decoder(_freq, _type)\n\n elif (allocate_sdr(check_only=True) is None) and ('SCAN' in autorx.task_list):\n # We have run out of SDRs, but a scan thread is running.\n # Stop the scan thread and take that receiver!\n stop_scanner()\n start_decoder(_freq, _type)\n else:\n # We have no SDRs free.\n # TODO: Alert the user that a sonde was detected, but no SDR was available,\n # but don't do this EVERY time we detect the sonde...\n pass", "def update(self, scan):\n\n # Check the input type and convert to ndarray if a list\n # and ensure that the input is a 1D vector\n if not isinstance(scan, (list, np.ndarray)):\n error_msg = \"argument wrong type, expected list or numpy.ndarray\"\n raise TypeError(error_msg)\n else:\n scan_array = np.asarray(scan)\n element_count = scan_array.size\n scan_array.shape = (element_count)\n\n # Remove the last entry in the array to maintain num_prev_scans\n # most recent scans\n num_scans = self.__unordered_data.shape[0]\n if num_scans > self.__num_prev_scans:\n num_scans-=1\n self.__unordered_data = \\\n np.delete(self.__unordered_data, num_scans,axis=0)\n\n # Return the scan if it is the very first input into the array\n if self.__unordered_data.size == 0:\n self.__N = scan_array.size;\n self.__unordered_data = np.vstack([scan_array])\n return self.__unordered_data[0].tolist()\n else:\n # Check every scan has same number of measurements as first scan\n if scan_array.size != self.__N:\n raise ValueError(\"number of data points not consistent\")\n else:\n self.__unordered_data = \\\n np.vstack([scan_array, self.__unordered_data])\n self.__ordered_data = copy.deepcopy(self.__unordered_data)\n num_scans+=1\n\n # sort by column\n self.__ordered_data.sort(axis=0)\n\n # determine the median val for each column\n if num_scans%2:\n median_val = self.__ordered_data[(num_scans + 1) / 2 - 1, :]\n else:\n median_val_1 = self.__ordered_data[num_scans / 2 - 1, :]\n median_val_2 = self.__ordered_data[(num_scans + 2) / 2 - 1, :]\n median_val = (median_val_1 + median_val_2) / 2.0\n\n median_val = median_val.tolist()\n return median_val", "def _scan_scores(self,handle, consumer):\n read_and_call(handle, consumer.scores, start=\"Smith-Waterman\")", "def scan( gaps, center, dist ):\n return _scan_reverse( gaps, center, dist ), _scan_forward( gaps, center, dist )", "def tblscans(self):\n sql = '''select to_char(value, 'FM99999999999999990') retvalue from \n v$sysstat where name = 'table scans (long tables)' '''\n self.cur.execute(sql)\n res = self.cur.fetchall()\n for i in res:\n print(i[0])", "def _scan_alignment(self,handle, consumer):\n while 1:\n line = handle.readline()\n if not line:\n break\n if is_blank_line(line):\n continue\n else:\n consumer.query_alignment(line)\n read_and_call(handle, consumer.positive_alignment)\n read_and_call(handle, consumer.hit_alignment)", "def _scan_profilewidth(self,handle,consumer):\n read_and_call(handle, consumer.profilewidth, contains=\"Nseqs1\")", "def undo_scan(self, sub_array_id: int):", "def _addScans(self, scans):\n if not len(scans):\n return\n self.logger.log('adding %d scans to currently active %s' % (len(scans), self.cname))\n it = iter(list(scans)) # we use an separate iterator so we\n # can grab the uniterated objects if necessary\n lastone = None # preserve the one that *caused* the overflow to handle in new container\n try:\n for scan in it:\n lastone = scan\n self.ourcontainer.add(scan.to_UrlObject(), scan.safety)\n setattr(scan, self.added_property_name, True)\n self.session.add(scan)\n 
self.session.add(self.ourcontainer)\n except ContainerFullError:\n self.logger.log('scans overflowed')\n overflow = [lastone] + list(it)\n self._newcontainer(overflow[0].siginfo)\n self._addScans(overflow)", "def post_add_scan_CB(self) -> None:\n pass", "def apply(self) -> None:", "def apply(self) -> None:", "def scan(self, item, col):\n if item.nxt == col.token.typ:\n col.add(item.shifted())", "def task_scanned(now_task):", "def scan(self):\n self.tokfile = open(self.tokfile_path, 'w')\n word = ''\n for line in open(self.srcfile):\n for ch in line:\n if ch in alphanum: \n word += ch\n else:\n if word:\n try:\n self.print_tok('$int', int(word))\n except ValueError:\n if word in self.reserved: \n self.print_tok('$' + word)\n else:\n self.print_tok('$id', word)\n if ch in special:\n self.print_tok(ch)\n word = ''\n self.tokfile.close()", "def reduce_run():", "def computePValues(options,whole_mapped_data,mapped_data_per_size_per_register,phase,cycle):\n min_reads_mapped_to_a_phased_register=3\n min_reads_in_a_window=10\n chromosome_hits=[]\n for chromosome in sorted(mapped_data_per_size_per_register):\n chromosome_hits.append(chromosome)\n fhr=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest\",\"r\")\n fhw=open(options.output_directory_per_run+\"/\"+options.input_filename+\"_\"+str(phase)+\"_\"+str(cycle)+\"_\"+chromosome+\".regionsOfInterest.concentrated\",\"w\")\n for line in fhr:\n register,start,end=line.strip().split()\n register=int(register)\n start=int(start)\n end=int(end)\n \n begin=start\n #print(chromosome,register,start,end)\n sys.stdout.flush()\n while begin+(phase*min_reads_mapped_to_a_phased_register) <= end+1:\n finish=begin+(phase*cycle)-1\n \n k=0\n for i in range(begin,finish+1):\n #print(chromosome,register,i,phase,start,end)\n try:\n k+=mapped_data_per_size_per_register[chromosome][register][i]\n except KeyError:\n pass\n #print(\"Next\")\n if k<min_reads_mapped_to_a_phased_register: \n begin+=phase\n continue\n \n num_all_reads=0\n for i in range(begin,finish+1):\n try:\n num_all_reads+=whole_mapped_data[chromosome][i]\n except KeyError:\n pass\n if num_all_reads<min_reads_in_a_window:\n begin+=phase\n continue\n \n n=0\n \"\"\"print(\"reached here\")\n sys.stdout.flush()\"\"\"\n # register_i is an iterator different from register\n for register_i in sorted(mapped_data_per_size_per_register[chromosome]):\n for i in range(begin,finish+1):\n try:\n n+=mapped_data_per_size_per_register[chromosome][register_i][i]\n except KeyError:\n pass\n \"\"\"if chromosome==\"Chr1\":\n print(str(n)+\" \"+str(num_all_reads)+\"\\n\")\"\"\"\n if n/num_all_reads<0.3:\n begin+=phase\n continue\n m=cycle*2\n pvalue=0\n for x in range(k,m+1):\n numerator=nCr((phase-1)*m,n-x)*nCr(m,x)\n pvalue+=numerator\n denominator=nCr(phase*m,n)\n pvalue=pvalue/denominator\n #print(chromosome,begin,finish,k,n,m,num_all_reads,pvalue,n/num_all_reads)\n if pvalue>=options.pvalue_cutoff:\n begin+=phase\n continue\n stuffs_to_be_printed_to_file=[register,begin,finish,k,n,m,num_all_reads,n/num_all_reads,pvalue]\n fhw.write(\"\\t\".join(map(str,stuffs_to_be_printed_to_file))+\"\\n\")\n sys.stdout.flush()\n begin+=phase", "def map():", "def _scan_lengths(self,handle, consumer):\n read_and_call(handle, consumer.lengths, start=\"length1=\")", "def __call__(self, PID):\n i = 0\n pairs = 0\n outputdata = []\n for recordpair in self.data:\n pair = makeSAMpairFromStringTuple(recordpair, reorder=False)\n for stream in 
self.options.orderedStreams:\n # In SP mode, stream.next() returns a pair or None. In MP\n # it's more complicated, we pass back an array of dicts where\n # each one deinfes a pair (or not) depending on whether it is \n # filtered out by the stream.\n result = stream.next(pair, self.options)\n if result['matched']:\n if stream.op(OP_NOUT):\n continue\n\n # Copy stats for passing back.\n copy_of_stats = copy.deepcopy(stream.stats)\n copy_of_global = copy.deepcopy(self.options.orderedStreams[0].globalstats)\n\n # Reset original stats. Each subset of stats will\n # be integrated separately\n EmptyList(stream.stats)\n EmptyList(self.options.orderedStreams[0].globalstats)\n\n # First handle FASTQ output\n dataBucketFASTQ = []\n\n # Store root filename\n froot = result['output'][0]\n\n if stream.op(OP_FASTQ) or stream.op(OP_FASTQPP):\n if stream.op(OP_FASTQ):\n newpair,froot = self.ProcessPair(OP_FASTQ, stream, froot, pair)\n else:\n newpair,froot = self.ProcessPair(OP_FASTQPP, stream, froot, pair)\n if self.writeToFiles:\n if stream.op(OP_FASTQ) and stream.op(OP_SH):\n outputf1 = \"%s.sh.fastq.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [open(outputf1, \"a\"),\n None,\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n elif stream.op(OP_FASTQPP):\n outputf1 = \"%s.pp.1.fastq.PID.%d\" %(froot,PID)\n outputf2 = \"%s.pp.2.fastq.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [open(outputf1, \"a\"),\n open(outputf2, \"a\"),\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n elif stream.op(OP_FASTQ):\n outputf1 = \"%s.1.fastq.PID.%d\" %(froot,PID)\n outputf2 = \"%s.2.fastq.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [open(outputf1, \"a\"),\n open(outputf2, \"a\"),\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n else:\n if not stream.op(OP_INFO):\n dataBucketFASTQ = [StringIO.StringIO(), \n StringIO.StringIO(),\n ]\n else:\n dataBucketFASTQ = [None,\n None,\n ]\n if not stream.op(OP_INFO):\n newpair.writeFASTQ(dataBucketFASTQ, closeWhenDone=False)\n\n\n # Now Handle SAM output\n dataBucketSAM = []\n\n if stream.op(OP_SAM) or stream.op(OP_SAMPP):\n if stream.op(OP_SAM):\n newpair,froot = self.ProcessPair(OP_SAM, stream, froot, pair)\n else:\n newpair,froot = self.ProcessPair(OP_SAMPP, stream, froot, pair)\n if self.writeToFiles:\n if stream.op(OP_SAMPP):\n outputf = \"%s.pp.sam.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketSAM = [open(outputf, \"a\"),]\n else:\n dataBucketSAM = [None,]\n # OP_SAM (no OP_PP)\n else:\n outputf = \"%s.sam.PID.%d\" %(froot,PID)\n if not stream.op(OP_INFO):\n dataBucketSAM = [open(outputf, \"a\"),]\n else:\n dataBucketSAM = [None,]\n else:\n if not stream.op(OP_INFO):\n dataBucketSAM = [StringIO.StringIO(),]\n else:\n dataBucketSAM = [None,]\n if not stream.op(OP_INFO):\n newpair.writeSAM(dataBucketSAM[0], closeWhenDone=False)\n\n\n result['output'][0] = froot\n # Return results\n if stream.op(OP_SAM) or stream.op(OP_SAMPP) or \\\n stream.op(OP_FASTQ) or stream.op(OP_FASTQPP):\n if self.writeToFiles:\n if stream.op(OP_INFO):\n files_for_output = []\n else:\n files_for_output = result['output']\n outputdata.append({ 'datastrings' : '',\n 'files': files_for_output,\n 'name': result['name'],\n 'stats': copy_of_stats,\n 'gzipped' : stream.op(OP_GZ),\n 'sam,pp' : stream.op(OP_SAMPP),\n 'fastq,pp' : stream.op(OP_FASTQPP),\n 'sh' : stream.op(OP_SH),\n 'globalstats': copy_of_global,\n })\n else:\n pairvalueList = []\n for db in dataBucketFASTQ + dataBucketSAM:\n if db is None:\n 
pairvalueList.append(None)\n else:\n # If a StringIO object has nothing written \n # to it, the getvalue() call will throw an \n # exception about the object not having a \n # buf attribute. In this case we append None\n try:\n vv = db.getvalue()\n pairvalueList.append(vv)\n except:\n pairvalueList.append(None)\n\n # \"info\" operator quashes SAM,FASTQ output\n if stream.op(OP_INFO):\n pairvalueList = []\n files_for_output = []\n else:\n files_for_output = result['output']\n outputdata.append({ 'datastrings' : pairvalueList,\n 'files': files_for_output,\n 'name': result['name'],\n 'stats': copy_of_stats,\n 'gzipped' : stream.op(OP_GZ),\n 'sam,pp' : stream.op(OP_SAMPP),\n 'fastq,pp' : stream.op(OP_FASTQPP),\n 'sh' : stream.op(OP_SH),\n 'globalstats': copy_of_global,\n })\n\n for db in dataBucketFASTQ + dataBucketSAM:\n try:\n db.close()\n except:\n pass\n\n if not stream.op(OP_PASS):\n break\n \n\n # No matching data. We'll return an \"empty\" output dict\n if len(outputdata) == 0:\n stream = self.options.orderedStreams[0]\n empty = SAMStream('none', '')\n outputdata = [{ 'datastrings' : '',\n 'files': [],\n 'name': empty.name,\n 'stats': empty.stats,\n 'gzipped' : False,\n 'sam,pp' : False,\n 'fastq,pp' : False,\n 'sh' : False,\n 'globalstats': stream.globalstats\n },]\n return self.ID, outputdata", "def rescanblockchain(self, *args, **kwargs):\n pass", "def match(self, input_reader):\n pass", "def _scan(self, keys, cursor, *args):\n cursor = int(cursor)\n (pattern, _type, count), _ = extract_args(args, (\"*match\", \"*type\", \"+count\"))\n count = 10 if count is None else count\n data = sorted(keys)\n bits_len = (len(keys) - 1).bit_length()\n cursor = bin_reverse(cursor, bits_len)\n if cursor >= len(keys):\n return [0, []]\n result_cursor = cursor + count\n result_data = []\n\n regex = compile_pattern(pattern) if pattern is not None else None\n\n def match_key(key: bytes) -> Union[bool, Match[bytes], None]:\n return regex.match(key) if regex is not None else True\n\n def match_type(key) -> bool:\n return _type is None or casematch(key_value_type(self._db[key]).value, _type)\n\n if pattern is not None or _type is not None:\n for val in itertools.islice(data, cursor, cursor + count):\n compare_val = val[0] if isinstance(val, tuple) else val\n if match_key(compare_val) and match_type(compare_val):\n result_data.append(val)\n else:\n result_data = data[cursor: cursor + count]\n\n if result_cursor >= len(data):\n result_cursor = 0\n return [str(bin_reverse(result_cursor, bits_len)).encode(), result_data]", "def scanChecks(motor, start, stop, step, param1, param2=-1, param3=-1):\n\tgenericScanChecks(True, False, motor, start, stop, step, param1, param2, param3)", "def scanOptions(self, options):", "def _optimise(self):\n pass", "def scan_callback(self, scan):\n # Fill some cells in the map just so we can see that something is\n # being published.\n Lresol = 1 / myRes\n r = scan.ranges[0]\n xt = [self.position[0] + 1, self.position[1] + 1, self.position[2]]\n # for k in range(0,len(scan.ranges)-1):\n scanAngles = np.linspace(scan.angle_max, scan.angle_min, len(scan.ranges))\n lidar_local = np.array(\n [xt[0] + scan.ranges * np.cos(scanAngles + xt[2]), xt[1] - (scan.ranges * np.sin(scanAngles + xt[2]))])\n\n # print len(lidar_local[1])\n xtg = [int(np.ceil(xt[0] * Lresol)), int(np.ceil(xt[1] * Lresol))]\n self._map.grid[xtg[1], xtg[0]] = 0 # set the robot position grid as empty\n\n for k in range(0, len(scan.ranges) - 1):\n if scan.ranges[k] < scan.range_max:\n rtl = np.ceil(lidar_local[:, k] * 
Lresol)\n rtli = [0, 0]\n rtli[0] = int(rtl[0])\n rtli[1] = int(rtl[1])\n l = bresenham(xtg, rtli)\n self.EISM(l.path, scan.ranges[k])\n # Now that the map is updated, publish it!\n rospy.loginfo(\"Scan is processed, publishing updated map.\")\n self.publish_map()", "def wifi_scanner_single_scan(self, scan_setting):\n data = wutils.start_wifi_single_scan(self.dut, scan_setting)\n idx = data[\"Index\"]\n scan_rt = data[\"ScanElapsedRealtime\"]\n self.log.info(\n \"Wifi single shot scan started index: %s at real time: %s\", idx,\n scan_rt)\n results = []\n #generating event wait time from scan setting plus leeway\n scan_time, scan_channels = wutils.get_scan_time_and_channels(\n self.wifi_chs, scan_setting, self.stime_channel)\n wait_time = int(scan_time / 1000) + self.leeway\n validity = False\n #track number of result received\n result_received = 0\n try:\n for snumber in range(1, 3):\n event_name = \"{}{}onResults\".format(EVENT_TAG, idx)\n self.log.debug(\"Waiting for event: %s for time %s\", event_name,\n wait_time)\n event = self.dut.ed.pop_event(event_name, wait_time)\n self.log.debug(\"Event received: %s\", event)\n results = event[\"data\"][\"Results\"]\n result_received += 1\n bssids, validity = self.proces_and_valid_batch_scan_result(\n results, scan_rt, event[\"data\"][KEY_RET], scan_setting)\n asserts.assert_equal(\n len(results), 1,\n \"Test fail because number of scan result %s\" %\n len(results))\n asserts.assert_true(bssids > 0, EMPTY_RESULT)\n asserts.assert_true(validity, INVALID_RESULT)\n self.log.info(\"Scan number Buckets: %s\\nTotal BSSID: %s\",\n len(results), bssids)\n except queue.Empty as error:\n asserts.assert_true(\n result_received >= 1,\n \"Event did not triggered for single shot {}\".format(error))\n finally:\n self.dut.droid.wifiScannerStopScan(idx)\n #For single shot number of result received and length of result should be one\n asserts.assert_true(\n result_received == 1,\n \"Test fail because received result {}\".format(result_received))", "def _internal_scan(self, text: str)->list:\n start_index = -1\n # end_index = -1\n\n current_regex = None\n tokens = []\n # print(\"entered\", text)\n i = -1\n while i - 1 < len(text):\n # print(i, char, \"'{}'\".format(text[start_index:i + 1]), current_regex, tokens)\n i += 1\n if i >= len(text):\n break\n char = text[i]\n if start_index == -1:\n continue_flag = False\n for regex in self._regexes:\n result = regex.check(text[i: i + regex.min_lookahead])\n if result:\n start_index = i\n i += regex.min_lookahead - 1\n # print('found', \"'\"+text[start_index:i + 1]+\"'\")\n current_regex = regex\n continue_flag = True\n break\n if not continue_flag:\n tokens.append(UndefinedToken(char))\n else:\n continue_flag = False\n # print('check', \"'\" + text[start_index:i+1] + \"'\", tokens)\n if current_regex.check(text[start_index:i + 1]):\n continue_flag = True\n else:\n for regex in self._regexes:\n if regex.check(text[start_index:i + 1]) and regex != current_regex:\n continue_flag = True\n current_regex = regex\n\n if continue_flag:\n break\n if not continue_flag:\n tokens.append(Token(current_regex, text[start_index:i]))\n # start_index = -1\n # current_regex = None\n # print(tokens, \"'{}'\".format(text[start_index:i]))\n tokens2 = self._internal_scan(text[i:])\n if tokens2:\n tokens += self._backtrack(tokens2)\n return tokens\n if start_index != -1 and current_regex:\n tokens.append(Token(current_regex, text[start_index:]))\n # tokens = self._backtrack(tokens)\n\n return tokens", "def test_scan_empty_dir(self):\n 
self.run_scan(self.emptydir, 0)", "def _scan_table(self, uri):\n cmd = 'kiji scan {kiji_uri}/{uri} --max-versions=10'.format(\n kiji_uri=self.kiji_uri,\n uri=uri)\n self._run_kiji_job(cmd)", "def scan_callback(self, scan):\n\n # Fill some cells in the map just so we can see that something is \n # being published. \n self._map.grid[0, 0] = 1.0\n self._map.grid[0, 1] = .9\n self._map.grid[0, 2] = .7\n self._map.grid[1, 0] = .5\n self._map.grid[2, 0] = .3\n\n # Now that the map is updated, publish it!\n rospy.loginfo(\"Scan is processed, publishing updated map.\")\n self.publish_map()", "def reset(self) -> \"Scanner\":\n raise NotImplementedError", "def scan(self):\n inst_dict = {}\n if self.status: self.status.Warning(\"Scan function not implemented yet!\")\n return inst_dict\n #raise AssertionError(\"%s not implemented\" % sys._getframe().f_code.co_name)", "def __init__ (self, input) :\r\n ReaderA.__init__(self) # call parent\r\n # print '************************* input = ', input, type(input)\r\n self.buffer_ = input # this is any thing that can be indexed\r\n self.current_ = 0", "def scan(func, iterable, start=_EMPTY, *, echo_start=True):\n it = iter(iterable)\n if start is _EMPTY:\n start = next(it)\n if echo_start:\n yield start\n for item in it:\n start = func(start, item)\n yield start", "def scan():\n print \"Filtering started\"\n #filter new CC & merche\n filterNewOperators()\n\n #add the sample-info to 4_Analysed.csv, with hash, ip, port\n readd_to_toscan()\n\n print \"Scann started\"\n timestampFile = datetime.now()\n\n addHeaderToCSVIfNecessery(trashLog)\n # addHeaderToCSVIfNecessery(activityLog)\n if os.path.isfile(liveAnalysisFile):\n with open(liveAnalysisFile, 'r') as csvFile:\n targetList = csv.DictReader(csvFile)\n for target in targetList:\n process = subprocess.Popen(\"sudo nmap -p \" + target['PORT'] + \" -n --data-string \\\"\" + messageScan + \"\\\" --script \" + darkCometScript + \" --append-output -oN \" + resultLog + \" \" + target['HOST'], stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n print output\n if err is not None:\n print err\n if \"|_script: DarkComet\" in output:\n # Means the operator is active\n print \"--> Operator is active: \"+target[\"FILE HASH\"]\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(activityLog, 'a') as f:\n banner = getBanner(output)\n row.append(banner)\n wr = csv.writer(f)\n wr.writerow(row)\n counter = 0\n with open(targetFile, 'r') as csvFile:\n targetList = csv.DictReader(csvFile)\n with open(tempFile, 'w') as f:\n wrTemp = csv.writer(f)\n wrTemp.writerow(['HOST', 'PORT', 'FILE HASH'])\n for target in targetList:\n # TODO: Solve Python problem which doesn't recognise format [command,arg1,arg2]\n process = subprocess.Popen(\"sudo nmap -p \" + target[\n 'PORT'] + \" -n --data-string \\\"\" + messageScan + \"\\\" --script \" + darkCometScript + \" --append-output -oN \" + resultLog + \" \" +\n target['HOST'], stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n print output\n\n if \"0 IP addresses\" in output:\n # Means the domain name could not be resolved\n print \"--> Goes to trash\"\n addHeaderToCSVIfNecessery(trashFile)\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(trashFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow(row)\n elif \"|_script: DarkComet\" in output:\n # Means the operator is active\n print \"--> Operator is active\"\n\n addHeaderToCSVIfNecessery(liveAnalysisFile)\n row = 
[timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(activityLog, 'a') as f:\n wr = csv.writer(f)\n banner = getBanner(output)\n row.append(banner)\n wr.writerow(row)\n if counter < 6:\n with open(liveAnalysisFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow(row)\n with open(onlineFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow([target['FILE HASH']])\n counter += 1\n else:\n print \"--> to many to analyse, not added!\"\n wrTemp.writerow([target['HOST'], target['PORT'], target['FILE HASH']])\n else:\n # Means the operator is now not active but could it be later\n wrTemp.writerow([target['HOST'], target['PORT'], target['FILE HASH']])\n os.remove(targetFile)\n os.rename(tempFile, targetFile)\n if os.path.isfile(trashFile):\n print \"There are hosts in the trash\"\n try:\n host = socket.gethostbyname(\"www.google.com\")\n socket.create_connection((host, 80), 2)\n print \"Connected to internet -- hosts in trash are removed\"\n with open(trashFile, 'r') as csvFile:\n trashList = csv.DictReader(csvFile)\n with open(trashLog, 'a') as f:\n wr = csv.writer(f)\n for trash in trashList:\n wr.writerow([timestampFile, trash['HOST'], trash['PORT'], trash['FILE HASH']])\n os.remove(trashFile)\n except:\n print \"No internet - the hosts will be replaced in target\"\n with open(trashFile, 'r') as csvFile:\n trashList = csv.DictReader(csvFile)\n with open(targetFile, 'a') as f:\n wr = csv.writer(f)\n for trash in trashList:\n wr.writerow([trash['HOST'], trash['PORT'], trash['FILE HASH']])\n os.remove(trashFile)\n online()", "def handle_seq(seq, barcode_map, result_dict):\n for i in range(len(seq)):\n for barcode in barcode_map.keys():\n possible_match = seq[i: i + len(barcode)]\n if possible_match == barcode:\n result_dict[barcode][i] += 1", "def scan(self, attribute=None):\n\n raise NotImplementedError", "def cscanChecks(motor, start, step, param1, param2=-1, param3=-1):\n\tgenericScanChecks(True, True, motor, start, -1, step, param1, param2, param3)", "def scan_id(barcode):\n return scanner.scan(barcode)", "def scan_specs(self, specs, fail_fast=True):", "def iterate(self):", "def scan_root(str_ri, lst_rx, lst_rxe, db):\n print(f\"scan_root(); Scanning: {str_ri, lst_rx, lst_rxe}\")\n # get db info on this dir\n dic_db = dict() # dic by ffn of files known to the db\n str_sql = f\"SELECT * FROM files where filename like '{str_ri}%'\"\n for row in db.execute(str_sql):\n dic_db[row[0]] = row\n # Remove files that no longer exist\n lst_del_this = list()\n for str_ffn_db in dic_db.keys():\n if not os.path.isfile(str_ffn_db):\n lst_del_this.append(str_ffn_db)\n str_ffn_db__sql = str_ffn_db.replace(\"'\", \"''\")\n str_sql = f\"DELETE FROM files WHERE filename='{str_ffn_db__sql}';\"\n db.execute(str_sql)\n db.commit()\n for itm in lst_del_this: # can't change iterable from inside loop\n del dic_db[itm]\n # Walk the root-dir\n num_cntfil = 0\n for root, dirs, files in os.walk(str_ri):\n for str_fn in files:\n # if str_fn.lower().endswith('.jpg'):\n # print(str_fn)\n num_cntfil += 1\n if not any([str_fn.endswith(e) for e in lst_rxe]):\n str_ffn = os.path.join(root, str_fn)\n if not any([str_ffn.startswith(x) for x in lst_rx]): # if the file is not excluded\n if str_ffn in dic_db.keys(): # db knows this file\n obj_bdg = dic_db[str_ffn]\n tim, siz = timeandsize(str_ffn)\n if tim == dic_db[str_ffn][1] and siz == dic_db[str_ffn][2]:\n pass # print(f\" - skipping known file: {str_ffn} == {dic_db[str_ffn]}\") #\n else:\n ## print(f\"WTF: tim? {tim == dic_db[str_ffn][1]} siz? 
{siz == dic_db[str_ffn][2]} @ ffn: {str_ffn}\")\n # time or date have changed - so re-scanning file, and update DB.\n str_sql = f\"DELETE FROM files WHERE filename='{str_ffn}';\"\n db.execute(str_sql)\n db.commit()\n add_file2db(str_ffn, db)\n else: # db don't know this file - add it.\n add_file2db(str_ffn, db)\n if num_cntfil % 1000000 == 0:\n print(f\"Count: {num_cntfil}: {str_ffn}\")", "def apply(self):", "def do_scans_with_ref(self, nr_runs):\n print()\n print('do_scan')\n print()\n scan = self.scan\n laser = self.devices[scan['laser']['name']]\n dev_to_scan = scan['axis']['device']['name']\n output = scan['axis']['device']['property']\n approx_time_to_scan = (laser.params['stop_wavelength']-laser.params['start_wavelength'])/laser.params['wavelength_speed']\n # Scan the laser and the values of the given device\n if output != 'time':\n dev_range = scan['axis']['device']['range']\n start = Q_(dev_range[0])\n units = start.u\n stop = Q_(dev_range[1])\n step = Q_(dev_range[2])\n \n num_points_dev = ((stop-start)/step).to('')\n else:\n dev_range = scan['axis']['device']['range']\n start = 1\n stop = dev_range[1]\n num_points_dev = stop\n\n num_points_dev += 1 # So the last bit of information is true.\n\n for value in np.linspace(start, stop, num_points_dev, endpoint=True):\n if output != 'time':\n self.set_value_to_device(dev_to_scan, {output: value * units})\n dev = self.devices[dev_to_scan]\n time.sleep(0.1)\n while not dev.driver.finished_moving:\n time.sleep(0.2)\n for i in range(nr_runs):\n print('run number = ', i)\n self.do_line_scan_shutter_closed()\n \n return True", "def count():", "def scanl(f, base, l):\n yield base\n for x in l:\n base = f(base, x)\n yield base", "def nextValues(self):\n return list(i.nextLine[self.idx] for i in self if not i.isFinished)\n\n #def isFinished(self):\n \"\"\"When all the data is read.\"\"\"\n #pass\n\n #def getInitialValue(self):\n \"\"\"Returns the initial alignment value.\"\"\"\n #pass\n\n #def newCurrentValue(self):\n \"\"\"Returns the next alignment value.\"\"\"\n #pass\n\n #def align(self, currentValue):\n \"\"\"Process all the elements of self to make them aligned.\"\"\"\n #pass", "def scan(input, policy, input_encoding=DEFAULT_ENCODING,\n output_encoding=DEFAULT_ENCODING):\n return CleanResults()", "def _scan_bq_data(self, uhandle, consumer):\n \n qual=''\n while 1:\n line=uhandle.readline()\n if is_blank_line(line):\n uhandle.saveline(line)\n break\n qual+=' '+line\n return qual", "def FastaM10Iterator(handle, seq_count=...):\n ...", "def test_set_scan_status(self):\n pass", "def scanl(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanl must be callable\")\n itr = iter(itr)\n\n return _scanl(func, start, itr)", "def scanwhile(first, p):\n lines = [first]\n while True:\n line = lr.readline()\n if not line:\n break\n if p(line):\n lines.append(line)\n else:\n lr.push(line)\n break\n return lines", "def wifi_scanner_batch_scan(self, scan_setting):\n data = wutils.start_wifi_background_scan(self.dut, scan_setting)\n idx = data[\"Index\"]\n scan_rt = data[\"ScanElapsedRealtime\"]\n self.log.info(\n \"Wifi background scan started with index: %s real time %s\", idx,\n scan_rt)\n scan_time, scan_channels = wutils.get_scan_time_and_channels(\n self.wifi_chs, scan_setting, self.stime_channel)\n #generating event wait time from scan setting plus leeway\n time_cache = 0\n number_bucket = 1 #bucket for Report result on each scan\n check_get_result = False\n if scan_setting[\n 'reportEvents'] == 
wutils.WifiEnums.REPORT_EVENT_AFTER_BUFFER_FULL:\n check_get_result = True\n if ('maxScansToCache' in scan_setting and\n scan_setting['maxScansToCache'] != 0):\n time_cache = (scan_setting['maxScansToCache'] *\n scan_setting['periodInMs'])\n number_bucket = scan_setting['maxScansToCache']\n else:\n time_cache = 10 * scan_setting['periodInMs'\n ] #10 as default max scan cache\n number_bucket = 10\n else:\n time_cache = scan_setting[\n 'periodInMs'\n ] #need while waiting for seconds resutls\n # multiply cache time by two to account for scheduler changing period\n wait_time = (time_cache * 2 + scan_time) / 1000 + self.leeway\n validity = False\n try:\n for snumber in range(1, 3):\n event_name = \"%s%sonResults\" % (EVENT_TAG, idx)\n self.log.info(\"Waiting for event: %s for time %s\", event_name,\n wait_time)\n event = self.dut.ed.pop_event(event_name, wait_time)\n self.log.debug(\"Event received: %s\", event)\n results = event[\"data\"][\"Results\"]\n bssids, validity = (self.proces_and_valid_batch_scan_result(\n results, scan_rt, event[\"data\"][KEY_RET], scan_setting))\n self.log.info(\"Scan number: %s\\n Buckets: %s\\n BSSID: %s\",\n snumber, len(results), bssids)\n asserts.assert_equal(\n len(results), number_bucket,\n \"Test fail because number_bucket %s\" % len(results))\n asserts.assert_true(bssids >= 1, EMPTY_RESULT)\n asserts.assert_true(validity, INVALID_RESULT)\n if snumber % 2 == 1 and check_get_result:\n self.log.info(\"Get Scan result using GetScanResult API\")\n time.sleep(wait_time / number_bucket)\n if self.dut.droid.wifiScannerGetScanResults():\n event = self.dut.ed.pop_event(event_name, 1)\n self.log.debug(\"Event onResults: %s\", event)\n results = event[\"data\"][\"Results\"]\n bssids, validity = self.proces_and_valid_batch_scan_result(\n results, scan_rt, event[\"data\"][KEY_RET],\n scan_setting)\n self.log.info(\"Got Scan result number: %s BSSID: %s\",\n snumber, bssids)\n asserts.assert_true(bssids >= 1, EMPTY_RESULT)\n asserts.assert_true(validity, INVALID_RESULT)\n else:\n self.log.error(\"Error while fetching the scan result\")\n except queue.Empty as error:\n raise AssertionError(\"Event did not triggered for batch scan %s\" %\n error)\n finally:\n self.dut.droid.wifiScannerStopBackgroundScan(idx)\n self.dut.ed.clear_all_events()" ]
[ "0.74488825", "0.6680533", "0.6557721", "0.6557721", "0.64835787", "0.6480018", "0.61668414", "0.61504644", "0.60862774", "0.6066822", "0.6051269", "0.604504", "0.60443306", "0.59857833", "0.5771174", "0.5683425", "0.5614547", "0.56018776", "0.55780876", "0.5559289", "0.5559289", "0.5547957", "0.55451834", "0.5493398", "0.5479211", "0.54697555", "0.5449949", "0.54424477", "0.54391706", "0.54370385", "0.53030115", "0.5292215", "0.5279968", "0.5270953", "0.5270421", "0.5204756", "0.5201873", "0.5201114", "0.51824874", "0.51585793", "0.5147916", "0.5140837", "0.513287", "0.51261795", "0.51128507", "0.51043665", "0.5096919", "0.50796556", "0.5049689", "0.50464195", "0.50430167", "0.50293636", "0.4994291", "0.49812067", "0.49781984", "0.49755824", "0.49755824", "0.49734724", "0.49673256", "0.49641058", "0.49625817", "0.49579468", "0.49482375", "0.49447072", "0.494006", "0.49231803", "0.49193296", "0.4908048", "0.4889536", "0.48738027", "0.4851061", "0.48337647", "0.48314023", "0.48280993", "0.48162192", "0.48025057", "0.4802385", "0.48007703", "0.4796409", "0.4784265", "0.47758758", "0.4775764", "0.47753337", "0.47494355", "0.47493148", "0.47420853", "0.47404656", "0.47359627", "0.47327572", "0.47217584", "0.4717951", "0.47147012", "0.4713455", "0.470859", "0.47080147", "0.4706929", "0.46986306", "0.46950555", "0.46762156", "0.46629837", "0.46595398" ]
0.0
-1
Given rows of normal vectors to line L, return points (rows) that are somewhere on each line. Just find the intersection with some basis line.
def points_on_lines(hyperplanes):
    intersections = []
    for row in hyperplanes:
        intersections.append(an_intersection(row[:-1], -row[-1]))
    return np.array(intersections)
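The an_intersection helper that this record's answer relies on is not shown in the dump. Below is a minimal sketch of how the pair could fit together, assuming each row encodes a line a.x + b = 0 (normal vector a, offset b) and that an_intersection picks the crossing with a coordinate axis, i.e. the "basis line" the query mentions; the helper's body is an assumption, not part of the record.

import numpy as np

def an_intersection(normal, rhs):
    # Hypothetical helper (not defined in the record above): return a point x
    # with normal . x == rhs by intersecting the hyperplane with a basis line,
    # i.e. a coordinate axis along which the normal has a nonzero component.
    # Assumes the normal vector is not all zeros.
    normal = np.asarray(normal, dtype=float)
    i = np.flatnonzero(normal)[0]      # first axis the hyperplane actually crosses
    point = np.zeros_like(normal)
    point[i] = rhs / normal[i]         # solve normal[i] * x_i = rhs, all other coords 0
    return point

def points_on_lines(hyperplanes):
    # Rows are [a_1, ..., a_n, b] encoding a . x + b = 0; return one point per row.
    intersections = []
    for row in np.asarray(hyperplanes, dtype=float):
        intersections.append(an_intersection(row[:-1], -row[-1]))
    return np.array(intersections)

# Example: the line 2x + 0y - 4 = 0 contains the point (2, 0).
print(points_on_lines([[2.0, 0.0, -4.0]]))   # -> [[2. 0.]]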
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def get_line_intersects_line(self) -> List[List[Line]]:\n intersections = []\n\n for line_bin in self.line_bins.values():\n for connection_pair in itertools.combinations(line_bin, 2):\n line_segments = (\n connection_pair[0].line_segments + connection_pair[1].line_segments\n )\n\n for segment_pair in itertools.combinations(line_segments, 2):\n if check_cross(segment_pair[0], segment_pair[1]):\n intersections.append(connection_pair)\n # for line_bin in self.line_bins.values():\n # segments = []\n # line_idx_map = []\n # for line_1, line_2 in itertools.combinations(line_bin, 2):\n # for segment in line_1.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_1)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n # for segment in line_2.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_2)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n #\n # for collision_point in segments_intersections(segments).values():\n # for intersection in collision_point:\n # intersections.append([line_idx_map[i] for i in intersection])\n return intersections", "def parallelogram_vertices_from_grouped_lines(lines):\n if len(lines) > 2:\n raise Exception(\"parallelogram finder \\\n called with too many lines\")\n c_1 = lines[0]\n c_2 = lines[1]\n intercepts = None\n for l1, l2 in list(zip(c_1, c_2)) + list(zip(c_1, c_2[::-1])):\n x = solve_for_intersection(np.array([l1, l2]))\n if intercepts is None:\n intercepts = np.array([x])\n else:\n intercepts = np.vstack((intercepts, x))\n return intercepts", "def lineLineIntersectXY(l1,l2,inside=True,params=False):\n\n x1=l1[0][0]\n y1=l1[0][1]\n z1=l1[0][2]\n \n x2=l1[1][0]\n y2=l1[1][1]\n z2=l1[1][2]\n\n x3=l2[0][0]\n y3=l2[0][1]\n z3=l2[0][2]\n \n x4=l2[1][0]\n y4=l2[1][1]\n z4=l2[1][2]\n\n ## check for x,y planar consistency\n if abs(z2-z1) > epsilon or abs(z3-z1) > epsilon or abs(z4-z1) > epsilon:\n raise ValueError('lines not in same x-y plane')\n\n ## do lines intersect anywhere?\n denom=(x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)\n if 
denom*denom < epsilon:\n return False\n\n ## the lines do intersect, so let's see if they intersect\n ## inside both line segments\n t = ((x1-x3)*(y3-y4) - (y1-y3)*(x3-x4))/denom\n u = -1 * ((x1-x2)*(y1-y3) - (y1-y2)*(x1-x3))/denom\n\n ## return the paramater space intersection\n if params:\n return [t,u]\n \n ## do we care about falling inside the line segments? if so,\n ## check that the intersection falls within\n if inside and ( t < 0.0 or t > 1.0 or u < 0.0 or u > 1.0):\n return False\n\n return [x1 + t*(x2-x1), y1+t*(y2-y1), z1, 1.0]", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def intersection_line_line(ab, cd):\n a, b = ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]", "def intersection(line1, line2):\n rho1, theta1 = line1\n rho2, theta2 = line2\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return [x0, y0]", 
"def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)", "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]", "def lineintersect(line1,line2):\n a1, a2, b1, b2=line1[0],line1[1],line2[0],line2[1]\n\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return (float('inf'), float('inf'))\n return (x/z, y/z)", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def get_line_circle_intersections(A, B, C, r):\n Lx = B[0] - A[0]\n Ly = B[1] - A[1]\n Lz = B[2] - A[2]\n\n # stranger things\n D = Lx**2 + Ly**2\n E = 2 * ( Lx * (A[0] - C[0]) + Ly * (A[1] - C[1]) )\n F = (\n (A[0] - C[0])**2\n + (A[1] - C[1])**2\n - r**2\n )\n det = E**2 - 4 * D * F\n \n # declare null vectors\n P1 = [0, 0, 0]\n P2 = [0, 0, 0]\n t1 = t2 = None\n eps = .00001\n if ( not (D <= eps) or (det < 0) ):\n if det == 0:\n print \"tangential intersection found\",\n t1 = t2 = -E / (2*D)\n else:\n print \"pass-through intersection found\",\n t1 = ( (-E + math.sqrt(det)) / (2 * D) )\n t2 = ( (-E - math.sqrt(det)) / (2 * D) )\n P1[0] = A[0] + t1 * Lx\n P1[1] = A[1] + t1 * Ly\n P1[2] = A[2] + t1 * Lz\n P2[0] = A[0] + t2 * Lx\n P2[1] = A[1] + t2 * Ly\n P2[2] = A[2] + t2 * Lz\n else:\n print \"no intersections are available\",\n\n return P1, P2", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. 
Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def intersection(self, L):\n if self.slope() == L.slope():\n return None\n intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)\n intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)\n\n return (intpt_xcood, intpt_ycood)", "def intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n else:\n return False", "def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. 
Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b", "def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def intersect(nodeL, nodeR, city):\n PL = get_node_points(nodeL)\n P = get_city_points(city)\n PR = get_node_points(nodeR)\n\n equation1 = matrix.linear_eq((PL, P))\n equation2 = matrix.linear_eq((PR, P))\n\n for item in linear_list:\n lineP1 = get_node_points(item[0])\n lineP2 = get_node_points(item[1])\n temp = item[2][:]\n temp[2] = temp[2] * (-1)\n inter_points = get_intersect_points(equation1, temp)\n if inter_points == \"parallel\":\n check1 = False\n else:\n x = is_between(inter_points[0], PL[0], P[0])\n y = is_between(inter_points[0], lineP1[0], lineP2[0])\n check1 = x & y\n if check1 == True:\n return True\n\n for item in linear_list:\n lineP1 = get_node_points(item[0])\n lineP2 = get_node_points(item[1])\n temp = item[2][:]\n temp[2] = temp[2] * (-1)\n inter_points = get_intersect_points(equation2, temp)\n if inter_points == \"parallel\":\n check2 = False\n else:\n x = is_between(inter_points[0], PR[0], P[0])\n y = is_between(inter_points[0], lineP1[0], lineP2[0])\n check2 = x & y\n if check2 == True:\n return True\n return 
False", "def lines_intersect_2d(line1_pt1, line1_pt2, line2_pt1, line2_pt2):\r\n return geometry.gmLinesIntersect(line1_pt1, line1_pt2, line2_pt1, line2_pt2)", "def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, -6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. 
\n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def intersect_ext(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0, 0\n dp = line.p - self.p\n c2 = self.cross_z\n u = c.dot(dp) / d\n v = c2.dot(dp) / d\n return u > 0 and v > 0 and u < 1 and v < 1, self.lerp(u), u, v", "def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t", "def side_points(p, v, L): \r\n u = np.array([-v[1], v[0]]) # positive normal of v:\r\n N = list() # list of points on one side of the line p,v:\r\n for k in range(len(L)):\r\n if (L[k] - p).dot(u) >= 0:\r\n N.append(L[k])\r\n \r\n return N", "def get_intersection_points(lines, debug_img=None):\n\n # Convert [a,b,c,d] to [(a,b), (b,c), (c,d), (d,a)]\n line_pairs = list(zip(lines, lines[1:]+lines[:1]))\n\n corners = [get_intersection_point(*p) for p in line_pairs]\n\n if debug_img is not None:\n int_corners = np.array(corners, np.int32)\n draw_corners(debug_img, int_corners, (0, 255, 0))\n\n return corners", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def linesegment_plane_intersection(self, p0,p1,point,normal): # only returns lines...intersections through the segment end points are ignored\n\t\tp0dot=numpy.dot(p0-point,normal)\n\t\tp1dot=numpy.dot(p1-point,normal)\n\t\tif (p0dot>0 and p1dot<0) or (p0dot<0 and p1dot>0): \n\t\t\t# if the dot products have opposing signs, then the line intersects the plane\n\t\t\treturn True,p0+(p1-p0)*abs(p0dot)/(abs(p0dot)+abs(p1dot))\n\t\telse:\n\t\t\treturn False", "def L(Rd: np.array, Re: np.array):\n nd = Rd[:, 0]\n sd = Rd[:, 1]\n ad = Rd[:, 2]\n\n ne = Re[:, 0]\n se = Re[:, 1]\n ae = Re[:, 2]\n\n ll = -0.5 * (\n np.dot(skew(nd), skew(ne))\n + np.dot(skew(sd), skew(se))\n + np.dot(skew(ad), skew(ae))\n )\n\n llinv = np.linalg.pinv(ll)\n\n return ll, llinv", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], 
point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def _any_intersect(line_set_1, line_set_2, h_set_1=None, h_set_2=None):\n # try to speed this one up by reducing the number of comparisons\n # necessary. About half the time is spent in here, incl. subroutines.\n h_set_1 = [_homogenous_line(*segment) for segment in line_set_1] if h_set_1 is None else h_set_1\n h_set_2 = [_homogenous_line(*segment) for segment in line_set_2] if h_set_2 is None else h_set_2\n for h1, l1 in zip(h_set_1, line_set_1):\n for h2, l2 in zip(h_set_2, line_set_2):\n P = _intersection_homogenous(h1, h2)\n if P==(None, None): continue\n if _point_within_bounds(l1,P) and _point_within_bounds(l2,P):\n return True\n return False", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def _intersection_homogenous(homog_line_0, homog_line_1):\n # NB: renamed from '_intersection'\n eps = 1e-13\n a,b,c=homog_line_0\n u,v,w=homog_line_1\n D=float(b*u-v*a)\n if abs(D)<eps:\n # parallel lines\n return None, None\n xp=-(w*b-c*v)/D\n yp= (w*a-c*u)/D\n\n return xp, yp", "def find_hull_vertices(points: np.ndarray) -> np.ndarray:\n M = 3\n N = points.shape[0]\n for i in range(4, N):\n while ccw(points[M], points[M - 1], points[i]) >= 0:\n M -= 1\n\n M += 1\n swap(points, M, i)\n\n return points[1:M + 1]", "def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack((a1, a2, 
b1, b2)) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return None\n return np.array([x / z, y / z])", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def get_verts(v_l, v_r):\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points", "def get_points_for_thick_line(start_x: float, start_y: float,\r\n end_x: float, end_y: float,\r\n line_width: float):\r\n vector_x = start_x - end_x\r\n vector_y = start_y - end_y\r\n perpendicular_x = vector_y\r\n perpendicular_y = -vector_x\r\n length = math.sqrt(vector_x * vector_x + vector_y * vector_y)\r\n if length == 0:\r\n normal_x = 1.0\r\n normal_y = 1.0\r\n else:\r\n normal_x = perpendicular_x / length\r\n normal_y = perpendicular_y / length\r\n r1_x = start_x + normal_x * line_width / 2\r\n r1_y = start_y + normal_y * line_width / 2\r\n r2_x = start_x - normal_x * line_width / 2\r\n r2_y = start_y - normal_y * line_width / 2\r\n r3_x = end_x + normal_x * line_width / 2\r\n r3_y = end_y + normal_y * line_width / 2\r\n r4_x = end_x - normal_x * line_width / 2\r\n r4_y = end_y - normal_y * line_width / 2\r\n points = (r1_x, r1_y), (r2_x, r2_y), (r4_x, r4_y), (r3_x, r3_y)\r\n return points", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def isinsidelineXY(l,p):\n\n return linePointXY(l,p,distance=True) < epsilon", "def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n 
the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)", "def linear_LS_triangulation(u1, P1, u2, P2):\n A = np.zeros((4, 3))\n b = np.zeros((4, 1))\n\n # Create array of triangulated points\n x = np.zeros((3, len(u1)))\n\n # Initialize C matrices\n C1 = np.array(linear_LS_triangulation_C)\n C2 = np.array(linear_LS_triangulation_C)\n\n for i in range(len(u1)):\n # Derivation of matrices A and b:\n # for each camera following equations hold in case of perfect point matches:\n # u.x * (P[2,:] * x) = P[0,:] * x\n # u.y * (P[2,:] * x) = P[1,:] * x\n # and imposing the constraint:\n # x = [x.x, x.y, x.z, 1]^T\n # yields:\n # (u.x * P[2, 0:3] - P[0, 0:3]) * [x.x, x.y, x.z]^T + (u.x * P[2, 3] - P[0, 3]) * 1 = 0\n # (u.y * P[2, 0:3] - P[1, 0:3]) * [x.x, x.y, x.z]^T + (u.y * P[2, 3] - P[1, 3]) * 1 = 0\n # and since we have to do this for 2 cameras, and since we imposed the constraint,\n # we have to solve 4 equations in 3 unknowns (in LS sense).\n #\n # Build C matrices, to construct A and b in a concise way\n C1[:, 2] = u1[i, :]\n C2[:, 2] = u2[i, :]\n\n # Build A matrix:\n # [\n # [ u1.x * P1[2,0] - P1[0,0], u1.x * P1[2,1] - P1[0,1], u1.x * P1[2,2] - P1[0,2] ],\n # [ u1.y * P1[2,0] - P1[1,0], u1.y * P1[2,1] - P1[1,1], u1.y * P1[2,2] - P1[1,2] ],\n # [ u2.x * P2[2,0] - P2[0,0], u2.x * P2[2,1] - P2[0,1], u2.x * P2[2,2] - P2[0,2] ],\n # [ u2.y * P2[2,0] - P2[1,0], u2.y * P2[2,1] - P2[1,1], u2.y * P2[2,2] - P2[1,2] ]\n # ]\n A[0:2, :] = C1.dot(P1[0:3, 0:3]) # C1 * R1\n A[2:4, :] = C2.dot(P2[0:3, 0:3]) # C2 * R2\n\n # Build b vector:\n # [\n # [ -(u1.x * P1[2,3] - P1[0,3]) ],\n # [ -(u1.y * P1[2,3] - P1[1,3]) ],\n # [ -(u2.x * P2[2,3] - P2[0,3]) ],\n # [ -(u2.y * P2[2,3] - P2[1,3]) ]\n # ]\n b[0:2, :] = C1.dot(P1[0:3, 3:4]) # C1 * t1\n b[2:4, :] = C2.dot(P2[0:3, 3:4]) # C2 * t2\n b *= -1\n\n # Solve for x vector\n cv2.solve(A, b, x[:, i:i + 1], cv2.DECOMP_SVD)\n\n return np.transpose(x), np.ones(len(u1), dtype=bool)", "def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])", "def intersection(self, line):\n\t\tdenom = (line.b[1]-line.a[1])*(self.b[0]-self.a[0]) - (line.b[0]-line.a[0])*(self.b[1]-self.a[1])\n\t\t# denominator is 0 if lines are parallel\n\t\tif denom == 0:\n\t\t\treturn None\n\t\t\n\t\tnum_a = (line.b[0]-line.a[0])*(self.a[1]-line.a[1]) - (line.b[1]-line.a[1])*(self.a[0]-line.a[0])\n\t\tnum_b = (self.b[0]-self.a[0])*(self.a[1]-line.a[1]) - (self.b[1]-self.a[1])*(self.a[0]-line.a[0])\n\t\t# if both numerators are 0 then lines are coincident\n\t\tif num_a==0 and num_b==0:\n\t\t\treturn None\n\t\t\t\n\t\tu_a = num_a/denom\n\t\tu_b = num_b/denom\n\t\t\t\n\t\tif 0 <= u_a <= 1 and 0 <= u_b <= 1:\n\t\t\treturn self.a + uA*(self.b-self.a)\n\t\telse:\n\t\t\treturn None", "def segmented_intersections(lines):\r\n\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections", "def is_intersection_line_line(ab, cd, epsilon=1e-6):\n a, b = ab\n c, d = cd\n\n line_vector_1 = normalize_vector(vector_from_points(a, b))\n line_vector_2 = normalize_vector(vector_from_points(c, d))\n # check for parallel lines\n print(abs(dot_vectors(line_vector_1, line_vector_2)))\n if abs(dot_vectors(line_vector_1, line_vector_2)) > 1.0 - epsilon:\n return False\n # check for intersection\n d_vector 
= cross_vectors(line_vector_1, line_vector_2)\n if dot_vectors(d_vector, subtract_vectors(c, a)) == 0:\n return True\n return False", "def intersects(self):\n match = False\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n bounds = self.__line_segment(p1, p2)\n if not bounds is None:\n xmin = bounds[0]\n ymin = bounds[1]\n xmax = bounds[0]\n ymax = bounds[1]\n for j in range(len(bounds)):\n if not (j % 2):\n if bounds[j] < xmin:\n xmin = bounds[j]\n elif bounds[j] > xmax:\n xmax = bounds[j]\n else:\n if bounds[j] < ymin:\n ymin = bounds[j]\n elif bounds[j] > ymax:\n ymax = bounds[j]\n x = self.x\n y = self.y\n # TODO: Determine direction, and check two leading edge points; ie. last vector ----> then points are x+width,y+width x+width,y-width\n if x > xmin and x < xmax and y > ymin and y < ymax:\n match = True\n break\n return match", "def intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n else:\n return None", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack([a1, a2, b1, b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return float('inf'), float('inf')\n return x / z, y / z", "def calculate_parallel_intersection(points, gradients):\n p1, p2, p3, p4 = points\n m1, m2 = gradients\n b1, b2 = None, None # vertical lines have no b value\n if m1 is not None:\n b1 = CollisionUtility.calculate_y_axis_intersect(p1, m1)\n if m2 is not None:\n b2 = CollisionUtility.calculate_y_axis_intersect(p3, m2)\n # If these parallel lines lay on one another\n return (p1, p2, p3, p4) if b1 == b2 else None", "def find_out_difference_perpendiculars(lap, ref_lap):\n\n distances = []\n\n for i in lap.index:\n point = lap.loc[i]\n\n closest_index = find_closest_point(point, ref_lap)\n closest_point = ref_lap.loc[closest_index]\n\n neighbor_i = len(ref_lap) - 1 if closest_index == 0 else closest_index - 1\n neighbor1 = ref_lap.loc[neighbor_i]\n neighbor_i = 0 if len(ref_lap) == closest_index + 1 else closest_index + 1\n neighbor2 = ref_lap.loc[neighbor_i]\n\n v1 = create_vector(closest_point, point)\n v2 = create_vector(closest_point, neighbor1)\n v3 = create_vector(closest_point, neighbor2)\n\n angle1 = find_angle_between_vectors(v1, v2)\n angle2 = find_angle_between_vectors(v1, v3)\n\n degrees90 = math.pi / 2\n min_dist = -1\n if angle1 > degrees90 and angle2 > degrees90:\n min_dist = line_length(point.LAT, point.LON, closest_point.LAT, closest_point.LON)\n elif angle1 < degrees90 and angle2 < degrees90:\n dist1 = find_shortest_distance(point, closest_point, neighbor1)\n dist2 = find_shortest_distance(point, closest_point, neighbor2)\n min_dist = dist1 if dist1 <= dist2 else dist2\n elif angle1 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor1)\n elif angle2 <= degrees90:\n min_dist = find_shortest_distance(point, closest_point, neighbor2)\n\n if min_dist == -1:\n print('ERROR: Could not find distance')\n print(\"Indices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif math.isnan(min_dist):\n print(\"NAN 
value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n elif min_dist < 0:\n print(\"Negative value!!!\\nIndices: {} {}\\nAngles: {} {}\".format(i, closest_index, angle1, angle2))\n else:\n min_dist = degrees2kilometers(min_dist) * 100000 # in centimeters\n distances.append(min_dist)\n\n return distances", "def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled", "def segmented_intersections(lines):\n\n intersections = []\n for i, group in enumerate(lines[:-1]):\n for next_group in lines[i+1:]:\n for line1 in group:\n for line2 in next_group:\n intersections.append(intersection(line1, line2)) \n\n return intersections", "def LineSphereIntersection(line, sphere_center, sphere_radius):\n line = rhutil.coerceline(line, True)\n sphere_center = rhutil.coerce3dpoint(sphere_center, True)\n sphere = Rhino.Geometry.Sphere(sphere_center, sphere_radius)\n rc, pt1, pt2 = Rhino.Geometry.Intersect.Intersection.LineSphere(line, sphere)\n if rc==Rhino.Geometry.Intersect.LineSphereIntersection.None: return []\n if rc==Rhino.Geometry.Intersect.LineSphereIntersection.Single: return [pt1]\n return [pt1, pt2]", "def linear_triangulation(p1, p2, m1, m2):\n num_points = p1.shape[1]\n res = np.ones((4, num_points))\n\n for i in range(num_points):\n A = np.asarray([\n (p1[0, i] * m1[2, :] - m1[0, :]),\n (p1[1, i] * m1[2, :] - m1[1, :]),\n (p2[0, i] * m2[2, :] - m2[0, :]),\n (p2[1, i] * m2[2, :] - m2[1, :])\n ])\n\n _, _, V = np.linalg.svd(A)\n X = V[-1, :4]\n res[:, i] = X / X[3]\n\n return res", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def ll(L1, L2):\n if not all(isinstance(L, Line) for L in (L1, L2)):\n raise TypeError('ll() expects two lines')\n return L1.normal_vector() ** L2.normal_vector() == 0", "def find_centers(line_complex):\n # There is a line where the flux is at a minimum, i.e., the second\n # derivative is positive.\n diff2 = numpy.diff(numpy.sign(numpy.diff(line_complex)))\n zero_crossings = numpy.where(diff2 > 0.)[0]\n return zero_crossings + 1", "def get_intersect(a1, a2, b1, b2):\r\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\r\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\r\n l1 = np.cross(h[0], h[1]) # get first line\r\n l2 = np.cross(h[2], h[3]) # get second line\r\n x, y, z = np.cross(l1, l2) # point of intersection\r\n if z == 0: # lines are parallel\r\n return (float('inf'), float('inf'))\r\n return (x/z, y/z)", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def intersects(*args):\r\n if len(args) == 2:\r\n p0, p1, p2, p3 = *args[0], *args[1]\r\n elif len(args) == 4:\r\n p0, p1, p2, p3 = args\r\n else:\r\n raise AttributeError(\"Pass 2, 2-pnt lines or 4 points to the function\")\r\n #\r\n # ---- First check ---- np.cross(p1-p0, p3-p2 )\r\n p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y = *p0, *p1, *p2, 
*p3\r\n s10_x = p1_x - p0_x\r\n s10_y = p1_y - p0_y\r\n s32_x = p3_x - p2_x\r\n s32_y = p3_y - p2_y\r\n denom = s10_x * s32_y - s32_x * s10_y\r\n if denom == 0.0:\r\n return False\r\n #\r\n # ---- Second check ---- np.cross(p1-p0, p0-p2 )\r\n den_gt0 = denom > 0\r\n s02_x = p0_x - p2_x\r\n s02_y = p0_y - p2_y\r\n s_numer = s10_x * s02_y - s10_y * s02_x\r\n if (s_numer < 0) == den_gt0:\r\n return False\r\n #\r\n # ---- Third check ---- np.cross(p3-p2, p0-p2)\r\n t_numer = s32_x * s02_y - s32_y * s02_x\r\n if (t_numer < 0) == den_gt0:\r\n return False\r\n #\r\n if ((s_numer > denom) == den_gt0) or ((t_numer > denom) == den_gt0):\r\n return False\r\n #\r\n # ---- check to see if the intersection point is one of the input points\r\n t = t_numer / denom\r\n # substitute p0 in the equation\r\n x = p0_x + (t * s10_x)\r\n y = p0_y + (t * s10_y)\r\n # be careful that you are comparing tuples to tuples, lists to lists\r\n if sum([(x, y) == tuple(i) for i in [p0, p1, p2, p3]]) > 0:\r\n return False\r\n return True", "def compute_intersections(\r\n self, plane: Plane, directions: List[np.ndarray]\r\n ) -> List[np.ndarray]:\r\n return [\r\n line_plane_intersection(\r\n plane_origin=plane.origin,\r\n plane_normal=plane.normal,\r\n line_direction=direction,\r\n )\r\n for direction in directions\r\n ]", "def intersectConics(E1, E2):\n\n P = np.array([])\n r1 = matrix_rank(E1)\n r2 = matrix_rank(E2)\n \n if(r1==3 and r2==3):\n P = completeIntersection(E1,E2) \n else:\n if (r2 < 3): #E2 is degenerate\n defE = E2\n fullE = E1\n else:\n defE = E1 #E1 is degenerate\n fullE = E2\n m, l = decomposeDegenerateConic(defE)\n P1 = intersectConicLine(fullE,m)\n P2 = intersectConicLine(fullE,l)\n P = np.array([P1, P2])\n points_x = []\n points_y = []\n for i in range(2):\n P1 = P[i]\n if(P1.size!=0):\n for j in range(P1.shape[0]):\n points_x.append(P1[j,0]/P1[j,2])\n points_y.append(P1[j,1]/P1[j,2])\n return points_x, points_y", "def intersect_ext(self, line):\n res, p, v = self.intersect(line)\n v0 = self.p0 - self.c\n v1 = p - self.c\n u = self.signed_angle(v0, v1) / self.da\n return res and u > 0 and v > 0 and u < 1 and v < 1, p, u, v", "def endpoints_from_lines(lines):\n \n all_points = []\n for line in lines:\n for i in [0, -1]: # start and end point\n all_points.append(line.coords[i])\n \n unique_points = set(all_points)\n \n return [Point(p) for p in unique_points]", "def line_intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def LineLineIntersection(lineA, lineB):\n lineA = rhutil.coerceline(lineA, True)\n lineB = rhutil.coerceline(lineB, True)\n rc, a, b = Rhino.Geometry.Intersect.Intersection.LineLine(lineA, lineB)\n if not rc: return None\n return lineA.PointAt(a), lineB.PointAt(b)", "def _lines_intersect(self, line1, line2):\n return self._lines_overlap_on_x_axis(line1, line2) and self._lines_overlap_on_y_axis(line1, line2)", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def inside_of_line_2d(pt1, pt2, reference_point, pt, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmInsideOfLineWithTol(pt1, pt2, reference_point, pt, tol)", "def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, 
p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)", "def lineArcIntersectXY(l,c,inside=True,params=False):\n \n if len(c) == 3:\n norm = c[2]\n if dist(norm,vect(0,0,1)) > epsilon:\n raise ValueError('arc passed to lineArcIntersectXY does not lie in x-y plane')\n points = l + [ c[0] ]\n if not isXYPlanar(points):\n raise ValueError('line and circle passed to lineArcIntersectXY do not all lie in same x-y plane')\n return _lineArcIntersectXY(l,c,inside,params)", "def linebbox(l):\n p1=l[0]\n p2=l[1]\n return [ point(min(p1[0],p2[0]),min(p1[1],p2[1]),min(p1[2],p2[2])),\n point(max(p1[0],p2[0]),max(p1[1],p2[1]),max(p1[2],p2[2])) ]", "def intersect(self, other: Line | Segment) -> list[Point]:\n if self.dim == 2:\n return list(distinct(self.edges.intersect(other)))\n\n if isinstance(other, Segment):\n try:\n result = self._plane.meet(other._line)\n except LinearDependenceError as e:\n if isinstance(other, Segment):\n other = cast(Segment, other[~e.dependent_values])\n result = cast(Plane, self._plane[~e.dependent_values]).meet(other._line)\n return list(\n result[Polygon(self[~e.dependent_values], copy=False).contains(result) & other.contains(result)])\n else:\n return list(result[self.contains(result) & other.contains(result)])\n\n try:\n result = self._plane.meet(other)\n except LinearDependenceError as e:\n if other.cdim > 0:\n other = other[~e.dependent_values]\n result = cast(Plane, self._plane[~e.dependent_values]).meet(other)\n return list(result[Polygon(self[~e.dependent_values], copy=False).contains(result)])\n else:\n return list(result[self.contains(result)])", "def intersects(a0, a1, b0, b1):\n # First line is vertical\n if a0[0] == a1[0]:\n # Both lines are vertical\n if b0[0] == b1[0]:\n return (a0[0] == b0[0]) and (in_range(b0[1], a0[1], a1[1]) or in_range(b1[1], a0[1], a1[1]))\n eqn = get_eqn(b0, b1)\n y = apply_eqn(eqn, a0[0])\n return in_range(y, a0[1], a1[1])\n # Only second line is vertical\n if b0[0] == b1[0]:\n eqn = get_eqn(a0, a1)\n y = apply_eqn(eqn, b0[0])\n return in_range(y, b0[1], b1[1])\n # Parallel lines\n eqn0 = get_eqn(a0, a1)\n eqn1 = get_eqn(b0, b1)\n if eqn0[0] == eqn1[0]:\n if eqn0[1] != eqn1[1]:\n return False\n return in_range(a0[0], b0[0], b1[0]) or in_range(a1[0], b0[0], b1[0])\n # Get intersection\n i = intersection(eqn0, eqn1)\n # Check if intersection is between end points\n return in_range(i[0], a0[0], a1[0]) and in_range(i[0], b0[0], b1[0]) and in_range(i[1], a0[1], a1[1]) and in_range(i[1], b0[1], b1[1])", "def getAllIntersections(vLines, hLines):\r\n intersections = []\r\n\r\n for vLine in vLines:\r\n for hLine in hLines:\r\n intersections.append(getIntersection(vLine, hLine))\r\n\r\n return sortFourPoints(intersections, True)", "def free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or 
intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, coord_bot, distance", "def find_circle_line_intersection(P0, r0, P1):\n\t\n\tx_offset, y_offset = P0\n\tx0, y0 = 0, 0\n\tx1, y1 = P1\n\n\tx1, y1 = x1 - x_offset, y1 - y_offset\n\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tdr = math.sqrt(dx*dx + dy*dy)\n\n\tD = x0*y1 - x1*y0\n\n\tdelta0 = r0*r0*dr*dr - D*D\n\n\tx2 = (D*dy + sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty2 = (D*dx + math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx3 = (D*dy - sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty3 = (D*dx - math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx2 += x_offset\n\tx3 += x_offset\n\ty2 += y_offset\n\ty3 += y_offset\n\n\treturn np.array([[x2, y2], [x3, y3]])", "def compute_intersection(lattice1: NDArrayInt, lattice2: NDArrayInt, row_wise: bool = True):\n l1 = to_row_wise(lattice1, row_wise)\n l2 = to_row_wise(lattice2, row_wise)\n\n denom1 = int(np.around(np.linalg.det(l1))) ** 2\n denom2 = int(np.around(np.linalg.det(l2))) ** 2\n denom = denom1 * denom2 // gcd(denom1, denom2)\n\n # dual(intersection(l1, l2)) = union(dual(l1), dual(l2))\n d1 = np.around(compute_dual(l1) * denom).astype(int)\n d2 = np.around(compute_dual(l2) * denom).astype(int)\n dunion = compute_union(d1, d2)\n ret = np.around(compute_dual(dunion) * denom).astype(int)\n\n if not row_wise:\n ret = ret.T\n\n return ret", "def get_intersect(pair1, pair2):\n # calculate the homogeneous coords\n tmp = np.vstack((pair1, pair2))\n h = np.hstack((tmp, np.ones((4, 1))))\n\n # line through each pair of points\n l1 = np.cross(h[0], h[1])\n l2 = np.cross(h[2], h[3])\n\n # get the intersect\n x, y, z = np.cross(l1, l2)\n x /= z\n y /= z\n return x, y", "def find_line_model(points):\n\n # [WARNING] vertical and horizontal lines should be treated differently\n # here we just add some noise to avoid division by zero\n\n # find a line model for these points\n m = (points[1, 1] - points[0, 1]) / (\n points[1, 0] - points[0, 0] + sys.float_info.epsilon) # slope (gradient) of the line\n c = points[1, 1] - m * points[1, 0] # y-intercept of the line\n\n return m, c", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def get_shape_line_intersections(cls, shape, line):\n shape_inter = IntCurvesFace_ShapeIntersector()\n 
shape_inter.Load(shape, 1e-3)\n shape_inter.PerformNearest(line, float(\"-inf\"), float(\"+inf\"))\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n intersections = [(shape_inter.Pnt(i), shape_inter.Face(i), line) for i in\n range(1, shape_inter.NbPnt() + 1)] # Indices start at 1 :(\n return intersections", "def find_intersection(A, B, C, D):\n \n a1, b1, c1 = line_equation(A.x, A.y, B.x, B.y)\n a2, b2, c2 = line_equation(C.x, C.y, D.x, D.y)\n \n Y = - np.array([[c1],\n [c2]])\n M = np.array([[a1, b1],\n [a2, b2]])\n\n X = np.linalg.solve(M, Y)\n intersection = Coordinates(X[0], X[1])\n \n return intersection", "def line_line_intersection(a1: Vector3, a2: Vector3, b1: Vector3, b2: Vector3) -> Vector3:\n # From https://stackoverflow.com/a/20677983/7245441\n\n def det(a: Vector3, b: Vector3) -> float:\n return a.x * b.y - a.y * b.x\n\n y_diff = Vector3(a1.y - a2.y, b1.y - b2.y, 0)\n x_diff = Vector3(a1.x - a2.x, b1.x - b2.x, 0)\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise Exception(\"Lines do not intersect\")\n\n d = Vector3(det(a1, a2), det(b1, b2), 0)\n x = det(d, x_diff) / div\n y = det(d, y_diff) / div\n\n return Vector3(x, y, 0)", "def next_in_hull(p, v, L): \r\n N = normalize(p, L)\r\n if N != []:\r\n q = N[0]\r\n index = 0\r\n for k in range(1, len(N)):\r\n if (N[k] - q).dot(v) >= 0: # points on support line included\r\n q = N[k]\r\n index = k\r\n \r\n return index", "def matchlines(nlines, wl, z, eml):\n lbdas = np.array(list(eml.keys()))\n a = (wl[:, np.newaxis] / (1 + z) - lbdas[np.newaxis, :]) ** 2.0\n jfound = np.argmin(a, axis=1)\n error = np.diag(a[:, jfound]).sum()\n error = np.sqrt(error / nlines)\n if((nlines >= 2)and(jfound[0] == jfound[1])):\n error = 15.\n return(error, jfound)", "def ando_lin(eigen_values):\n\n pos, neg, _ = cluster_eignvalues(eigen_values)\n\n sp = sumsq(pos)\n sn = sumsq(neg)\n\n try:\n return 1 + max([sp / sn, sn / sp])\n except:\n raise Exception(\n f\"Division by zero in Ando-Lin bound construction: negative = {sn}, positive = {sp}\"\n )", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)" ]
[ "0.68824154", "0.6712293", "0.654148", "0.64509463", "0.64332", "0.6398922", "0.6371681", "0.6350881", "0.6311801", "0.6303512", "0.62909174", "0.6267723", "0.62216777", "0.6182562", "0.61743903", "0.6087039", "0.6078734", "0.6011505", "0.60104203", "0.5943569", "0.5912073", "0.59040856", "0.5883364", "0.58811575", "0.58544254", "0.5848009", "0.5833881", "0.5815353", "0.5814023", "0.5809542", "0.5802485", "0.58006066", "0.57737714", "0.57643", "0.5759404", "0.5759064", "0.5742946", "0.5736111", "0.570998", "0.5706321", "0.56738055", "0.5670489", "0.5660822", "0.5647064", "0.56424403", "0.563473", "0.5632178", "0.56201214", "0.56168896", "0.56150407", "0.5612337", "0.56053764", "0.56023115", "0.5589012", "0.5575439", "0.55638546", "0.5561028", "0.5550253", "0.5533486", "0.5520919", "0.5508735", "0.55035496", "0.549906", "0.54972917", "0.5493814", "0.5492936", "0.54906386", "0.5485016", "0.54774505", "0.54654574", "0.54648614", "0.5463456", "0.5456804", "0.54512095", "0.54445904", "0.54380816", "0.54308116", "0.54304296", "0.54263043", "0.5417149", "0.54138947", "0.54110426", "0.54039663", "0.54030865", "0.5401199", "0.53973114", "0.5388436", "0.53879553", "0.53824925", "0.53797615", "0.5373223", "0.537155", "0.5369953", "0.5368914", "0.5368128", "0.5363319", "0.53622526", "0.535721", "0.53535527", "0.5349344" ]
0.6902687
0
Get intersection with some basis line
def an_intersection(v1, b1):
    try:
        return intersection(v1, b1, np.array([1,1]), 0)
    except np.linalg.linalg.LinAlgError:
        print v1
        return intersection(v1, b1, np.array([-1,1]), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def intersection(self, line):\n\t\tdenom = (line.b[1]-line.a[1])*(self.b[0]-self.a[0]) - (line.b[0]-line.a[0])*(self.b[1]-self.a[1])\n\t\t# denominator is 0 if lines are parallel\n\t\tif denom == 0:\n\t\t\treturn None\n\t\t\n\t\tnum_a = (line.b[0]-line.a[0])*(self.a[1]-line.a[1]) - (line.b[1]-line.a[1])*(self.a[0]-line.a[0])\n\t\tnum_b = (self.b[0]-self.a[0])*(self.a[1]-line.a[1]) - (self.b[1]-self.a[1])*(self.a[0]-line.a[0])\n\t\t# if both numerators are 0 then lines are coincident\n\t\tif num_a==0 and num_b==0:\n\t\t\treturn None\n\t\t\t\n\t\tu_a = num_a/denom\n\t\tu_b = num_b/denom\n\t\t\t\n\t\tif 0 <= u_a <= 1 and 0 <= u_b <= 1:\n\t\t\treturn self.a + uA*(self.b-self.a)\n\t\telse:\n\t\t\treturn None", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def intersection(line1, line2):\n rho1, theta1 = line1\n rho2, theta2 = line2\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return [x0, y0]", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def lineintersect(line1,line2):\n a1, a2, b1, b2=line1[0],line1[1],line2[0],line2[1]\n\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines 
are parallel\n return (float('inf'), float('inf'))\n return (x/z, y/z)", "def intersection(x, y, f, p):", "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def intersection(self, axis2):", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack((a1, a2, b1, b2)) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return None\n return np.array([x / z, y / z])", "def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. 
\n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]", "def line_segment_intersection(line1,\n line2):\n a = float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))", "def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)", "def line_intersection_with(self, other):\n # solve following system :\n # intersection = start of self + alpha * direction of self\n # intersection = start of other + beta * direction of other\n directions = [s.endpoints[1] - s.endpoints[0] for s in (self, other)]\n denominator = directions[0].cross_product(directions[1])\n if abs(denominator) < 0.000001:\n # almost parallel lines\n return\n start_diff = other.endpoints[0] - self.endpoints[0]\n alpha = 
start_diff.cross_product(directions[1]) / denominator\n return self.endpoints[0] + directions[0] * alpha", "def get_line_intersects_line(self) -> List[List[Line]]:\n intersections = []\n\n for line_bin in self.line_bins.values():\n for connection_pair in itertools.combinations(line_bin, 2):\n line_segments = (\n connection_pair[0].line_segments + connection_pair[1].line_segments\n )\n\n for segment_pair in itertools.combinations(line_segments, 2):\n if check_cross(segment_pair[0], segment_pair[1]):\n intersections.append(connection_pair)\n # for line_bin in self.line_bins.values():\n # segments = []\n # line_idx_map = []\n # for line_1, line_2 in itertools.combinations(line_bin, 2):\n # for segment in line_1.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_1)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n # for segment in line_2.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_2)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n #\n # for collision_point in segments_intersections(segments).values():\n # for intersection in collision_point:\n # intersections.append([line_idx_map[i] for i in intersection])\n return intersections", "def test_intersect_line_in_one_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 1", "def line_intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def intersect_ext(self, line):\n res, p, v = self.intersect(line)\n v0 = self.p0 - self.c\n v1 = p - self.c\n u = self.signed_angle(v0, v1) / self.da\n return res and u > 0 and v > 0 and u < 1 and v < 1, p, u, v", "def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack([a1, a2, b1, b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return float('inf'), float('inf')\n return x / z, y / z", "def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}", "def intersection_line_line(ab, cd):\n a, b = ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = 
intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]", "def intersect_ext(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0, 0\n dp = line.p - self.p\n c2 = self.cross_z\n u = c.dot(dp) / d\n v = c2.dot(dp) / d\n return u > 0 and v > 0 and u < 1 and v < 1, self.lerp(u), u, v", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def get_intersect(a1, a2, b1, b2):\r\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\r\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\r\n l1 = np.cross(h[0], h[1]) # get first line\r\n l2 = np.cross(h[2], h[3]) # get second line\r\n x, y, z = np.cross(l1, l2) # point of intersection\r\n if z == 0: # lines are parallel\r\n return (float('inf'), float('inf'))\r\n return (x/z, y/z)", "def _intersection_homogenous(homog_line_0, homog_line_1):\n # NB: renamed from '_intersection'\n eps = 1e-13\n a,b,c=homog_line_0\n u,v,w=homog_line_1\n D=float(b*u-v*a)\n if abs(D)<eps:\n # parallel lines\n return None, None\n xp=-(w*b-c*v)/D\n yp= (w*a-c*u)/D\n\n return xp, yp", "def intersect_shape_by_line(topods_shape, line, low_parameter=0.0, hi_parameter=float(\"+inf\")):\n from OCC.Core.IntCurvesFace import IntCurvesFace_ShapeIntersector\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(topods_shape, TOLERANCE)\n shape_inter.PerformNearest(line, low_parameter, hi_parameter)\n\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n return (shape_inter.Pnt(1),\n shape_inter.Face(1),\n shape_inter.UParameter(1),\n shape_inter.VParameter(1),\n shape_inter.WParameter(1))", "def get_intersection(self, particle): \n\t\tline_string_coord = particle.line_coordinates()\n\t\ttrajectory = LineString(line_string_coord)\n\t\tintersection = self.line.intersection(trajectory)\n\t\treturn intersection", "def intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def bimedianIntersection(self):\n l1 = self.bimedianOnLine(idx=0)\n l2 = self.bimedianOnLine(idx=1)\n return l1.intersectionWith(l2)", "def lineLineIntersectXY(l1,l2,inside=True,params=False):\n\n x1=l1[0][0]\n y1=l1[0][1]\n z1=l1[0][2]\n \n x2=l1[1][0]\n y2=l1[1][1]\n z2=l1[1][2]\n\n x3=l2[0][0]\n y3=l2[0][1]\n z3=l2[0][2]\n \n x4=l2[1][0]\n y4=l2[1][1]\n z4=l2[1][2]\n\n ## check for x,y planar consistency\n if abs(z2-z1) > epsilon or abs(z3-z1) > epsilon or abs(z4-z1) > epsilon:\n raise ValueError('lines not in same x-y plane')\n\n ## do lines intersect anywhere?\n denom=(x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)\n if denom*denom < epsilon:\n return False\n\n ## the lines do intersect, so let's see if they intersect\n ## inside both line segments\n t = 
((x1-x3)*(y3-y4) - (y1-y3)*(x3-x4))/denom\n u = -1 * ((x1-x2)*(y1-y3) - (y1-y2)*(x1-x3))/denom\n\n ## return the paramater space intersection\n if params:\n return [t,u]\n \n ## do we care about falling inside the line segments? if so,\n ## check that the intersection falls within\n if inside and ( t < 0.0 or t > 1.0 or u < 0.0 or u > 1.0):\n return False\n\n return [x1 + t*(x2-x1), y1+t*(y2-y1), z1, 1.0]", "def IntersectWithLine(self, , , p_float_6, p_float_7, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)", "def segmentsIntersect(self, other, allowProjInt = False):\n \n \"\"\"\n If we are not allowing projected intersection and the bounding boxes\n do not intersect then return -3, None.\n \"\"\"\n if(not(allowProjInt) and not(self.doBoundingBoxesIntersect(other))): return -3, None #return if bounding boxes do not intersect\n \"\"\" A special case for colinear lines. \"\"\" \n if(self.areColinear(other)):\n \"\"\"\n First place all four endpoint into a set. This will elliminate shared\n end points. Next, convert the set back into a list so it can\n finally be sorted.\n \"\"\"\n pointList = sorted(list(set([self.start, self.end, other.start, other.end])), key=self.calcT) \n if len(pointList) == 3:\n \"\"\"\n if there are only three points in the list then return 2, the\n middle point in the list since it is the shared point of the\n two lines.\n \"\"\"\n return 2, pointList[1] #if they are colinear and two ends have the same point return that point\n elif len(pointList) == 2:\n \"\"\" If the two lines have the same endpoints. \"\"\"\n return 2.5, self.getMidPoint()\n else:\n \"\"\"\n If the length was not three then we know it is length 4 in which case\n we turn the two middle points into a line and return 3, the line's\n midpoint.\n \"\"\"\n tempLine = Line(pointList[1], pointList[2])\n return 3, tempLine.getMidPoint() #If they are colinear return half way inbetween middle two points\n \"\"\"\n To calculate the intersection of two points we put the lines into the\n form P+tr and Q+us where P and Q are the starting points of the lines\n r and s are vectors form the starting point to the end point, and\n t and u are scalars. Set the two equations equal to each other and \n then solve for t and u. If t and u are in the range [0-1] then the\n intersection point lines on the lines, else it is a projected point.\n \"\"\"\n r = np.subtract(self.end.get2DPoint(), self.start.get2DPoint())\n s = np.subtract(other.end.get2DPoint(), other.start.get2DPoint())\n Q_Less_P = np.subtract(other.start.get2DPoint(), self.start.get2DPoint())\n denom = np.cross(r, s)*1.0\n t = np.cross(Q_Less_P, s)/denom\n u = np.cross(Q_Less_P, r)/denom \n point = p.Point(self.start.x + r[c.X]*t, self.start.y+r[c.Y]*t) \n #If t or u are not in the range 0-1 then the intersection is projected\n if(t > 1 or u > 1 or t < 0 or u < 0):\n \"\"\"\n Due to floating point problems sometimes if t or u is outside the 0-1\n range we end up inside this if statement but are actually at the end\n of one of the lines. 
I can't figure out how to properly add in a tolerance\n so we are taking the four end points putting them into a list,\n then comparing them to the calculated point. The Point module is\n properly handling tolerances so if the point == any of the end\n points then we should not return a projected point.\n \"\"\"\n if not any(point == lineEnd for lineEnd in (self.start, self.end,\n other.start, other.end)):\n return -1, point #return for projected intersection of non-colinear lines\n return 1, point #lines intersect at given point", "def LineLineIntersection(lineA, lineB):\n lineA = rhutil.coerceline(lineA, True)\n lineB = rhutil.coerceline(lineB, True)\n rc, a, b = Rhino.Geometry.Intersect.Intersection.LineLine(lineA, lineB)\n if not rc: return None\n return lineA.PointAt(a), lineB.PointAt(b)", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def intersection(self, other):\n return self._geomgen(capi.geom_intersection, other)", "def intersection_with(self, other):\n\n if self.gradient == other.gradient:\n # Lines of the same gradient never intersect.\n return None\n\n # Calculate the X and Y values of this intersection using linear algebra.\n x = (other.y_intercept - self.y_intercept) / (self.gradient - other.gradient)\n y = self.gradient * x + self.y_intercept\n\n # If this or the other line belong to a shape, add it to a new set of shapes\n # involved in this intersection.\n shapes = filter((lambda o: o is not None), (self.shape, other.shape))\n return Intersection(x, y, shapes)", "def lines_intersect_2d(line1_pt1, line1_pt2, line2_pt1, line2_pt2):\r\n return geometry.gmLinesIntersect(line1_pt1, line1_pt2, line2_pt1, line2_pt2)", "def test_does_intersect() -> None:\n line_1 = Line(k=1, n=0)\n line_2 = Line(k=2.5, n=1)\n line_3 = Line(k=2.5, n=3)\n\n assert line_1.does_intersect(line_1) == True\n assert line_1.does_intersect(line_2) == True\n assert line_2.does_intersect(line_3) == False", "def get_shape_line_intersections(cls, shape, line):\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(shape, 1e-3)\n shape_inter.PerformNearest(line, float(\"-inf\"), float(\"+inf\"))\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n intersections = [(shape_inter.Pnt(i), shape_inter.Face(i), line) for i in\n range(1, shape_inter.NbPnt() + 1)] # Indices start at 1 :(\n return intersections", "def test_intersect_line_in_no_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 0", "def intersects(a0, a1, b0, b1):\n # First line is vertical\n if a0[0] == a1[0]:\n # Both lines are vertical\n if b0[0] == b1[0]:\n return (a0[0] == 
b0[0]) and (in_range(b0[1], a0[1], a1[1]) or in_range(b1[1], a0[1], a1[1]))\n eqn = get_eqn(b0, b1)\n y = apply_eqn(eqn, a0[0])\n return in_range(y, a0[1], a1[1])\n # Only second line is vertical\n if b0[0] == b1[0]:\n eqn = get_eqn(a0, a1)\n y = apply_eqn(eqn, b0[0])\n return in_range(y, b0[1], b1[1])\n # Parallel lines\n eqn0 = get_eqn(a0, a1)\n eqn1 = get_eqn(b0, b1)\n if eqn0[0] == eqn1[0]:\n if eqn0[1] != eqn1[1]:\n return False\n return in_range(a0[0], b0[0], b1[0]) or in_range(a1[0], b0[0], b1[0])\n # Get intersection\n i = intersection(eqn0, eqn1)\n # Check if intersection is between end points\n return in_range(i[0], a0[0], a1[0]) and in_range(i[0], b0[0], b1[0]) and in_range(i[1], a0[1], a1[1]) and in_range(i[1], b0[1], b1[1])", "def line_line_intersection(a1: Vector3, a2: Vector3, b1: Vector3, b2: Vector3) -> Vector3:\n # From https://stackoverflow.com/a/20677983/7245441\n\n def det(a: Vector3, b: Vector3) -> float:\n return a.x * b.y - a.y * b.x\n\n y_diff = Vector3(a1.y - a2.y, b1.y - b2.y, 0)\n x_diff = Vector3(a1.x - a2.x, b1.x - b2.x, 0)\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise Exception(\"Lines do not intersect\")\n\n d = Vector3(det(a1, a2), det(b1, b2), 0)\n x = det(d, x_diff) / div\n y = det(d, y_diff) / div\n\n return Vector3(x, y, 0)", "def intersection(self, other):\n log.info('self: '+str(self)+' other: '+str(other))\n if self == other:\n # Used to be return True, that is definitely not right (expects Coordinate)\n # Do we want start or end ? Does it matter? Lines are the same, everything is\n # an intersection.\n return self.start\n # If any of the start/end points match, return that point.\n if self.end==other.start or self.end == other.end:\n return self.end \n if self.start==other.start or self.start == other.end: \n return self.start\n\n # Line equation: y = mx + b\n # m = (y2-y1)/(x2-x1)\n # B_self = y - M_self*x\n # Pick any x/y on the line - try end point\n # B_self = self.end.lat - M_self*self.end.lon\n # B_other = other.end.lat - M_self*self.end.lon\n from pyresample.spherical_geometry import Coordinate\n\n selfendlon = self.end.lon\n selfstartlon = self.start.lon\n otherendlon = other.end.lon\n otherstartlon = other.start.lon\n # Not sure if this is necessary, or good...\n# if self.end.lon < 0:\n# selfendlon = self.end.lon + 2*math.pi\n# if self.start.lon < 0:\n# selfstartlon = self.start.lon + 2*math.pi\n# if other.end.lon < 0:\n# otherendlon = other.end.lon + 2*math.pi\n# if other.start.lon < 0:\n# otherstartlon = other.start.lon + 2*math.pi\n\n log.info(' self lons: '+str(math.degrees(selfstartlon))+' '+str(math.degrees(selfendlon))+' other lons: '+str(math.degrees(otherstartlon))+' '+str(math.degrees(otherendlon)))\n\n # If both vertical, will be no intersection\n if abs(selfendlon - selfstartlon) < EPSILON and abs(otherendlon - otherstartlon) < EPSILON:\n log.info(' Both vertical, no intersection')\n return None\n # If self is vertical, but not parallel, intersection will be selfstartlon and lat = Mother*lon+B_other\n if abs(selfendlon - selfstartlon) < EPSILON:\n lon = selfstartlon\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n B_other = other.end.lat - M_other*otherendlon\n lat = M_other*lon+B_other\n log.info(' self is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > 
min([otherendlon,otherstartlon]) and\n lon < max([otherendlon,otherstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n # same for other\n if abs(otherendlon - otherstartlon) < EPSILON:\n lon = otherstartlon\n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n B_self = self.end.lat - M_self*selfendlon\n lat = M_self*lon+B_self\n log.info(' other is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and \n lon > min([selfendlon,selfstartlon]) and\n lon < max([selfendlon,selfstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS Use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n\n \n\n # Get slopes of the lines \n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n \n # If they are parallel, no intersection\n if (M_self-M_other) < EPSILON:\n log.info(' self and other are parallel, no intersection')\n return None\n\n # Get the y-intercepts of the lines \n B_self = self.end.lat - M_self*selfendlon\n B_other = other.end.lat - M_other*otherendlon\n\n # Solve the equation\n # y=m1x+b1 and y=m2x+b2, equate y's so m1x+b1=m2x+b2, x = (b1-b2)/(m2-m1)\n # equate x's so x=(y-b1)/m1=(y-b2)/m2, y = (b1m2-b2m1)/(m2-m1)\n lon = (B_self - B_other)/(M_other - M_self)\n lat = (B_self*M_other - B_other*M_self)/(M_other-M_self)\n\n # Make sure lat/lon intersects within the line segment, and not outside.\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and \n lon < max([otherendlon,otherstartlon]) and\n lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([selfendlon,selfstartlon]) and \n lon < max([selfendlon,selfstartlon])):\n log.info(' self and other intersect within segment')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS use wrap longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n log.info(' self and other intersect, but not within segment')\n return None", "def _lines_intersect(self, line1, line2):\n return self._lines_overlap_on_x_axis(line1, line2) and self._lines_overlap_on_y_axis(line1, line2)", "def intersection(self, other): # -> BaseGeometry:\n ...", "def test_union_intersection():\n X = np.random.randn(d, 100)\n assert np.array_equal(lincon.indicator_intersection(X), 1-lincon.indicator_union(X))", "def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result", "def get_line_circle_intersections(A, B, C, r):\n Lx = B[0] - A[0]\n Ly = B[1] - A[1]\n Lz = B[2] - A[2]\n\n # stranger things\n D = Lx**2 + Ly**2\n E = 2 * ( Lx * (A[0] - C[0]) + Ly * (A[1] - C[1]) )\n F = (\n (A[0] - C[0])**2\n + (A[1] - C[1])**2\n - r**2\n )\n det = E**2 - 4 * D * F\n \n # declare null vectors\n P1 = [0, 0, 0]\n P2 = [0, 0, 0]\n t1 = t2 = None\n eps = .00001\n if ( not (D <= eps) or (det < 0) ):\n if det == 0:\n print \"tangential intersection found\",\n t1 = t2 = -E / (2*D)\n else:\n print \"pass-through intersection found\",\n t1 = ( (-E + 
math.sqrt(det)) / (2 * D) )\n t2 = ( (-E - math.sqrt(det)) / (2 * D) )\n P1[0] = A[0] + t1 * Lx\n P1[1] = A[1] + t1 * Ly\n P1[2] = A[2] + t1 * Lz\n P2[0] = A[0] + t2 * Lx\n P2[1] = A[1] + t2 * Ly\n P2[2] = A[2] + t2 * Lz\n else:\n print \"no intersections are available\",\n\n return P1, P2", "def intersection(self, L):\n if self.slope() == L.slope():\n return None\n intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)\n intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)\n\n return (intpt_xcood, intpt_ycood)", "def intersects(connection, blocker):\n # this function solves two bounded lines for the point of intersection.\n # if (x,y) is in the domain of both of the lines this function return true.\n cslope = float(connection[0][1] - connection[1][1]) / (connection[0][0] - connection[1][0])\n bslope = float(blocker[0][1] - blocker[1][1]) / (blocker[0][0] - blocker[1][0])\n if cslope != bslope: # check for parallelism.\n dm = float(cslope - bslope)\n cintercept = float(connection[0][1] - cslope * connection[0][0])\n bintercept = float(blocker[0][1] - bslope * blocker[0][0])\n db = float(cintercept - bintercept)\n ix = -db/dm # solving for x\n iy = cslope*ix + cintercept # solving for y.\n # now we have the point of interception but is it on the domain\n # of **both** lines?\n cdomain = sorted([connection[0][0], connection[1][0]])\n bdomain = sorted([blocker[0][0], blocker[1][0]])\n if cdomain[0] < ix and cdomain[1] > ix and bdomain[0] < ix and bdomain[1] > ix:\n # the point of intersection is on the domain of both lines.\n return True\n # slopes are equal, or the point of intersection is not on the domain\n # of both lines.\n return False", "def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def linesegment_plane_intersection(self, p0,p1,point,normal): # only returns lines...intersections through the segment end points are ignored\n\t\tp0dot=numpy.dot(p0-point,normal)\n\t\tp1dot=numpy.dot(p1-point,normal)\n\t\tif (p0dot>0 and p1dot<0) or (p0dot<0 and p1dot>0): \n\t\t\t# if the dot products have opposing signs, then the line intersects the plane\n\t\t\treturn True,p0+(p1-p0)*abs(p0dot)/(abs(p0dot)+abs(p1dot))\n\t\telse:\n\t\t\treturn False", "def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n vec = scale_vector(v1, fac)\n 
return add_vectors(pt1, vec)\n else:\n return None", "def intersects(self, other_line):\n intpt= self.intersection(other_line)\n return bool(intpt)", "def intersects(self, line):\n\t\treturn self.intersection(lint) != None", "def intersection(self, pn1, pn2, h):\n #print \"intersectionection:\", pn1, pn2, h\n #print \"z: \", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0]\n return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def segmented_intersections(lines):\r\n\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections", "def get_intersect_lines(self, p10, p11, p20, p21):\n t = (p20 - p10) / (p11 - p10 - p21 + p20)\n return p10 + t * (p11 - p10)", "def find_intersection(A, B, C, D):\n \n a1, b1, c1 = line_equation(A.x, A.y, B.x, B.y)\n a2, b2, c2 = line_equation(C.x, C.y, D.x, D.y)\n \n Y = - np.array([[c1],\n [c2]])\n M = np.array([[a1, b1],\n [a2, b2]])\n\n X = np.linalg.solve(M, Y)\n intersection = Coordinates(X[0], X[1])\n \n return intersection", "def intersectionOnBaseline( thisLayer ):\n\tgoodMeasure = 1\n\n\toriginX = thisLayer.bounds.origin.x - goodMeasure\n\toriginPoint = NSPoint( originX, 0.0 )\n\ttargetX = originX + thisLayer.bounds.size.width + goodMeasure\n\ttargetPoint = NSPoint( targetX, 0.0 )\n\t\n\tlistOfIntersections = sliceIntersections( thisLayer, originPoint, targetPoint )\n\t\n\tprint(\"intersectionOnBaseline:\", listOfIntersections, originPoint, targetPoint)\n\tif listOfIntersections:\n\t\trightmostIntersection = listOfIntersections[-2].pointValue()\n\t\treturn rightmostIntersection\n\telse:\n\t\treturn None", "def lines_intersect(x1, y1, x2, y2, a1, b1, a2, b2):\n\n\t# Ensures that x1 < x2 \n\t(x1, x2, y1, y2) = (x1, x2, y1, y2) if x1 < x2 else (x2, x1, y2, y1) \n\t(a1, a2, b1, b2) = (a1, a2, b1, b2) if a1 < a2 else (a2, a1, b2, b1) \n\t\n\t# Make lines same domain\n\tif x1 > a1:\n\t\tif x1 > a2 or a1 == a2:\n\t\t\treturn False \n\n\t\ta = x1 \n\telse:\n\t\tif a1 > x2 or x1 == x2:\n\t\t\treturn False\n\t\t\n\t\ta = a1 \n\n\tif x2 < a2:\n\t\tif x2 < a1 or a1 == a2:\n\t\t\treturn False \n\n\t\tb = x2\n\telse:\n\t\tif a2 < x1 or x1 == x2:\n\t\t\treturn False \n\n\t\tb = a2\n\n\tif x1 != x2:\n\t\tx1, y1, x2, y2 = trim_line(x1, y1, x2, y2, a, b)\n\tif a1 != a2:\n\t\ta1, b1, a2, b2 = trim_line(a1, b1, a2, b2, a, b)\n\n\t\n\treturn (y1 >= b1 and y2 <= b2) or (y1 <= b1 and y2 >= b2)", "def intersection(self, other):\n return _binary_geo(arctern.ST_Intersection, self, other)", "def free_line(p, eps, s, dps1, dps2, ds):\n px = p[0]\n py = p[1]\n s1x = s[0, 0]\n s1y = s[0, 1]\n s2x = s[1, 0]\n s2y = s[1, 1]\n if s1x == s2x and s1y == s2y:\n if eucl_dist(p, s[0]) > eps:\n lf = [-1, -1]\n else:\n lf = [0, 1]\n else:\n if point_to_seg(p, s[0], s[1], dps1, dps2, ds) > eps:\n # print(\"No Intersection\")\n lf = [-1, -1]\n else:\n segl = eucl_dist(s[0], s[1])\n segl2 = segl * segl\n intersect = circle_line_intersection(px, py, s1x, s1y, s2x, s2y, eps)\n if intersect[0][0] != intersect[1][0] or intersect[0][1] != intersect[1][1]:\n i1x = intersect[0, 0]\n i1y = intersect[0, 1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n\n i2x = intersect[1, 0]\n i2y = intersect[1, 1]\n u2 = (((i2x - s1x) * (s2x - s1x)) + ((i2y - s1y) * (s2y - 
s1y))) / segl2\n ordered_point = sorted((0, 1, u1, u2))\n lf = ordered_point[1:3]\n else:\n if px == s1x and py == s1y:\n lf = [0, 0]\n elif px == s2x and py == s2y:\n lf = [1, 1]\n else:\n i1x = intersect[0][0]\n i1y = intersect[0][1]\n u1 = (((i1x - s1x) * (s2x - s1x)) + ((i1y - s1y) * (s2y - s1y))) / segl2\n if 0 <= u1 <= 1:\n lf = [u1, u1]\n else:\n lf = [-1, -1]\n return lf", "def _any_intersect(line_set_1, line_set_2, h_set_1=None, h_set_2=None):\n # try to speed this one up by reducing the number of comparisons\n # necessary. About half the time is spent in here, incl. subroutines.\n h_set_1 = [_homogenous_line(*segment) for segment in line_set_1] if h_set_1 is None else h_set_1\n h_set_2 = [_homogenous_line(*segment) for segment in line_set_2] if h_set_2 is None else h_set_2\n for h1, l1 in zip(h_set_1, line_set_1):\n for h2, l2 in zip(h_set_2, line_set_2):\n P = _intersection_homogenous(h1, h2)\n if P==(None, None): continue\n if _point_within_bounds(l1,P) and _point_within_bounds(l2,P):\n return True\n return False", "def intersection(a, b):\n x = max(a[0],b[0])\n y = max(a[1],b[1])\n w = min(a[2],b[2]) - x\n h = min(a[3],b[3]) - y\n \n if h<0 or w<0 :\n return 0\n \n return h*w", "def intersection(self, segment):\n p0, p1 = segment.p0, segment.p1\n\n # x = t*(p1 - p0) + p0\n # n'*(x - origin) = 0\n # combine to get\n # n'*(t*(p1-p0) + p0 - origin) = 0\n # solve for t\n\n v = p1 - p0\n w = p0 - self.origin\n t = -np.dot(self.normal, w)/np.dot(self.normal, v)\n\n if 0-epsilon <= t <= 1+epsilon:\n return t*(p1-p0) + p0\n else:\n return None", "def find_circle_line_intersection(P0, r0, P1):\n\t\n\tx_offset, y_offset = P0\n\tx0, y0 = 0, 0\n\tx1, y1 = P1\n\n\tx1, y1 = x1 - x_offset, y1 - y_offset\n\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tdr = math.sqrt(dx*dx + dy*dy)\n\n\tD = x0*y1 - x1*y0\n\n\tdelta0 = r0*r0*dr*dr - D*D\n\n\tx2 = (D*dy + sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty2 = (D*dx + math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx3 = (D*dy - sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty3 = (D*dx - math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx2 += x_offset\n\tx3 += x_offset\n\ty2 += y_offset\n\ty3 += y_offset\n\n\treturn np.array([[x2, y2], [x3, y3]])", "def intersect_segment(self, p1, p2):\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n \n\n z1 = self.line * p1\n z2 = self.line * p2\n\n if np.sign(z1) != np.sign(z2):\n return True\n if self.contains(p1) or self.contains(p2):\n return True\n return False", "def segmented_intersections(lines):\n\n intersections = []\n for i, group in enumerate(lines[:-1]):\n for next_group in lines[i+1:]:\n for line1 in group:\n for line2 in next_group:\n intersections.append(intersection(line1, line2)) \n\n return intersections", "def intersect(self, other: Line | Segment) -> list[Point]:\n if self.dim == 2:\n return list(distinct(self.edges.intersect(other)))\n\n if isinstance(other, Segment):\n try:\n result = self._plane.meet(other._line)\n except LinearDependenceError as e:\n if isinstance(other, Segment):\n other = cast(Segment, other[~e.dependent_values])\n result = cast(Plane, self._plane[~e.dependent_values]).meet(other._line)\n return list(\n result[Polygon(self[~e.dependent_values], copy=False).contains(result) & other.contains(result)])\n else:\n return list(result[self.contains(result) & other.contains(result)])\n\n try:\n result = self._plane.meet(other)\n except LinearDependenceError as e:\n if other.cdim > 0:\n other = 
other[~e.dependent_values]\n result = cast(Plane, self._plane[~e.dependent_values]).meet(other)\n return list(result[Polygon(self[~e.dependent_values], copy=False).contains(result)])\n else:\n return list(result[self.contains(result)])", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def intersectConics(E1, E2):\n\n P = np.array([])\n r1 = matrix_rank(E1)\n r2 = matrix_rank(E2)\n \n if(r1==3 and r2==3):\n P = completeIntersection(E1,E2) \n else:\n if (r2 < 3): #E2 is degenerate\n defE = E2\n fullE = E1\n else:\n defE = E1 #E1 is degenerate\n fullE = E2\n m, l = decomposeDegenerateConic(defE)\n P1 = intersectConicLine(fullE,m)\n P2 = intersectConicLine(fullE,l)\n P = np.array([P1, P2])\n points_x = []\n points_y = []\n for i in range(2):\n P1 = P[i]\n if(P1.size!=0):\n for j in range(P1.shape[0]):\n points_x.append(P1[j,0]/P1[j,2])\n points_y.append(P1[j,1]/P1[j,2])\n return points_x, points_y", "def intersection(self, other):\n new_ieqs = []\n new_ieqs.extend(self.inequalities())\n new_ieqs.extend(other.inequalities())\n\n new_eqns = []\n new_eqns.extend(self.equations())\n new_eqns.extend(other.equations())\n\n return Polyhedron(ieqs = new_ieqs, eqns = new_eqns, \n field=self.coerce_field(other))", "def is_intersection_line_line(ab, cd, epsilon=1e-6):\n a, b = ab\n c, d = cd\n\n line_vector_1 = normalize_vector(vector_from_points(a, b))\n line_vector_2 = normalize_vector(vector_from_points(c, d))\n # check for parallel lines\n print(abs(dot_vectors(line_vector_1, line_vector_2)))\n if abs(dot_vectors(line_vector_1, line_vector_2)) > 1.0 - epsilon:\n return False\n # check for intersection\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n if dot_vectors(d_vector, subtract_vectors(c, a)) == 0:\n return True\n return False", "def intersection(self):\n return Intersection(self.source, self)", "def intersection(self, other):\n \n self_corners = self.corners\n\n other_corners = get_2d_false_corners(other)\n\n #shell()\n\n return planar_intersection_polygon(self_corners,other_corners)", "def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled", "def intersect_or_on(s1, s2, c1, c2):\n den = float( (c2.y - c1.y) * (s2.x - s1.x) - (c2.x - c1.x) * (s2.y - s1.y) )\n 
if not den:\n return None\n\n us = ((c2.x - c1.x) * (s1.y - c1.y) - (c2.y - c1.y) * (s1.x - c1.x)) / den\n uc = ((s2.x - s1.x) * (s1.y - c1.y) - (s2.y - s1.y) * (s1.x - c1.x)) / den\n\n if (0 <= us <= 1) and (0 <= uc <= 1):\n #subj and clip line intersect eachother somewhere in the middle\n #this includes the possibility of degenerates (edge intersections)\n x = s1.x + us * (s2.x - s1.x)\n y = s1.y + us * (s2.y - s1.y)\n return (x, y), us, uc\n else:\n return None", "def _intersection(x, y):\n a, b = x\n c, d = y\n return (d > a) and (c < b)", "def _get_first_intersection(self, trajectory: TrajectoryBase):\n for trajectory_line in trajectory.corners:\n for o in self.sprites:\n if not isinstance(o, Ball):\n intersection_result = o.get_intersection(trajectory_line)\n if intersection_result is not None:\n edge, point = intersection_result\n if trajectory.intersection is None:\n trajectory.set_intersection(point, trajectory_line, o, edge)\n elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:\n raise NotImplementedError(\"overlapping parallel edges not implemented\")\n elif (point.l2_distance(trajectory_line.start) <\n trajectory.intersection.l2_distance(trajectory.intersected_trajectory.start)):\n trajectory.set_intersection(point, trajectory_line, o, edge)", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def outline_to_mask(line, x, y):\n mpath = mplp.Path(line)\n X, Y = np.meshgrid(x, y)\n points = np.array((X.flatten(), Y.flatten())).T\n mask = mpath.contains_points(points).reshape(X.shape)\n return mask", "def intersect_point(self,m1,c1,m2,c2):\n\n x = (c2 - c1)/(m1 - m2)\n y = m1*x + c1\n return x, y", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave", "def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave" ]
[ "0.79033774", "0.7781142", "0.763585", "0.759173", "0.75264156", "0.7498257", "0.7412484", "0.73506904", "0.7265628", "0.72416824", "0.7208849", "0.72006744", "0.71884364", "0.7114838", "0.7074302", "0.7025444", "0.7015237", "0.70007503", "0.6991902", "0.6991328", "0.69785804", "0.69493425", "0.68704957", "0.6865662", "0.6863656", "0.6849469", "0.68454725", "0.6840753", "0.6837833", "0.68341476", "0.6796905", "0.67910343", "0.6779764", "0.6752647", "0.6743642", "0.67069405", "0.66422594", "0.66183853", "0.66020274", "0.65890706", "0.6586467", "0.6570659", "0.6566154", "0.6560746", "0.6556676", "0.6552417", "0.6547111", "0.6533708", "0.65313303", "0.6515298", "0.65137327", "0.6507623", "0.6499552", "0.64984214", "0.6469691", "0.6458404", "0.6433065", "0.6418899", "0.6407147", "0.6396921", "0.6388759", "0.6388666", "0.6383127", "0.6375654", "0.63384205", "0.6292925", "0.6282233", "0.6275086", "0.62643003", "0.62193125", "0.62181807", "0.6204603", "0.6201813", "0.6198585", "0.6198507", "0.619718", "0.61812234", "0.6179493", "0.6176007", "0.6173333", "0.616722", "0.6164217", "0.61621356", "0.61551255", "0.61394686", "0.6136902", "0.61267745", "0.61161375", "0.6111107", "0.6093928", "0.6093083", "0.6087905", "0.60857856", "0.6083174", "0.6066987", "0.60626346", "0.60511565", "0.6043189", "0.60239005", "0.60239005" ]
0.657359
41
Set up the LayZ Spa component.
async def async_setup(hass: HomeAssistant, config: dict):
    hass.data[DOMAIN] = {}
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, stage: Optional[str] = None) -> None:", "def SetUpLayerManager(self):\n pass", "def setup(self):\n self.ae = None", "def setup_class(self):\n # Blackbody: bb(5000)\n self.bb = SourceSpectrum(BlackBodyNorm1D, temperature=5000)\n\n # Gaussian emission line: em(5500, 250, 1e-13, flam)\n tf_unit = u.erg / (u.cm * u.cm * u.s)\n self.em = SourceSpectrum(GaussianFlux1D, mean=5500,\n total_flux=(1e-13 * tf_unit), fwhm=250)\n\n # ACS bandpass: band(acs,hrc,f555w)\n bandfile = get_pkg_data_filename(\n os.path.join('data', 'hst_acs_hrc_f555w.fits'),\n package='synphot.tests')\n self.acs = SpectralElement.from_file(bandfile)\n\n # Box bandpass: box(5500,1)\n self.abox = SpectralElement(Box1D, amplitude=1, x_0=5500, width=1)", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def setup(self):\n\n self.points = [[0.360502, 0.535494],\n [0.476489, 0.560185],\n [0.503125, 0.601218],\n [0.462382, 0.666667],\n [0.504702, 0.5]]\n self.max_neighbors = 4\n self.beta = 1\n self.graph = 'beta skeleton'\n self.edges = [0, 1, 0, 2, 0, 3, 0, 4,\n 1, 3, 1, 4,\n 2, 3, 2, 4,\n 3, 4]", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def setup(self):\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n\n orbit_info = {'index': 'slt', 'kind': 'lt'}\n self.tinst = pysat.Instrument('pysat', 'testing', orbit_info=orbit_info)\n self.tinst.bounds = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 2))\n\n self.warn_msgs = []\n self.war = \"\"\n return", "def setup(self):\n\n self.insts = []\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n self.insts.append(self.testInst)\n self.insts.append(self.testInst)\n\n self.dname = 'series_profiles'\n self.test_vals = np.arange(50) * 1.2\n\n self.testC = pysat.Constellation(instruments=self.insts)\n\n return", "def setup_component(self):\n self.conf, self.context = self._init_component()\n self.initialize()", "def setup( self ):", "def setup(self):\n switches, links = self.generateTopology()\n self.graph = KytosGraph()\n self.graph.clear()\n self.graph.update_nodes(switches)\n self.graph.update_links(links)\n self.graph.set_path_fun(nx.shortest_simple_paths)", "def setup(self):\n\n logger.info('Setting up SimulatedMaps module.')\n\n # Save the cls as a class attribute\n self.cls = self.read_cls()\n\n logger.info('Setup done!')", "def __init__(self, stage, layer=None, proxyPrimPrefix=\"\", renderPrimPrefix=\"\"):\n\n self._stage = stage\n\n # Layer\n if layer is None:\n self._layer = Sdf.Layer.CreateAnonymous()\n elif isinstance(layer, Sdf.Layer):\n self._layer = layer\n elif isinstance(layer, str):\n self._layer = Sdf.Layer.Open(layer)\n\n self._layer.SetMuted(True)\n self._stage.GetSessionLayer().subLayerPaths.append(self._layer.identifier)\n self._proxyPrimPrefix = proxyPrimPrefix\n self._renderPrimPrefix = renderPrimPrefix", "def setup_product():\n\n fiveconfigure.debug_mode = True\n import collective.geo.openlayers\n zcml.load_config('configuretest.zcml', collective.geo.openlayers)\n\n fiveconfigure.debug_mode = False", "def setup(self):\n\n 
self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self) -> None:", "def setup(self):\n\t\tpass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def generator_setup():\n PaaSPureGenerator()", "def __init__():\n self.placa = placa", "def setUp(self):\n self.shonan = ShonanAveraging3(\"toyExample.g2o\", DEFAULT_PARAMS)", "def setUpClass(cls):\n cls.config.setup_toolbox('ENVI', 'qa_envitaskengine_datatype_sarscapedataarray',\n 'test_datatype_sarscapedataarray')", "def setup(self):\r\n pass", "def initialize_substructure_production(self):\n\n self.wet_storage = WetStorage(self.env, float(\"inf\"))\n takt_time = self.config[\"offshore_substation_substructure\"].get(\"takt_time\", 0)\n attach_time = self.config[\"offshore_substation_topside\"].get(\"attach_time\", 24)\n to_assemble = [1] * self.num_substations\n\n self.assembly_line = SubstationAssemblyLine(to_assemble, takt_time, attach_time, self.wet_storage, 1)\n\n self.env.register(self.assembly_line)\n self.assembly_line.start()", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n self.dname = 'alt_profiles'\n self.test_vals = np.arange(50) * 1.2\n self.test_fracs = np.arange(50) / 50.0\n\n return", "def setup(self,**kwargs):\n pass", "def setup(self):\n pass", "def setup():\n env.total_time = 0\n boeing_737 = Airplane(env, 'Boeing 737', 20, 6, 1, False)\n boeing_737.make_rows()\n seats = boeing_737.get_seats()\n passengers = []\n\n # create a passenger for every seat on the plane\n for j in range(0, boeing_737.get_number_of_seats()):\n passenger = Passenger(env, seats[j], boeing_737)\n passengers.append(passenger)\n\n _algorithms = BoardingAlgorithm(env, boeing_737, passengers)\n\n return boeing_737, passengers, _algorithms", "def setup(self, *args, **kwargs):\n conf_file = os.environ.get(\"VCLUSTER_INFO\")\n if not conf_file:\n raise Exception(\n \"Environment variable VCLUSTER_INFO \"\n + \"not set to vcluster output configuration file\"\n )\n self.vcluster = vcluster.vcluster_from_conf(conf_file)\n\n self.component_name_map.update(\n {\n components.MesosMaster().name: \"mesos-master\",\n components.MesosAgent().name: \"mesos-slave\",\n components.Zookeeper().name: \"zookeeper\",\n components.HostMgr().name: \"hostmgr\",\n components.JobMgr().name: \"jobmgr\",\n components.ResMgr().name: \"resmgr\",\n components.BatchPlacementEngine().name: \"placement\",\n components.StatelessPlacementEngine().name: \"placement_stateless\",\n }\n )", "def setup(self):\n self.ctx.current_structure = self.inputs.structure", "def setup(self, *args, **kwargs):\n pass", "def setUp(self):\n from ampscan.core import AmpObject\n # Load 2 spheres with radius 1, and 1.2\n stl_path = get_path(\"stl_file_5.stl\") # R=1\n self.amp1 = 
AmpObject(stl_path)\n stl_path = get_path(\"stl_file_4.stl\") # R=1.2\n self.amp2 = AmpObject(stl_path)\n stl_path = get_path(\"stl_file.stl\")\n self.amp3 = AmpObject(stl_path)\n self.amp4 = AmpObject(stl_path)\n stl_path = get_path(\"cone1.stl\")\n self.cone1 = AmpObject(stl_path)\n stl_path = get_path(\"cone2.stl\")\n self.cone2 = AmpObject(stl_path)", "def setup(self, stage: Optional[str] = None) -> None:\n if self.dataset_type == \"hdf5\":\n DS = SegmentationHDF5Dataset\n else:\n DS = SegmentationFolderDataset\n\n self.trainset = DS(\n path=self._get_path(\"train\", self.dataset_type, is_mask=False),\n mask_path=self._get_path(\"train\", self.dataset_type, is_mask=True),\n img_transforms=self.img_transforms,\n inst_transforms=self.inst_transforms,\n return_sem=False,\n normalization=self.normalization,\n **self.kwargs,\n )\n\n self.validset = DS(\n path=self._get_path(\"valid\", self.dataset_type, is_mask=False),\n mask_path=self._get_path(\"valid\", self.dataset_type, is_mask=True),\n img_transforms=self.img_transforms,\n inst_transforms=self.inst_transforms,\n return_sem=False,\n normalization=self.normalization,\n **self.kwargs,\n )\n\n self.testset = DS(\n path=self._get_path(\"test\", self.dataset_type, is_mask=False),\n mask_path=self._get_path(\"test\", self.dataset_type, is_mask=True),\n img_transforms=self.img_transforms,\n inst_transforms=self.inst_transforms,\n return_sem=False,\n normalization=self.normalization,\n **self.kwargs,\n )", "def _setup(self) -> None:\n\t\treturn", "def setup(self, shader_program):\n self.setup_view(shader_program)\n self.setup_projection(shader_program)", "def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3", "def setUpClass(cls):\n\n # initialize Pulsar class\n cls.psr = Pulsar(\n datadir + \"/B1855+09_NANOGrav_9yv1.gls.par\",\n datadir + \"/B1855+09_NANOGrav_9yv1.tim\",\n ephem=\"DE430\",\n timing_package=\"pint\",\n )", "def test_init(paper):\n atSi = Atoms(\"Si8\",positions=[[0,0,0],[0.25,0.25,0.25],[0.5,0.5,0],[0.75,0.75,0.25],\n [0.5,0,0.5],[0.75,0.25,0.75],[0,0.5,0.5],[0.25,0.75,0.75]],\n cell=[5.43,5.43,5.43])\n\n entry = paper[0]\n kwargs = {}\n args = []\n\n calc = Aflow(atSi, '.', '.', 0, entry=entry, *args, **kwargs)\n\n assert calc.entry_file == \"./entry.pkl\"\n assert calc.can_execute()\n assert calc.can_extract()\n assert not calc.is_executing()\n calc.extract()", "def setup(self, core) :\n # We need direct access to the core\n self.core = core\n # Validate a square quarter core. (Not applicable to 1/2 or 1/8)\n assert(len(self.core.stencil[0,:])==len(self.core.stencil[:,0]))\n # Core size per dimension.\n self.dimension = len(self.core.stencil[0,:])\n # Assembly boundaries\n self.widths = np.zeros(self.dimension+1)\n self.widths[:] = self.core.width\n self.widths[0] = 0.5 * self.core.width\n # Subdivisions. 
Not really used.\n self.subdivisions = np.ones(self.dimension,dtype='i')\n # Peaking factor map\n self.peaking_map = np.zeros((self.dimension, self.dimension))\n self.peaking = np.zeros(len(self.core.assemblies))\n # Create the static top part of the LABAN-PEL input\n self.make_input_top()", "def initialise(self):\n self.set_up()", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'profiles'\n self.test_val_length = 15\n\n return", "def setup(self):\n if self.config.resume:\n if self._try_load_plan():\n Run().print_main(f\"#> Loaded plan from {self.plan_path}:\")\n Run().print_main(f\"#> num_chunks = {self.num_chunks}\")\n Run().print_main(f\"#> num_partitions = {self.num_chunks}\")\n Run().print_main(f\"#> num_embeddings_est = {self.num_embeddings_est}\")\n Run().print_main(f\"#> avg_doclen_est = {self.avg_doclen_est}\")\n return\n\n self.num_chunks = int(np.ceil(len(self.collection) / self.collection.get_chunksize()))\n\n # Saves sampled passages and embeddings for training k-means centroids later\n sampled_pids = self._sample_pids()\n avg_doclen_est = self._sample_embeddings(sampled_pids)\n\n # Select the number of partitions\n num_passages = len(self.collection)\n self.num_embeddings_est = num_passages * avg_doclen_est\n self.num_partitions = int(2 ** np.floor(np.log2(16 * np.sqrt(self.num_embeddings_est))))\n\n Run().print_main(f\"Creaing {self.num_partitions:,} partitions.\")\n Run().print_main(f\"*Estimated* {int(self.num_embeddings_est):,} embeddings.\")\n\n self._save_plan()", "def setup(self):\n self.ca_lines = []\n self.ca_lines.append(self.build_initial_line())\n self.set_display_from_lines()", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):\n ...", "def setup(self):\n self.config = pau.IConfig\n self.session = pau.ISession\n pau.resolve(self)\n\n self.session.assets = Assets()\n self.config.db = self.db_name\n\n self.db = pau.IDb\n pau.resolve(self)\n\n # Instance\n i = Setup()\n pau.resolve(i)\n return i", "def _swift_saio_setup(self):\n self._swift_storage_setup()\n self._swift_proxy_setup()", "def setup(self):\n pass", "def _setup_pipeline_cfg(self):", "def setUp(self):\n self._s1ap_wrapper = s1ap_wrapper.TestWrapper()", "def setUp(self):\n self._s1ap_wrapper = s1ap_wrapper.TestWrapper()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'rtutank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusRTU(devconfig['icsifaces'][0], points.values())\n self.server.start()", "def _setup(self):", "def _setup(self):", "def setUp(cls):\n arkane = Arkane()\n cls.job_list = arkane.load_input_file(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'data', 'Benzyl', 'input.py'))", "def setup(self, ds):\n pass", "def setUp(self):\n uri = 
path.join(path.dirname(__file__),'data','StressTest.shp')\n self.vlayer = QgsVectorLayer(uri, \"StressTest_Layer\", \"ogr\")\n self.nxProfile = Profile(self.vlayer, None)\n self.nxProfile.pathfinder(8, 10)\n # ['DateTime', 'Date', 'Time', 'RealField', 'IntField', 'StringFiel', 'PathName', 'PathName2']", "def test_pyaflowa_setup(tmpdir):\n pyaflowa = Pyaflowa(\n workdir=tmpdir,\n path_specfem_data=os.path.join(TEST_SOLVER, \"mainsolver\", \"DATA\"),\n path_solver=os.path.join(TEST_SOLVER, \"mainsolver\"),\n source_prefix=\"SOURCE\",\n ntask=2,\n components=\"Y\",\n )\n\n assert(pyaflowa._station_codes == [])\n assert(pyaflowa._source_names == [])\n\n pyaflowa.setup()\n\n assert(len(pyaflowa._station_codes) == 2)\n assert(pyaflowa._station_codes[0] == \"AA.S000000.*.*\")\n assert(len(pyaflowa._source_names) == pyaflowa._ntask)\n assert(pyaflowa._source_names[0] == \"001\")\n assert(pyaflowa._config.component_list == [\"Y\"])", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'tcptank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusTCP( devconfig['icsifaces'][0], points.values() )\n self.server.start()", "def setup_layer_structure(self):\n self.page_rank_convolution_1 = self.layer(self.feature_number, self.args.layers[0], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_2 = self.layer(self.args.layers[0], self.args.layers[1], self.args.iterations, self.args.alpha)\n self.page_rank_convolution_3 = self.layer(self.args.layers[1], self.class_number, self.args.iterations, self.args.alpha)", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'series_profiles'\n self.test_vals = np.arange(50) * 1.2\n\n return", "def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def setUp(self):\n\n self.eps = 0.001 # Accept 0.1 % relative error\n\n 
self.RSISE = Point(-35.27456, 149.12065)\n self.Home = Point(-35.25629, 149.12494) # 28 Scrivener Street, ACT\n self.Syd = Point(-33.93479, 151.16794) # Sydney Airport\n self.Nadi = Point(-17.75330, 177.45148) # Nadi Airport\n self.Kobenhavn = Point(55.70248, 12.58364) # Kobenhavn, Denmark\n self.Muncar = Point(-8.43, 114.33) # Muncar, Indonesia", "def initialize_vasp_runs(self):\n\n\t\treference_polarization_path = self.get_extended_path('reference_polarization')\n\t\tdistorted_polarization_path = self.get_extended_path('distorted_polarization')\n\n\t\t#if not Path.exists(reference_polarization_path):\n\t\tself.create_new_vasp_run(reference_polarization_path, self.reference_structure)\n\n\t\t# if not Path.exists(distorted_polarization_path):\n\t\tself.create_new_vasp_run(distorted_polarization_path, self.distorted_structure)", "def setup(self):\n self.graph = KytosGraph()", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'variable_profiles'\n self.test_val_length = 15\n\n return", "def setup(self):\n EventGenerator.setup(self)\n\n if self.madgraph_dir is None:\n self.madgraph_dir = self.get_install_dir()\n logger.debug(\"Using Madgraph from install dir: \" + self.madgraph_dir)\n\n if self.name == 'ap' and self.apmass is None:\n raise Exception(\"Missing apmass param for AP generation.\")", "def setup(self, ctxConfig, drvConfig):\n superClass.setup(self, ctxConfig, drvConfig)\n # TODO Your startup stuff here", "def setup(self):\n # Instrument names\n instruments = list(self.features_df[\"instrument\"].unique())\n\n # Get Muxes for each instrument.\n inst_muxes = [self._instrument_mux(i) for i in instruments]\n\n # Construct the streams for each mux.\n mux_streams = [pescador.Streamer(x) for x in inst_muxes\n if x is not None]\n\n # Construct the master mux\n master_mux = pescador.mux(mux_streams, **self.master_mux_params)\n # We have to wrap the mux in a stream so that the buffer\n # knows what to do with it.\n self.master_stream = pescador.Streamer(master_mux)\n\n # Now construct the final streamer\n if self.use_zmq:\n self.buffered_streamer = zmq_buffered_stream(\n self.master_stream, self.batch_size)\n else:\n self.buffered_streamer = buffer_stream(\n self.master_stream, self.batch_size)", "def setUpClass(cls):\n\n # initialize Pulsar class\n cls.psr = Pulsar(datadir + \"/B1855+09_NANOGrav_9yv1.gls.par\", datadir + \"/B1855+09_NANOGrav_9yv1.tim\")", "def setup(self):\n self.bqSession.update_mex('Initializing...')\n self.mex_parameter_parser(self.bqSession.mex.xmltree)\n self.output_file = None", "def setUp(self):\n self.sc = init_orca_context(cores=4)", "def setUp(self):\n\n # Create the data pipe.\n self.interpreter.pipe.create('dasha', 'mf')\n\n # Create a temporary directory for Dasha outputs.\n ds.tmpdir = mkdtemp()" ]
[ "0.6041124", "0.6023509", "0.5904585", "0.5892326", "0.5793188", "0.5763117", "0.57407904", "0.5732804", "0.5718261", "0.570464", "0.5694896", "0.56861675", "0.5647585", "0.56182", "0.5605254", "0.5602402", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.55925673", "0.5590253", "0.5581465", "0.5579039", "0.5579039", "0.5579039", "0.5571222", "0.5571222", "0.5571222", "0.5571222", "0.5553446", "0.55375427", "0.5536666", "0.5536038", "0.55336666", "0.55332345", "0.5531669", "0.5530264", "0.55247563", "0.5518435", "0.5516122", "0.5504043", "0.54784834", "0.54699165", "0.54670453", "0.5465487", "0.54575384", "0.54522717", "0.5451329", "0.5449798", "0.5443955", "0.54390585", "0.54362124", "0.54355353", "0.54292244", "0.542615", "0.542615", "0.542615", "0.542615", "0.5422614", "0.54167163", "0.54139113", "0.54095864", "0.54078484", "0.54065365", "0.54065365", "0.540151", "0.5395005", "0.5390591", "0.5390591", "0.5378941", "0.5376979", "0.53721297", "0.53681725", "0.53664654", "0.53645295", "0.5358487", "0.53503895", "0.53501344", "0.53501344", "0.53501344", "0.53501344", "0.53501344", "0.5350125", "0.53459513", "0.5336237", "0.53349555", "0.5330866", "0.53224164", "0.5316784", "0.53137803", "0.53098226", "0.53025275", "0.52979696" ]
0.0
-1
Set up LayZ Spa from a config entry.
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    hub = Spa(entry.data[CONF_API], entry.data[CONF_DID])
    await hub.update_status()
    _LOGGER.warning("temp %s", hub.temp_now)
    hass.data[DOMAIN][entry.entry_id] = {}
    hass.data[DOMAIN][entry.entry_id][HUB] = hub
    hass.data[DOMAIN][entry.entry_id][CONF_NAME] = entry.data[CONF_NAME]
    hass.data[DOMAIN][entry.entry_id][CONF_DID] = entry.data[CONF_DID]

    async def async_update_data():
        """Fetch data from API endpoint.

        This is the place to pre-process the data to lookup tables
        so entities can quickly look up their data.
        """
        try:
            # Note: asyncio.TimeoutError and aiohttp.ClientError are already
            # handled by the data update coordinator.
            async with async_timeout.timeout(10):
                return await hub.update_status()
        except InvalidPasswordOrEmail as err:
            raise UpdateFailed(f"The password or email address is invalid: {err}")

    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        # Name of the data. For logging purposes.
        name="lay-z sensor updater",
        update_method=async_update_data,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=timedelta(seconds=60),
    )

    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_refresh()

    hass.data[DOMAIN][entry.entry_id][COORDINATOR] = coordinator

    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config_entry):\n self.config_entry = config_entry", "def __init__(self, config_entry):\n self.config_entry = config_entry", "def setup_config(self, args=None):\n self.config_parse(args=args)", "def provider_setup(cls, args, config):\n if len(args) < 1:\n print \"USAGE: molns provider setup name\"\n print \"\\tCreates a new provider with the given name.\"\n return\n # check if provider exists\n try:\n provider_obj = config.get_object(args[0], kind='Provider')\n except DatastoreException:\n # ask provider type\n print \"Select a provider type:\"\n for n, p in enumerate(VALID_PROVIDER_TYPES):\n print \"\\t[{0}] {1}\".format(n, p)\n while True:\n try:\n provider_ndx = int(raw_input_default(\"Enter the number of type:\", default='0'))\n provider_type = VALID_PROVIDER_TYPES[provider_ndx]\n break\n except (ValueError, IndexError):\n pass\n logging.debug(\"Provider type '{0}'\".format(provider_type))\n # Create provider\n try:\n provider_obj = config.create_object(name=args[0], ptype=provider_type, kind='Provider')\n except DatastoreException as e:\n logging.exception(e)\n print e\n return\n print \"Enter configuration for provider {0}:\".format(args[0])\n setup_object(provider_obj)\n config.save_object(provider_obj, kind='Provider')\n\n cls.provider_initialize(args[0], config)", "def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n self.config_entry = config_entry", "def configure(self):\n if self.three_layer:\n config = self.config\n # remove the continental shelf\n config.set('soma', 'phi', '1e-16')\n config.set('soma', 'shelf_depth', '0.0')", "def config(config, *args, **kwargs):\n # Extract source definitions from config and store as source_name => config\n update_config = defaultdict(dict)\n key_rx = re.compile(\n '(?x) ^ spline-frontpage [.] sources [.] (\\w+) (?: [.] (\\w+) )? $')\n for key, val in config.iteritems():\n # Match against spline-frontpage.source.(source).(key)\n match = key_rx.match(key)\n if not match:\n continue\n\n source_name, subkey = match.groups()\n if not subkey:\n # This is the type declaration; use a special key\n subkey = '__type__'\n\n update_config[source_name][subkey] = val\n\n # Figure out the global limit and expiration time, with reasonable\n # defaults. Make sure they're integers.\n global_limit = int(config.get('spline-frontpage.limit', 10))\n # max_age is optional and can be None\n try:\n global_max_age = int(config['spline-frontpage.max_age'])\n except KeyError:\n global_max_age = None\n\n config['spline-frontpage.limit'] = global_limit\n config['spline-frontpage.max_age'] = global_max_age\n\n # Ask plugins to turn configuration into source objects\n sources = []\n for source, source_config in update_config.iteritems():\n source_type = source_types[source_config['__type__']]\n del source_config['__type__'] # don't feed this to constructor!\n\n # Default to global limit and max age. 
Source takes care of making\n # integers and whatnot\n source_config.setdefault('limit', global_limit)\n source_config.setdefault('max_age', global_max_age)\n\n # Hooks return a list of sources; combine with running list\n sources += [source_type(config=config, **source_config)]\n\n # Save the list of sources, and done\n config['spline-frontpage.sources'] = sources", "def _setConfig(self,config):\n if config:\n self.config = config\n else:\n from layman import config\n self.config = config", "def initialize_from_config(self):", "def test_pyaflowa_setup_config(tmpdir):\n pyaflowa = Pyaflowa(\n workdir=tmpdir,\n path_specfem_data=os.path.join(TEST_SOLVER, \"mainsolver\", \"DATA\"),\n path_solver=TEST_SOLVER, source_prefix=\"SOURCE\", ntask=1,\n data_case=\"synthetic\", components=\"Y\", fix_windows=\"ITER\",\n )\n pyaflowa.setup()\n config = pyaflowa.set_config(source_name=\"001\", iteration=1, step_count=1)\n assert(config.eval_tag == \"i01s01\")", "def init_config():\n try:\n initConfig()\n click.echo(\"Submarine CLI Config initialized\")\n except AttributeError as err:\n click.echo(err)", "def __init__(self, config_entry: config_entries.ConfigEntry) -> None:\n self.config_entry = config_entry\n self.options = dict(config_entry.options)", "def setup_config():\n global config\n config = modConfig.Config(cmdline.config)", "def put_stash_into_config(config, stash_code):\n key = config.value.keys()\n # set the values from the stash_list dictionary into this config node\n print stash_code\n print stash_code['dom_name'], stash_code['tim_name'], stash_code['use_name'], stash_code['section'],\\\n stash_code['item'], stash_code['cmor']\n for profile in ['tim_name', 'use_name', 'dom_name', 'package']:\n config.value[key[0]].value[profile].value = str(\"'\" +\n stash_code[profile] +\n \"'\")\n config.value[key[0]].value['item'].value = str(int(stash_code['item']))\n config.value[key[0]].value['isec'].value = str(int(stash_code['section']))\n\n make_unique_index(config, stash_code)", "def __init__(self, config_entry):\n self.config_entry = config_entry\n # self.dps_strings = config_entry.data.get(CONF_DPS_STRINGS, gen_dps_strings())\n # self.entities = config_entry.data[CONF_ENTITIES]\n self.selected_device = None\n self.editing_device = False\n self.device_data = None\n self.dps_strings = []\n self.selected_platform = None\n self.discovered_devices = {}\n self.entities = []", "def init(cls, config, src):\n cls.config = config", "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg", "def __init__(self, name, pardus_profile):\n\n self.cfg = ConfigParser.ConfigParser()\n self.connection = Connection(pardus_profile)\n self.ipv4 = IpV4(pardus_profile)\n self.ipv6 = IpV6(pardus_profile)\n self._802_3_ethernet = self.set_802_3_ethernet(pardus_profile)\n self._802_11_wireless = self.set_802_11_wireless(pardus_profile)\n self._802_11_wireless_security = self.set_802_11_wireless_security(pardus_profile)\n\n self.create_config()", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def init(_config, _run):\n sacred.commands.print_config(_run)\n dump_config_and_makefile()\n\n print()\n print('Initialized storage dir. 
Now run these commands:')\n print(f\"cd {_config['trainer']['storage_dir']}\")\n print(f\"make train\")\n print()\n print('or')\n print()\n print(f\"cd {_config['trainer']['storage_dir']}\")\n print('make ccsalloc')", "def load_config(self, config_file, usage):\n config = configparser.ConfigParser()\n config.read(config_file)\n auth_id = config.get('SMARTY STREETS', 'auth_id' )\n auth_token = config.get('SMARTY STREETS', 'auth_token')\n api_credentials = StaticCredentials(auth_id, auth_token)\n client_builder = ClientBuilder(api_credentials)\n if usage == 'batch': \n client_builder.with_custom_header( {'Connection':'keep-alive'} )\n \n self.client = client_builder.build_us_street_api_client()", "def config( **kwargs ):", "def setup_config(args):\n # Set the default configuration file\n f = pkgutil.get_data(__package__, 'dnstap.conf')\n cfg = load_yaml(f)\n\n # Overwrites then with the external file ? \n if args.c:\n cfg_ext = load_yaml(open(args.c, 'r'))\n merge_cfg(u=cfg_ext,o=cfg)\n\n # Or searches for a file named dnstap.conf in /etc/dnstap_receiver/ \n else:\n etc_conf = \"/etc/dnstap_receiver/dnstap.conf\"\n f = pathlib.Path(etc_conf)\n if f.exists():\n cfg_etc = load_yaml(open(etc_conf, 'r'))\n merge_cfg(u=cfg_etc,o=cfg)\n \n # update default config with command line arguments\n if args.v:\n cfg[\"trace\"][\"verbose\"] = args.v \n if args.u is not None:\n cfg[\"input\"][\"unix-socket\"][\"enable\"] = True\n cfg[\"input\"][\"unix-socket\"][\"path\"] = args.u\n if args.l != DFLT_LISTEN_IP:\n cfg[\"input\"][\"tcp-socket\"][\"local-address\"] = args.l\n if args.l != DFLT_LISTEN_PORT:\n cfg[\"input\"][\"tcp-socket\"][\"local-port\"] = args.p\n\n return cfg", "def __init__(self, config, town):\n\t\tself.config = config", "def init() -> None:\n # Setup elib_config\n elib_config.ELIBConfig.setup(\n app_version=__version__,\n app_name='ESST',\n config_file_path='esst.toml',\n config_sep_str='__',\n )\n\n # Write example config file\n elib_config.write_example_config('esst.toml.example')\n\n # Validate config\n try:\n elib_config.validate_config()\n except elib_config.ConfigMissingValueError as error:\n LOGGER.error('missing mandatory config value: %s', error.value_name)\n LOGGER.error('please read \"esst.toml.example\" for instructions on how to setup the configuration for ESST')\n sys.exit(1)\n\n for config in SentryConfigContext.__subclasses__():\n SENTRY.register_context(context_name=config.__name__, context_provider=config)", "def launch(config):\n \n launch_with_configs([config])", "def __init__(self, config):\n try:\n config['volume_id']\n config['access_key']\n config['secret_access_key']\n config['region']\n except KeyError, e:\n logging.error(repr(e))\n raise ImproperlyConfigured()\n\n if not config.has_key('keep'):\n config['keep'] = 5\n\n self.config = config", "def setup_from_args(ma, args):\n ma.out_file = ma.fix_path(args.output)\n ma.deck_id = ma.dm.id(args.deck)\n ma.model_name = _(u'Subtitles ({})').format(args.deck)\n if args.language_names:\n ma.language_names = args.language_names\n if args.Japanese:\n ma.japanese = True\n ma.use_readings = True\n if args.Chinese:\n ma.chinese = True\n ma.use_readings = True\n ma.subtitle_files = args.subtitles", "def __init__(self, config):\n\n self.locations_hltv_starting_ = config[sC.BUCKET_LOCATIONS][sC.HLTV_STARTING]\n self.score_starting_ = config[sC.BUCKET_LOCATIONS][sC.SCORE_STARTING]\n self.logs_starting_ = config[sC.BUCKET_LOCATIONS][sC.LOGS_STARTING]\n self.temp = config[sC.FOLDER_LOCATIONS][sC.TEMP_APP_ENGINE_FOLDER]\n 
self.results_ = config[sC.FOLDER_LOCATIONS][sC.CONFIGS_RESULTS]\n self.amxmodx_logs_ = config[sC.FOLDER_LOCATIONS][sC.ADDONS_AMXMODX_LOGS]\n self.cstrike_logs_ = config[sC.FOLDER_LOCATIONS][sC.CSTRIKE_LOGS]\n self.hltv_demos_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.HLTV_DEMOS_FUNC]\n self.ftp_logs_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.FTP_LOGS_FUNC]\n\n print('{} - Initialized'.format(__name__))", "def on_game_start(self, config):\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]", "def on_game_start(self, config):\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]", "def from_config_plan(cls,\n model_cfg: dict,\n plan_arch: dict,\n plan_anchors: dict,\n log_num_anchors: str = None,\n **kwargs,\n ):\n raise NotImplementedError", "def from_config(config: dict):\n pass", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def Init(self, config):\r\n pass", "def __init__(\n self, coordinator: ToloSaunaUpdateCoordinator, entry: ConfigEntry\n ) -> None:\n super().__init__(coordinator, entry)\n\n self._attr_unique_id = f\"{entry.entry_id}_lamp_mode\"", "def setup(self, config):\n config_location = None\n try:\n try:\n stream = config.read()\n if hasattr(config, 'name'):\n config_location = config.name\n except (AttributeError, TypeError):\n f = file(config)\n stream = f.read()\n config_location = f.name\n except (AttributeError, TypeError):\n stream = config\n\n try:\n config = yaml_load(stream, Loader=yaml_loader)\n config = CaseInsensitiveDictMapper(config)\n\n if config_location:\n self._config.app_path = os.path.abspath(config_location)\n\n elif not 'config' in config or \\\n not 'app_path' in config['config']:\n raise ConfigError('app_path could not be calculated and is not set in config')\n\n except YAMLError as e:\n error = 'Import failed with malformed config'\n if hasattr(e, 'problem_mark'):\n mark = e.problem_mark\n error += ' at: (%s:%s)' % (mark.line+1, mark.column+1)\n raise ConfigError(error)\n\n if not self._validate_imported_config(config):\n raise ConfigError('Import failed: config invalid')\n\n if 'config' in config:\n self._update_config_from_import(config['config'])\n\n if 'filters' in config:\n self._update_global_filters_from_import(config['filters'])\n\n if 'global filters' in config:\n self._update_global_filters_from_import(config['global filters'])\n\n if 'routes' in config:\n self._update_routes_from_import(config['routes'])\n\n log.debug(self._router)\n\n return True", "def on_game_start(self, config):\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = 
config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\n\n # Can store custom variables and settings here:\n self.hole_x_location = 17", "def __init__(self, config: Tuple):", "def setup():\n # Parse the command line arguments\n args = parse_arguments()\n\n # Load training configurations\n config = load_yaml(args.config)\n update_not_none(config, vars(args))\n\n # Setup experiment directories and update them to configurations\n setup_dirs(config)\n\n # Setup loggers\n del logging.getLogger('tensorflow').handlers[0]\n setup_loggers(config['log_dir'])\n\n # Setup GPUs\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config['gpu']\n\n # Backup source code\n backup_src(config['src_dir'])\n\n return config", "def from_config(cls, xknx, name, config):\n group_address = config.get(\"group_address\")\n scene_number = int(config.get(\"scene_number\"))\n return cls(\n xknx, name=name, group_address=group_address, scene_number=scene_number\n )", "def __init__(self, config):\n super().__init__(config)\n self.collector_host = config.get(\"collector_host\")\n self.schedds = config.get(\"schedds\", [None])\n self.condor_config = config.get(\"condor_config\")\n self.constraint = config.get(\"constraint\", True)\n self.classad_attrs = config.get(\"classad_attrs\")\n self.correction_map = config.get(\"correction_map\")", "def prepare_config(exp_config: Union[List[str], str], run_type: str, ckpt_path=\"\", opts=None, suffix=None, graph_file=None) -> Tuple[CN, str]:\n config = get_config(exp_config, opts)\n\n # Default behavior is to pull experiment name from config file\n # Bind variant name to directories\n if isinstance(exp_config, str):\n variant_config = exp_config\n else:\n variant_config = exp_config[-1]\n variant_name = osp.split(variant_config)[1].split('.')[0]\n config.defrost()\n config.VARIANT = variant_name\n if suffix is None:\n suffix = config.EXPERIMENT\n add_suffix(config, suffix)\n\n add_suffix(config, variant_name)\n\n if osp.exists(config.MODEL.GRAPH_FILE) and not osp.isdir(config.MODEL.GRAPH_FILE):\n graph_id = osp.split(config.MODEL.GRAPH_FILE)[1][:5]\n add_suffix(config, graph_id)\n elif graph_file is not None and osp.isdir(config.MODEL.GRAPH_FILE):\n graph_id = graph_file[:5]\n add_suffix(config, graph_id)\n\n if ckpt_path is not None:\n if not osp.exists(ckpt_path):\n ckpt_path = osp.join(config.CHECKPOINT_DIR, ckpt_path)\n\n np.random.seed(config.SEED)\n random.seed(config.SEED)\n torch.random.manual_seed(config.SEED)\n torch.backends.cudnn.deterministic = True\n\n return config, ckpt_path", "def main():\n\n # parse arguments\n args = parseArguments()\n\n # read stac specification \n with open( args.config_file, 'r' ) as f:\n root = yaml.safe_load( f )\n\n # generate nested stac hierarchy\n obj = getStacObject( root )\n\n # create out path if required\n if not os.path.exists ( args.out_path ):\n os.makedirs( args.out_path )\n\n # generate nested stac hierarchy\n obj.normalize_and_save( root_href=args.out_path, \n catalog_type=pystac.CatalogType.SELF_CONTAINED)\n\n return", "def __init_from_config(self, cli_config_file, cluster, log): # noqa: C901 FIXME\n with open(cli_config_file, encoding=\"utf-8\") as config_file:\n log.info(\"Searching for configuration file %s\" % cli_config_file)\n config = ConfigParser()\n 
config.read_file(config_file)\n\n # use cluster if there or search for default value in [main] section of the config file\n try:\n cluster_name = cluster if cluster else config.get(\"main\", \"cluster_name\")\n except NoSectionError as e:\n fail(\"Error getting the section [%s] from the configuration file (%s)\" % (e.section, cli_config_file))\n except NoOptionError as e:\n fail(\n \"Error getting the option (%s) from the section [%s] of the configuration file (%s)\"\n % (e.option, e.section, cli_config_file)\n )\n cluster_section = \"cluster {0}\".format(cluster_name)\n try:\n self.region = config.get(\"main\", \"region\")\n except NoOptionError:\n pass\n try:\n self.env_blacklist = config.get(\"main\", \"env_blacklist\")\n except NoOptionError:\n pass\n\n try:\n self.stack_name = cluster_name\n log.info(\"Stack name is (%s)\" % self.stack_name)\n # if region is set for the current stack, override the region from the AWS ParallelCluster config file\n # or the region from the [main] section\n self.region = config.get(cluster_section, \"region\")\n self.s3_bucket = config.get(cluster_section, \"s3_bucket\")\n self.artifact_directory = config.get(cluster_section, \"artifact_directory\")\n self.batch_cli_requirements = config.get(cluster_section, \"batch_cli_requirements\")\n self.compute_environment = config.get(cluster_section, \"compute_environment\")\n self.job_queue = config.get(cluster_section, \"job_queue\")\n self.job_definition = config.get(cluster_section, \"job_definition\")\n try:\n self.job_definition_mnp = config.get(cluster_section, \"job_definition_mnp\")\n except NoOptionError:\n pass\n self.head_node_ip = config.get(cluster_section, \"head_node_ip\")\n\n # get proxy\n self.proxy = config.get(cluster_section, \"proxy\")\n if self.proxy != \"NONE\":\n log.info(\"Configured proxy is: %s\" % self.proxy)\n except NoSectionError:\n # initialize by getting stack info\n self.__init_from_stack(cluster_name, log)\n except NoOptionError as e:\n fail(\n \"Error getting the option (%s) from the section [%s] of the configuration file (%s)\"\n % (e.option, e.section, cli_config_file)\n )", "def prepare(self, config, **kwargs):\n pass", "def setup(self, config, env=None):\n self.tracconfig = config\n if env:\n self.tracenv = env\n self._log('setup(env=%s, config=%s)' % (self.tracenv, self.tracconfig))", "def train_config(parser, input_argv=None):\n\n data(parser)\n token(parser)\n model(parser)\n if nsml.IS_ON_NSML:\n nsml_for_internal(parser)\n trainer(parser)\n\n # Use from config file\n base_config(parser)\n\n config = parser.parse_args(input_argv, namespace=NestedNamespace())\n\n use_base_config = config.base_config\n # use pre-defined base_config\n if use_base_config:\n base_config_path = os.path.join(\"base_config\", config.base_config)\n base_config_path = utils.add_config_extension(base_config_path)\n defined_config = utils.read_config()\n # config.overwrite(defined_config)\n\n config = NestedNamespace()\n config.load_from_json(defined_config)\n\n # overwrite input argument when base_config and arguments are provided.\n # (eg. 
--base_config bidaf --learning_rate 2) -> set bidaf.json then overwrite learning_rate 2)\n input_args = get_input_arguments(parser, input_argv)\n for k, v in input_args.items():\n setattr(config, k, v)\n\n if not use_base_config:\n config = optimize_config(config)\n\n set_gpu_env(config)\n set_batch_size(config)\n return config", "def setup():\n\tglobal config_parser, config_file\n\tglobal prefix\n\n\tif os.path.islink(sys.argv[0]):\n\t\tlink = os.readlink(sys.argv[0])\n\n\t\tif not os.path.isabs(link):\n\t\t\tlink = os.path.join(os.path.dirname(sys.argv[0]), link)\n\n\t\tprefix = os.path.dirname(os.path.abspath(link))\n\telse:\n\t\tprefix = os.path.dirname(os.path.abspath(sys.argv[0]))\n\n\tconfig_parser = ConfigParser.ConfigParser()\n\tset_defaults()\n\n\tconfig_file = os.path.join (xdg_config_home, \"sushi\", \"nigiri\")\n\n\tif not check_config_file(config_file):\n\t\tprint \"Config file creation failed. Aborting.\"\n\t\treturn\n\n\tread_config_file()", "def __init__(self, environment=None):\n if environment is None:\n environment = os.environ.get(\"SENTERA_ENV\") or \"prod\"\n environment = environment.lower()\n self.environment = environment\n\n if self.environment == \"prod\":\n self.config = {\n \"sentera_api_url\": \"https://api.sentera.com\",\n \"weather_api_url\": \"https://weather.sentera.com\",\n }\n else:\n self.config = {\n \"sentera_api_url\": f\"https://api{self.environment}.sentera.com\",\n \"weather_api_url\": f\"https://weather{self.environment}.sentera.com\",\n }\n\n if ENV_SENTERA_API_URL in os.environ:\n self.config[\"sentera_api_url\"] = os.environ.get(ENV_SENTERA_API_URL)\n\n if ENV_WEATHER_API_URL in os.environ:\n self.config[\"weather_api_url\"] = os.environ.get(ENV_WEATHER_API_URL)", "def setup(self, tenant, inside_vlan_arg, outside_vlan_arg,\n inside_ip, inside_mask, inside_gw, inside_sec_gw,\n outside_ip, outside_mask, outside_gw, outside_sec_gw,\n interface_in, interface_out):\n LOG.debug(\"asa_setup: %s %d %d %s %s %s %s\",\n tenant, inside_vlan_arg, outside_vlan_arg,\n inside_ip, inside_mask, outside_ip, outside_mask)\n inside_vlan = str(inside_vlan_arg)\n outside_vlan = str(outside_vlan_arg)\n context = tenant\n cmds = [\"conf t\", \"changeto system\"]\n inside_int = interface_in + '.' + inside_vlan\n cmds.append(\"int \" + inside_int)\n cmds.append(\"vlan \" + inside_vlan)\n outside_int = interface_out + '.' 
+ outside_vlan\n cmds.append(\"int \" + outside_int)\n cmds.append(\"vlan \" + outside_vlan)\n cmds.append(\"context \" + context)\n cmds.append(\"allocate-interface \" + inside_int)\n cmds.append(\"allocate-interface \" + outside_int)\n cmds.append(\"config-url disk0:/\" + context + \".cfg\")\n cmds.append(\"write memory\")\n cmds.append(\"changeto context \" + context)\n cmds.append(\"int \" + inside_int)\n cmds.append(\"nameif Inside\")\n cmds.append(\"security-level 100\")\n cmds.append(\"ip address \" + inside_ip + \" \" + inside_mask)\n cmds.append(\"int \" + outside_int)\n cmds.append(\"nameif Outside\")\n cmds.append(\"security-level 0\")\n cmds.append(\"ip address \" + outside_ip + \" \" + outside_mask)\n\n cmds.append(\"router ospf 1\")\n cmds.append(\"network \" + inside_ip + \" \" + inside_mask + \" area 0\")\n cmds.append(\"network \" + outside_ip + \" \" + outside_mask + \" area 0\")\n cmds.append(\"area 0\")\n cmds.append(\"route Outside 0.0.0.0 0.0.0.0 \" + outside_gw + \" 1\")\n cmds.append(\"route Outside 0.0.0.0 0.0.0.0 \" + outside_sec_gw + \" 1\")\n cmds.append(\"end\")\n cmds.append(\"write memory\")\n\n if tenant not in self.tenant_rule:\n self.tenant_rule[tenant] = dict()\n self.tenant_rule[tenant]['rule_lst'] = []\n\n data = {\"commands\": cmds}\n return self.rest_send_cli(data)", "def __init__(self, hass, config_entry, name: str, config: Dict):\n super().__init__()\n self.hass = hass\n self.config_entry = config_entry\n self._host = self.hass.data[DOMAIN][\"host\"]\n self._name = name\n _LOGGER.debug(f\"Adding camera {name}\")\n self._config = config\n self._latest_url = urllib.parse.urljoin(self._host, f\"/api/{self._name}/latest.jpg?h=277\")\n parsed_host = urllib.parse.urlparse(self._host).hostname\n self._stream_source = f\"rtmp://{parsed_host}/live/{self._name}\"\n self._stream_enabled = self._config[\"rtmp\"][\"enabled\"]", "def setup(self, run, run_id):\n\n raise NotImplementedError", "def from_config(config: Dict[str, Any]):\n source_name = config[\"source\"]\n host = config.get(\"host\", \"localhost\")\n port = config.get(\"port\", 8081)\n api_key = (config.get(\"api_key_name\", \"\"), config.get(\"api_key\", \"\"))\n return KukurSource(source_name, host, port, api_key)", "def setup_environment(self, spack_env, run_env):\n run_env.prepend_path('PICARD',\n join_path(self.prefix, 'bin', 'picard.jar'))", "def __init__(self, config: Dict[str, Any]) -> None:\n self.config = config", "def setup_config(self, cfg: [dict, str, None] = None):\n if isinstance(cfg, str):\n print(f\"Loading config from file: {cfg}\")\n cfg = json.loads(open(cfg, \"r\").read())\n self.configure_network(cfg)\n self.configure_codegen(cfg)\n self.configure_jiff(cfg)\n\n return self", "def _setupConfigAnnotation(self):\n annotations = IAnnotations(self)\n settings = annotations.get(\"PLOMINOFIELDCONFIG\", None)\n if not settings:\n annotations[\"PLOMINOFIELDCONFIG\"] = PersistentDict()", "def __init__(__self__, *,\n config: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input['SyntheticsPrivateLocationMetadataArgs']] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if config is not None:\n pulumi.set(__self__, \"config\", config)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if metadata is not None:\n pulumi.set(__self__, \"metadata\", metadata)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if 
tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def update_config_external_template(config):\r\n\r\n # best parameters from the paper\r\n config['train_batch_size'] = 16384\r\n config['lr'] = 3e-4\r\n config['sgd_minibatch_size'] = 4096\r\n config['num_sgd_iter'] = 4\r\n config['rollout_fragment_length'] = 100\r\n\r\n # run ID to communicate to the http trainer\r\n config['run_uid'] = '_setme'\r\n\r\n # stable baselines accepts full episodes\r\n config[\"batch_mode\"] = \"complete_episodes\"\r\n\r\n # stable baselines server address\r\n config[\"http_remote_port\"] = \"http://127.0.0.1:50001\"\r\n\r\n # no gpus, stable baselines might use them\r\n config['num_gpus'] = 0\r\n\r\n # set trainer class\r\n config['_trainer'] = \"External\"\r\n config['_policy'] = \"PPO\"\r\n\r\n # tuned\r\n config['num_envs_per_worker'] = 10\r\n config['num_workers'] = 3\r\n return config", "def config(ctx, stream_source, kafka_addr, db_addr, analytic_host, analytic_port, tags):\n tag_map = {}\n for tag_str in tags:\n tag_map.update(parse_tag(tag_str))\n a = analytic_pb2.AnalyticData(\n addr=\"{!s}:{!s}\".format(analytic_host, analytic_port))\n client = aceclient.ConfigClient(host=analytic_host, port=analytic_port)\n client.config(src=stream_source, analytic=a,\n kafka_addr=kafka_addr, db_addr=db_addr, tags=tag_map)", "def cli_setup(argv):\n parser = argparse.ArgumentParser(\n prog=\"bazel_bf setup\",\n description=\"\"\"\n Set up the remote environment.\n Specify --region, --s3_bucket and --s3_key to specify a remote config for the first time.\n (After bazel_bf setup has been called, this info is stored in the local config file\n \"~/.bazel_bf/config.json\").\n \"\"\")\n parser.add_argument(\"--region\", type=str)\n parser.add_argument(\"--s3_bucket\", type=str)\n parser.add_argument(\"--s3_key\", type=str)\n\n args = parser.parse_args(argv)\n\n if args.region or args.s3_bucket or args.s3_key:\n if not args.region or not args.s3_bucket or not args.s3_key:\n raise CommandLineException(\n \"for initial setup, --region, --s3_bucket and --s3_key are all mandatory\"\n )\n config.write_local_config(\n region=args.region, s3_bucket=args.s3_bucket, s3_key=args.s3_key)\n\n lambda_config = config.read_config()\n\n next_lambda_config = setup.setup(lambda_config)\n config.write_config(next_lambda_config)", "def main(config):\n current_config = DKRConfig()\n\n for key, value in config.items():\n\n if key in current_config.config:\n for version in value['versions']:\n current_config.add_entrypoint_version(key, version)\n continue\n\n current_config.add_entrypoint(key, value['versions'])\n\n current_config.write(create=True)", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]", "def main(_config, _run):\n sacred.commands.print_config(_run)\n dump_config_and_makefile()\n prepare_and_train()", "def setup_workspace(\n config=None,\n front_config=None,\n branch_config=None,\n arg_list=None,\n initializer=None\n):\n namespace = PARSER.parse_args(args=arg_list)\n branch_list = BRANCH_LIST\n directory_list = [branch_config[branch] for branch in branch_list]\n directory_list.append(front_config['log_top_dir'])\n initializer.initialize(directory_list=directory_list)", "def 
setup_product():\n\n fiveconfigure.debug_mode = True\n import collective.geo.openlayers\n zcml.load_config('configuretest.zcml', collective.geo.openlayers)\n\n fiveconfigure.debug_mode = False", "def configure(self, args):\n pass", "def configure(args):\n print('Configures HPC fleet with given name \"{}\"'.format(args))", "def from_config(cls, config: Dict[str, Any]) -> \"SkLDA\":\r\n from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\n\r\n lda = LinearDiscriminantAnalysis()\r\n lda.set_params(**config[\"params\"])\r\n\r\n for name, value in config[\"attributes\"].items():\r\n if value is not None:\r\n setattr(lda, name, value)\r\n\r\n return SkLDA(lda)", "def set_config(config_name, host, port, core=''):\n global CONFIGS\n CONFIGS[config_name] = {'host': host, 'port': port, 'core': core}", "def __init__(self, config):\n\n self.config = config", "def configuration(config):\n create_str_dir(config)\n add_skymap(config)\n save_configuration(config)", "def __init__(self, simname, pointing):\n\n conf=files.read_config(simname)\n self.update(conf)\n\n self['pointing_id']=pointing\n self['fnum']=FILTERNUM[self['filter']]\n\n # use 2*seed for images, seed for catalogs\n numpy.random.seed(2*self['seed'])\n\n self._load_pointing()\n self._load_catalog()", "def __init__(self, config=None):\n config_dict = {}\n if config:\n config_dict = json.load(config)\n\n self.android = config_dict.get(\"android\")\n self.linux = config_dict.get(\"linux\")\n self.atf = config_dict.get(\"atf\")\n self.qemu = config_dict.get(\"qemu\", \"qemu-system-aarch64\")", "def cli(ctx):\n config = get_config_data()\n\n ctx.obj = config", "def cryptsetup_format(config):\n\n (password, slot) = config.first_password()\n\n args = [\"luksFormat\"]\n cipher = config.cipher + \"-\" + config.mode + \"-\" + config.ivgen\n if config.ivgen_hash is not None:\n cipher = cipher + \":\" + config.ivgen_hash\n elif config.ivgen == \"essiv\":\n cipher = cipher + \":\" + \"sha256\"\n args.extend([\"--cipher\", cipher])\n if config.mode == \"xts\":\n args.extend([\"--key-size\", str(config.keylen * 2)])\n else:\n args.extend([\"--key-size\", str(config.keylen)])\n if config.hash is not None:\n args.extend([\"--hash\", config.hash])\n args.extend([\"--key-slot\", slot])\n args.extend([\"--key-file\", \"-\"])\n args.extend([\"--iter-time\", \"10\"])\n args.append(config.image_path())\n\n cryptsetup(args, password)", "def main(args: argparse.Namespace, config: Config) -> None:\n # Notes:\n # - 1878 is the number of unique answers from the GQA paper\n # - 1843 is the number of answers across train, val and testdev\n\n # Download and initialise resources\n print(colored(\"initialisation:\", attrs=[\"bold\"]))\n stanza.download(lang=\"en\", dir=\".stanza\")\n\n # Print environment info\n print(colored(\"environment:\", attrs=[\"bold\"]))\n cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if cuda else \"cpu\")\n print(f\"device: {torch.cuda.get_device_name(device) if cuda else 'CPU'}\")\n print(config)\n\n if args.job == JobType.PREPROCESS:\n preprocess(config)\n elif args.job in (JobType.TRAIN, JobType.PREDICT):\n resume = None\n if args.resume != \"\":\n run_id, checkpoint = args.resume.split(\":\")\n resume = ResumeInfo(run_id, checkpoint)\n if args.job == JobType.TRAIN:\n train(config, device, resume)\n else:\n predict(config, device, resume)\n else:\n raise NotImplementedError()", "async def async_setup_entry(hass, config_entry):\n # setup the Spotify\n if AIS_SPOTIFY_TOKEN is None:\n # remove configurator\n # 
configurator = hass.components.configurator\n # req_config = _CONFIGURING.pop(OAUTH_CLIENT_ID)\n # configurator.request_done(req_config)\n\n await async_setup(hass, hass.config)\n return True", "def Config(ss):\n ss.InitParams()\n ss.OpenPats()\n ss.ConfigEnv()\n ss.ConfigNet(ss.Net)\n ss.ConfigTrnEpcLog(ss.TrnEpcLog)\n ss.ConfigTstEpcLog(ss.TstEpcLog)\n ss.ConfigTstTrlLog(ss.TstTrlLog)\n ss.ConfigRunLog(ss.RunLog)", "def setup_layers(self):\n if self.args.model == \"exact\":\n self.layer = PPNPLayer\n else:\n self.layer = APPNPLayer\n self.setup_layer_structure()", "def __init__(self, config, cfg):\n self.config = config\n self.cfg = cfg", "def init():\n try:\n compile_contract(\n \"fishcake\", f\"Fishcake(sp.address('{pub_key_hash}'),{default_supply})\")\n fishcake_addr = deploy(\"fishcake\")\n print(\"\\n\")\n compile_contract(\n \"fishcakeBox\", f\"FishcakeBox({default_redeem_amt}, sp.address('{fishcake_addr}'))\")\n fishcake_box_addr = deploy(\"fishcakeBox\")\n setup(fishcake_addr, fishcake_box_addr)\n print(\"\\n\\n[!] Details :\\n\")\n print(f\"-- Fishcake Token Address : {fishcake_addr}\")\n print(f\"-- Fishcake Box Address : {fishcake_box_addr}\")\n except Exception as e:\n print(\"Failed to originate Contracts : \", e)", "def modify_setupcfg(struct, opts):\n opts[\"namespace\"] = [PYSCAFFOLDEXT_NS]\n setupcfg_path = [opts[\"project\"], \"setup.cfg\"]\n struct = helpers.modify(struct, setupcfg_path, add_install_requires)\n struct = helpers.modify(struct, setupcfg_path, add_pytest_requirements)\n struct = helpers.modify(struct, setupcfg_path,\n lambda x: add_entry_point(x, opts))\n return struct, opts", "def set_amf_addr(self, addr: str) -> None:\n self.config[\"amfConfigs\"][0][\"address\"] = addr", "def _configure(self, config):\n self.friction_coef = config['friction_coef']\n self.num_cone_faces = config['num_cone_faces']\n self.num_samples = config['grasp_samples_per_surface_point']\n self.dir_prior = config['dir_prior']\n self.target_num_grasps = config['target_num_grasps']\n if self.target_num_grasps is None:\n self.target_num_grasps = config['min_num_grasps']\n\n self.min_contact_dist = config['min_contact_dist']\n self.num_grasp_rots = config['coll_check_num_grasp_rots']\n if 'max_num_surface_points' in config.keys():\n self.max_num_surface_points_ = config['max_num_surface_points']\n else:\n self.max_num_surface_points_ = 100", "def config():", "def config():", "def setup(self, stage: Optional[str] = None) -> None:", "def run(config, toml_config, args, parser, subparser):\n if not args.landingzone_cmd: # pragma: nocover\n return run_nocmd(config, args, parser, subparser)\n else:\n config = LandingZoneConfig.create(args, config, toml_config)\n return args.landingzone_cmd(config, toml_config, args, parser, subparser)", "def test_init(paper):\n atSi = Atoms(\"Si8\",positions=[[0,0,0],[0.25,0.25,0.25],[0.5,0.5,0],[0.75,0.75,0.25],\n [0.5,0,0.5],[0.75,0.25,0.75],[0,0.5,0.5],[0.25,0.75,0.75]],\n cell=[5.43,5.43,5.43])\n\n entry = paper[0]\n kwargs = {}\n args = []\n\n calc = Aflow(atSi, '.', '.', 0, entry=entry, *args, **kwargs)\n\n assert calc.entry_file == \"./entry.pkl\"\n assert calc.can_execute()\n assert calc.can_extract()\n assert not calc.is_executing()\n calc.extract()", "def _configure_vz(self, instance, config='basic'):\n \n try:\n # Set the base config for the VE, this currently defaults to the\n # basic config.\n # TODO(imsplitbit): add guest flavor support here\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--applyconfig', config)\n 
if err:\n LOG.error(err)\n return True\n\n except ProcessExecutionError:\n raise exception.Error('Failed to add %s to OpenVz' % instance['id'])", "def initialise(self, args, environ):", "def __init__(self, config: str) -> None:\n self.configuration = config", "def __init__(self, config: str) -> None:\n self.configuration = config", "def createStations (config):\n trace (\"createStations()\")\n for section in config.sections():\n if section.capitalize().startswith(\"Station\"):\n myPressureProbes = myHumidityProbes = []\n myTemperatureProbes= []\n name = section\n for option in config.options (section):\n value = config.get (section, option)\n opt = option.capitalize()\n if opt == \"Name\":\n name = value\n elif opt == \"Temperature\":\n myTemperatureProbes = getProbeList (value,\n temperatureProbes)\n elif opt == \"Pressure\":\n myPressureProbes = getProbeList (value,\n pressureProbes)\n elif opt == \"Humidity\":\n myHumidityProbes = getProbeList (value,\n humidityProbes)\n stations [name] = Station.Station(myTemperatureProbes,\n myPressureProbes, myHumidityProbes, name)", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'rtutank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusRTU(devconfig['icsifaces'][0], points.values())\n self.server.start()" ]
[ "0.575565", "0.575565", "0.56301713", "0.55638593", "0.5519828", "0.5385264", "0.5352522", "0.5347906", "0.5321907", "0.528488", "0.5280284", "0.52756244", "0.5265489", "0.52172726", "0.516801", "0.51658285", "0.50733215", "0.50239307", "0.50198895", "0.50092506", "0.4990384", "0.49899703", "0.498613", "0.49811512", "0.49678642", "0.4965849", "0.4957167", "0.49433503", "0.49424008", "0.49404728", "0.49404728", "0.49348027", "0.4924388", "0.4907983", "0.48995173", "0.4899072", "0.48706272", "0.48698014", "0.48663747", "0.48443016", "0.4842474", "0.4826253", "0.48156998", "0.48049912", "0.48038775", "0.48021936", "0.47963393", "0.4789117", "0.47823665", "0.47808692", "0.4780775", "0.47665325", "0.4762288", "0.47463977", "0.47450808", "0.4741019", "0.47397268", "0.47355863", "0.47330448", "0.4729986", "0.47238693", "0.47127026", "0.4706333", "0.47052622", "0.4701391", "0.4701391", "0.4701391", "0.46975428", "0.46924207", "0.46921024", "0.4686248", "0.46861273", "0.46842363", "0.4682948", "0.46815354", "0.4676985", "0.4670779", "0.4669499", "0.46652722", "0.46592683", "0.46582845", "0.46577382", "0.46572244", "0.46549332", "0.4653945", "0.46502286", "0.46471232", "0.46471056", "0.46420103", "0.46378636", "0.4636224", "0.4636224", "0.46357393", "0.46299294", "0.46275508", "0.4624095", "0.46199745", "0.46184465", "0.46184465", "0.46150643", "0.46125033" ]
0.0
-1
Fetch data from the API endpoint. This is the place to preprocess the data into lookup tables so entities can quickly look up their data.
async def async_update_data():
    try:
        # Note: asyncio.TimeoutError and aiohttp.ClientError are already
        # handled by the data update coordinator.
        async with async_timeout.timeout(10):
            return await hub.update_status()
    except InvalidPasswordOrEmail as err:
        raise UpdateFailed(f"The password or email address is invalid: {err}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fetch_data(self):\n pass", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def fetch_entity(endpoint, values):\n values['entity'] = Entity.objects.get_or_404(name=values['entity'])", "def fetch_data(self):\n if not self.json_query:\n self.generate_json_query()\n\n response = search_graphql(self.json_query)\n\n if \"errors\" in response:\n print(\"ERROR encountered in fetch_data().\")\n for error in response['errors']:\n print(error['message'])\n\n return\n\n self.response = response\n\n if len(self.response['data'][self.data_type.value]) != len(self.id):\n print(\"WARNING: one or more IDs not found in the PDB.\")", "def _load_data(self, url, options=None, location=None):\n # Set API key in query parameters\n params = { \"api-key\": self.key }\n\n # Add options to query parameters\n if options is not None:\n params.update(options)\n\n # Load the data from the API, raise error if there's an invalid status code\n res = self.session.get(self.protocol + url, params=params, timeout=(4, 10))\n if res.status_code == 401:\n raise ValueError(\"Invalid API Key\")\n elif res.status_code == 404:\n raise RuntimeError(\"Error 404: This page is not available\")\n res.raise_for_status()\n\n if orjson is None:\n parsed_res = res.json()\n else:\n parsed_res = orjson.loads(res.content)\n\n # Get the data from the usual results location\n if location is None:\n results = parsed_res.get(\"results\")\n\n # Sometimes the results are in a different location, this location can be defined in a list\n # Load the data from that location\n else:\n results = parsed_res\n for loc in location:\n results = results.get(loc)\n\n return results", "def fetch_data(self):", "def _get_data_in_api(url: str) -> list:\n\n try:\n resp = requests.request('GET', url, timeout=10)\n resp.raise_for_status\n\n return Froxy._data_filter(resp.text)\n\n except (\n requests.ConnectionError,\n requests.ConnectTimeout,\n requests.HTTPError,\n requests.ReadTimeout\n ) as err:\n sys.exit(err)", "def fetch():\n \n r = requests.get(url).json()\n\n # Reponse is a single key dictionary with a list of nested dictionaries.\n # We are using Python 3.6 so dictionaries are still unordered. 
\n # From Python 3.7 dictionaries are ordered, but as of this comment Zappa \n # does not support Python past 3.6.\n \n ## Get list of dictionaries from first key\n results = r[list(r.keys())[0]]\n\n ## Get list of values for each dictionary in results\n values = [d.values() for d in results]\n\n ## Get the names of the columns from first nested dictionary and format them\n keys = results[0].keys()\n columns = [i.replace('_', ' ').title() for i in keys]\n \n # Turn results into a DataFrame and then into formatted HTML\n ## Formatting is applied per column instead of globally to\n ## retain format of numeric ID column\n results = pd.DataFrame(values, columns=columns)\n\n comma_format = '{:,}'.format\n table = results.to_html(index=False, table_id=\"results-table\", render_links=True, classes=\"table table-striped table-bordered\",\n formatters={\n \"Outlay Amount\": comma_format,\n \"Obligated Amount\": comma_format,\n \"Budget Authority Amount\": comma_format,\n \"Current Total Budget Authority Amount\": comma_format,\n \"Percentage Of Total Budget Authority\": '{:,.2e}'.format\n })\n\n return source, table", "def _get_data(api):\n url = API_BASE_URL + api\n logging.info(\"Querying URL: %s\", url)\n response = requests.get(url)\n count = 0\n retry_count = 50\n if response.status_code == 404:\n logging.error(\"Received 404\")\n return None\n while response.status_code != 200:\n count += 1\n if count == retry_count:\n break\n logging.error(\"Received status code %s, expected 200\",\n response.status_code)\n time.sleep(2)\n try:\n response = requests.get(url)\n except:\n pass\n if response.status_code != 200:\n # Send emoty dataframe. Remove None\n return None\n rdd = SPARK_CONTEXT.parallelize([response.json()])\n data_frame = SPARK_SESSION.read.json(rdd)\n return data_frame", "def get_entity_data(self, instance_id = None):\n # Typically, this url is better, becuase we can insert parameters into url in a proper way\n # original url is 'https://api.ci.ai.dynamics.com/v1/instances/{instanceId}/data/{relativePath}[?forceSearch][&proxy]'\n # but we don't need optional parameters, so we delete them from full url\n url_original = 'https://api.ci.ai.dynamics.com/v1/instances/{instanceId}/data/{relativePath}'\n # it seems {} only part of python sting format, so we only use string.format(value1, value2,..) 
to replace it \n url_parse = url_original.format(instanceId = self.INSTANCEID, relativePath = self.RELATIVEPATH)\n # it seems sometimes url with parameters not working, so we provide a fulfuiled url \n url_full = 'https://api.ci.ai.dynamics.com/v1/instances/c910b061-1008-4397-95f6-4e7b443b924a/data/RetailDemoData_RetailSystem_Contacts'\n \n # send get request with \n response = requests.get(url_parse, headers=self.HEADERS)\n # r2 = requests.get(url_full, headers=self.HEADERS)\n print(response)\n # for record in r3.json():\n # print(json.dumps(record))\n # according to json file structore, our data contain in ['value']\n # response.json()['value']\n # for row in r3.json()['value']:\n # print(json.dumps(row))\n # we can try to storage data into a local directory,\n # these only work for local python script, not on AZURE runbook\n # with open('03_data/32_entity.json', 'w') as outfile:\n # json.dump(r3.json()['value'], outfile)\n\n return response.json()['value']", "async def fetch_data(self) -> T:", "def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]", "def _fetch_resource(self):\n\n qs = self._build_qs() # Build the query string\n url = self._build_url(qs) # Build the full url\n fp = self._api_call(url) # Fetch the data as a file pointer\n\n # Parse the list of dicts in to a dict generator\n return csv.DictReader(fp)", "def fetch_data(self, pre_site_method_url, params):\n\n # Create the main body of Api url with the pre_site_method part\n url = \"{}{}\".format(self._body_url, pre_site_method_url)\n\n data = []\n while True:\n # Request to api with the url that created and params\n response = requests.get(url, params=params)\n response.encoding = 'utf-8-sig'\n response = response.json()\n data.append(response)\n # If 'has_more' at the end of the response\n # param page increase by 1 and a new request is created\n if 'has_more' in response and response['has_more']:\n params[\"page\"] += 1\n else:\n break\n if 'error_id' in response.keys():\n raise ValueError('Api blocks me to get data.Api return error_name: throttle_violation')\n\n r = []\n for d in data:\n r.extend(d['items'])\n\n return list(chain(r))", "def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])", "def _fetch(cls, *args, **kwargs):\n apikey = htpc.settings.get('plexpy_apikey')\n\n if apikey is None:\n raise\n\n url = '%sapi/v2?apikey=%s&%s' % (cls._build_url(), apikey, urlencode(kwargs))\n\n try:\n r = requests.get(url, verify=False)\n r.raise_for_status()\n # Lets just copy the headers for now.\n cherrypy.response.headers['Content-Type'] = r.headers.get('Content-Type', 'application/json;charset=UTF-8')\n resp = r.json()\n if resp.get('response', {}).get('result') == 'success':\n return 
resp['response']['data']\n except:\n log.exception('Failed to get %s' % url)\n return", "def get_api_results(self, endpoint):\n endpoint = self.sched_url + endpoint.format(self.API_KEY)\n if self._verbose:\n print(\"Fetch data from: {} \".format(endpoint))\n try:\n resp = requests.get(url=endpoint)\n data = resp.json()\n return data\n except Exception as e:\n print(e)\n return False", "async def _fetch_data(self) -> T:\n raise NotImplementedError", "def fetch(api_key, query='', page=1, from_date=False, to_date=False):\n fetch_articles(api_key, query, page, from_date, to_date)", "def fetch_data(path: str, params: Dict[str, Any] = {}) -> Dict[str, Any]:\n\n service_host = LOCAL_AFL_DATA_SERVICE\n headers: Dict[str, str] = {}\n\n service_url = service_host + path\n response = _make_request(service_url, params=params, headers=headers)\n\n return _handle_response_data(response)", "def test_get_data_from_api(self):\n dag = self.dagbag.get_dag(self.dag_id)\n extract_task = dag.get_task('extract')\n resp = self.extract.getDataFromAPI()\n self.assertIsNotNone(resp)\n self.assertEqual(type(resp), list)", "def do_fetch(self):\n pass", "def get_all_data():\n \n # open the data stored in a file called \"data.json\"\n try:\n fp = open(\"data/data.json\")\n response = simplejson.load(fp)\n # but if that file does not exist, download the data from fusiontables\n except IOError:\n logging.info(\"failed to load file\")\n service = build('fusiontables', 'v1', developerKey=API_KEY)\n query = \"SELECT * FROM \" + TABLE_ID + \" WHERE Animal_Type = 'DOG'\"\n response = service.query().sql(sql=query).execute()\n \n return response", "def getEntities(self, request_data: dict = None) -> dict:\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getEntities\")\n path = \"/access/entities\"\n if request_data is None or type(request_data) != dict:\n raise Exception(\"Expected a dictionary to fetch entities\")\n res = self.connector.postData(\n self.endpoint + path, data=request_data, headers=self.header\n )\n return res", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def covid_fetch():\n #Sets the structure of the data retrieved from the API\n cases_and_deaths = {\n \"date\": \"date\",\n \"areaName\": \"areaName\",\n \"areaCode\": \"areaCode\",\n \"newCasesByPublishDate\": \"newCasesByPublishDate\",\n \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\n \"newDeathsByDeathDate\": \"newDeathsByDeathDate\",\n \"cumDeathsByDeathDate\": \"cumDeathsByDeathDate\"\n }\n #Sets the filter for the API using config.json\n covid_nation = ['areaType=nation']\n nation = 'areaName=' + str(config_fetcher(\"covid_region\"))\n covid_nation.append(nation)\n\n #Gets API latest data\n covid_api = Cov19API(\n filters = covid_nation,\n structure = cases_and_deaths,\n )\n #Gets data in form of dictionary\n covid_json = covid_api.get_json()\n #Gets timestamp for last update\n covid_timestamp = covid_api.last_update\n #Assign data to variables\n covid_data = covid_json['data'] #This formats the data as a list, while I want a dictionary, hence the next line.\n return covid_data", "def _fetch_data(self, samples):\n pass", "def fetch_data():\n data.fetch_data()\n data.start_updating()", "def handle_rest_api(request, response):\n url_fragments = urlparse.urlparse(request.url)\n 
query_options = urlparse.parse_qs(url_fragments.query)\n api_endpoint = url_fragments.path.rsplit('/', 2)[1]\n\n # Use API endpoint to load reference JSON data\n with open(os.path.join(HERE, 'data', '%s.json' % api_endpoint), 'r') as f:\n data = json.loads(f.read())\n\n def do_filter(entry):\n result = True\n\n for option, values in query_options.iteritems():\n # Don't handle options which are not properties of the entry\n if option not in entry:\n continue\n\n for value in values:\n if isinstance(entry[option], int):\n result &= entry[option] == int(value)\n else:\n result &= entry[option] == value\n\n return result\n\n if api_endpoint == 'jobs':\n data['results'] = filter(do_filter, data['results'])\n\n elif api_endpoint == 'job-log-url':\n data = filter(do_filter, data)\n\n return data", "def fetch_data(data_url):\n return requests.get(data_url).content", "def hit(self, endpoint, **params):\n params['api_key'] = self.api_key\n params['language'] = 'es-ES'\n url = self.base_url + endpoint\n response = requests.get(url, params=params)\n data = response.json()\n return data", "def fetch(self, endpoint: str, query: str = None, protocol: str = protocol,\n host: str = host) -> Response:\n\n # overwrite the class attributes to custom values provided by the method's arguments\n self.endpoint = endpoint\n self.query = query\n self.protocol = protocol\n self.host = host\n\n # concatenate arguments to URL and save as attribute\n self.URL = f'{protocol}://{host}{endpoint}{query}'\n\n logger.debug(f'Fetching from API: {self.URL}')\n ret = requests.get(self.URL)\n logger.debug(f'Returning from API: {ret} {ret.content}')\n return ret", "async def fetch_data(self, url: str) -> dict:\n async with self.bot.http_session.get(url) as r:\n return await r.json()", "def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')", "def fetch(*args, **kwargs):\n raise InvalidEndpoint('Not a valid location on this endpoint')", "def fetch(self):\n # type: () -> List[List[Any]]\n if self._request.next_uri is None:\n self._finished = True\n return []\n response = self._request.get(self._request.next_uri)\n status = self._request.process(response)\n if status.columns:\n self._columns = status.columns\n self._stats.update(status.stats)\n if status.next_uri is None:\n self._finished = True\n return status.rows", "def _get_data(self):\n\n # Grab the data. Note, the separator is actually ', ', not just a\n # comma, so specify. Also, recognize the \"?\" as an NA value\n # (I think it is easier to have pandas catch the NA values instead\n # of manually searching for and parsing these in the future).\n # Finally, set the engine to python, since having a separator greater\n # than one character automatically does this, and prints a warning\n # message. 
By explicitly telling it to use python, we suppress the\n # warning.\n self.train_df = pd.read_csv(self.train_url, sep=', ', header=None,\n na_values='?', engine='python')\n\n # For the training data, have one comment row, so need to ignore\n self.test_df = pd.read_csv(self.test_url, sep=', ', header=None,\n skiprows=1, na_values='?', engine='python')\n\n # Get the header data\n response = requests.get(self.head_url)\n header = response.text.split('\\n')\n\n # Now, filter to grab the header lines:\n # First, make sure there is at least one character for the line, and\n # ignore lines that start with the comment character for the file \"|\"\n header = [row for row in header if len(row) > 0 and row[0] != '|']\n\n # Ignore the first row, since it is just identifying the classifier\n # task and, get just the header values\n header = [head.split(':')[0] for head in header[1:]]\n\n # Finally, we need to add a header name for the last column (if <= or >\n # income of 50k)\n header.append('income')\n\n # Now, set the header for the data sets\n self.train_df.columns = header\n self.test_df.columns = header", "def test_api_tables_endpoint(self):\n params = {'lender': '90000451965', 'metro': '49180'}\n url = reverse(tables)\n resp = self.client.get(url, params)\n result_dict = json.loads(resp.content)\n self.assertTrue(isinstance(result_dict, dict))\n keys = ['lender', 'peers', 'odds', 'msa', 'counties']\n lender_keys = ['hma_pct', 'lma_pct', 'mma_pct', 'lma', 'mma', 'hma', 'lar_total']\n for key in keys:\n self.assertTrue(key in result_dict['table_data'].keys())\n for key in lender_keys:\n self.assertTrue(key in result_dict['table_data']['lender'].keys())\n self.assertTrue(len(result_dict['table_data']['counties']) > 0)", "def get_api_response_data(base_url: str, **kwargs: Any) -> Any:\n query = urlencode(kwargs)\n url = f'{base_url}?{query}'\n\n response = requests.get(url)\n response.raise_for_status()\n\n return response.json()", "def fetch(self) -> None:\n pass", "async def get_records_from_api(url: str, session: ClientSession):\n try:\n response = await session.request(method='GET', url=url)\n response.raise_for_status()\n log.info(f\"Response status ({url}): {response.status}\")\n return await response.json()\n except HttpProcessingError as http_err:\n log.info('An error occurred during the request. 
Error: ', http_err)\n raise http_err\n except Exception as err:\n log.info('Unable to proceed: Error: ', err)\n raise err", "def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)", "def fetch_elections(self):\n payload = {\"key\": self._api_key} \n response = requests.get(self._url, params=payload)\n try: \n response.raise_for_status() \n return response.json()\n except requests.exceptions.HTTPError as error:\n # Error in request \n logging.error(error)\n except requests.exceptions.RequestException as error:\n # Catastrophic error \n logging.error(error)\n raise", "def test_api_lookup(self):\n\n # Set up the url for the api call\n\n expected_url = 'https://www.gov.uk/api/content{}'.format(self.urlsclass.dedupurls[0])\n\n # Make request and extract json.\n\n expected = requests.get(expected_url).json()\n\n assert api_lookup(self.urlsclass.dedupurls[0], 'https://www.gov.uk/api/content') == expected", "def get_entities(self, data):\n\n entities = None\n\n if \"d\" in data:\n logger.debug(f\"'d' found.\")\n if \"results\" in data.get(\"d\"):\n logger.debug(f\"'d.results' found.\")\n entities = data[\"d\"].get(\"results\")\n else:\n entities = data.get(\"d\")\n elif \"value\" in data:\n logger.debug(f\"'value' found.\")\n entities = data.get(\"value\")\n else:\n logger.debug(f\"No entities found.\")\n\n return entities", "def load_data():\n try:\n loader.download()\n load_table_data()\n status = 'loaded'\n except Exception as ex:\n log.log_traceback(ex)\n status = 'failed'\n return flask.jsonify({'status': status})", "def _api_query(self, endpoint, options=None):\n self.logger.debug(\"Host URL: '%s'\", self.host_url)\n self.logger.debug(\"Endpoint: '%s'\", endpoint)\n self.logger.debug(\"Options: '%s'\", str(options))\n self.logger.debug(\"Headers: '%s'\", str(self.headers))\n encoded_options = urllib.parse.urlencode(options if options is not None else {})\n request_url = self.host_url + endpoint + '?' 
+ encoded_options\n self.logger.debug(\"Request URL: '%s'\", str(request_url))\n response = requests.get(request_url, headers=self.headers).content.decode('utf-8')\n self.logger.debug(\"API query sent.\")\n return json.loads(response, parse_float=Decimal, parse_int=Decimal)", "def _fetch(\n cls, url: str, headers: Mapping[str, str], params: Mapping[str, Any]\n ) -> Tuple[List[EventType], Optional[str]]:\n status_url = cls._post_query(url, headers, params)\n # Await a while before polling the results\n time.sleep(0.1)\n result_url = cls._poll_status(status_url, headers, params)\n data, headers = cls._get_results(result_url, headers, params)\n result = json.loads(data)\n return result, headers.get(\"x-next-token\")", "def fetch(self):\n pass", "def fetch(self):\n pass", "def fetch_fixture_data(\n start_date: str, end_date: str, data_import=match_data, verbose: int = 1\n) -> ApiResponse:\n return _api_response(\n pd.DataFrame(\n data_import.fetch_fixture_data(\n start_date=start_date, end_date=end_date, verbose=verbose\n )\n ).pipe(match.clean_fixture_data)\n )", "def _fetch_data(url: str, d: datetime) -> pd.DataFrame:\n return pd.read_json(url)", "def get_data(endpoint_name, arg=None,\n project_name=None, fields=None, size=get_setting_value('DEFAULT_SIZE'), page=0,\n data_category=None, query_args={}, verify=False, *args, **kwargs):\n endpoint = get_setting_value('GDC_API_ENDPOINT').format(endpoint=endpoint_name)\n if arg:\n endpoint = endpoint+'/{}'.format(arg)\n else:\n ## prep extra-params, including `from` param, as dict\n extra_params = {}\n if page>0:\n from_param = helpers.compute_start_given_page(page=page, size=size)\n extra_params.update({\n 'from': from_param,\n })\n if fields:\n extra_params.update({'fields': ','.join(helpers.convert_to_list(fields))})\n if dict(**kwargs):\n ## TODO check on whether this handles redundant param spec \n ## correctly\n extra_params.update(dict(**kwargs))\n params = _params.construct_parameters(project_name=project_name,\n size=size,\n data_category=data_category,\n query_args=query_args,\n verify=verify,\n **extra_params\n )\n # requests URL-encodes automatically\n log.info('submitting request for {endpoint} with params {params}'.format(endpoint=endpoint, params=params))\n response = requests_get(endpoint, params=params)\n log.info('url requested was: {}'.format(response.url))\n response.raise_for_status()\n return response", "def _get_data(self, url: str)->dict:\n data = None\n resp = self._get(url)\n if resp:\n data = resp.json()['data']\n return data", "def _fetch(self, output_type='xml'):\n\n # authenticate\n self._auth()\n\n # get the table\n response = self._do('GET', self.URLS['adp'])\n\n # load results\n self._results = self._parse_doc(response.text)", "async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()", "def get_all_records(self, data: dict, execution_context: dict):", "def Fetch(self, request, global_params=None):\n config = self.GetMethodConfig('Fetch')\n return self._RunMethod(\n config, request, global_params=global_params)", "def _fetch(self, fetch):\n if fetch == 'posts':\n if self['handle'] and not self['guid']: self.fetchhandle()\n else: self.fetchguid()\n elif fetch == 'data' and self['handle']:\n self.fetchprofile()", "def fetch(self, person=None):\n if not person:\n # get the list.\n self.endpoint = 'people.json'\n else:\n self.endpoint = 'people/{0}.json'.format(person)\n\n request = self.get(self.construct_url())\n\n if request.status_code == 200:\n return json.loads(request.content)\n\n raise 
BasecampAPIError()", "async def get_api_data(self, referer: str, params: list) -> list:\n return await asyncio.ensure_future(self.get_response(referer, params))", "def update(self):\n try:\n response = requests.get(\n self.API_URL, headers=self.API_HEADERS, timeout=15)\n except requests.exceptions.RequestException:\n self._logger.exception(\"While fetching data from server\")\n return\n\n if response.status_code != 200:\n self._logger.error(\"API call returned with status %s\",\n response.status_code)\n return\n\n content_type = response.headers.get('Content-Type', 'whatever')\n if content_type != 'text/csv':\n self._logger.error(\"Expected text/csv but got %s\", content_type)\n return\n\n response.encoding = 'UTF8'\n content = response.text\n data = (line for line in content.split('\\n'))\n reader = csv.DictReader(data, delimiter=';', quotechar='\"')\n for row in reader:\n if row.get(\"Station\", None) == self._station_id:\n self.data = {\n self.API_FIELDS.get(k)[0]:\n self.API_FIELDS.get(k)[1](v.replace(',', '.'))\n for k, v in row.items()\n if v and k in self.API_FIELDS\n }\n break", "def fetch_data(self, fields):\n n_symbols = len(self.symbol_list)\n n_iters = n_symbols // 100 + 1\n\n # data_dict keyed by symbol\n data_dict = {}\n for i in range(0, n_iters):\n start = i*100\n end = (i+1) * 100\n response = self.get_points(self.symbol_list[start:end],\n fields)['response']\n\n for symbol in response:\n if response[symbol]['meta']['status'] == 'ok':\n symbol_data = response[symbol]['results']\n data_dict[symbol] = {}\n for data_point in symbol_data:\n data_dict[symbol][data_point] = \\\n symbol_data[data_point]['data'][1]\n else:\n data_dict[symbol] = {field:np.nan for field in fields}\n return data_dict", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def test_fetch_all():\n response = requests.get('http://localhost:5000/api/persons')\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data[0]", "async def fetch(self, hostname, **params):\n\n self.source_records.extend(\n await self.source.client.paginate(\n url=_IPAM_ADDR_URL, filters=dict(device=hostname, **params)\n )\n )", "def connect_data_api(self, endpoint):\n\n url = 'https://api.gdax.com' + endpoint\n res = requests.get(url)\n\n if res.status_code == 200:\n return res.json()\n else:\n raise ValueError(res.content)", "def apicall():\n try:\n test_json = request.get_json()\n test = pd.read_json(test_json, orient='records')\n\n # To resolve the issue of TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str'\n # test['Dependents'] = [str(x) for x in 
list(test['Dependents'])]\n\n # Getting the Loan_IDs separated out\n # loan_ids = test['Loan_ID']\n\n except Exception as e:\n raise e\n\n clf = 'model_1.pk'\n\n if test.empty:\n return(bad_request())\n else:\n print('Loading the model ..')\n loaded_model = None\n with open('models/'+clf, 'rb') as f:\n loaded_model = pickle.load(f)\n print(\"The model has been loaded...doing predictions now...\")\n predictions = loaded_model.predict(test)\n\n prediction_series = list(pd.Series(predictions))\n final_predictions = pd.DataFrame(list(zip(loan_ids, prediction_series)))\n\n \"\"\"We can be as creative in sending the responses.\n But we need to send the response codes as well.\n \"\"\"\n responses = jsonify(predictions=final_predictions.to_json(orient=\"records\"))\n responses.status_code = 200\n\n return (responses)", "def get_data(url):\n response = get(url, timeout=10)\n \n if response.status_code >= 400:\n raise RuntimeError(f'Request failed: { response.text }')\n \n return response.json()", "def get_data(self):\n def _clean_search_hit(search_hit):\n \"\"\"\n Takes in a search result hit as a BeautifySoup tag and pulls out all the data to match the desired schema.\n\n :param search_hit:\n :return Dictionary: A dictionary with the cleaned data\n \"\"\"\n\n hit_name = search_hit.find(class_='hit-name')\n hit_url = hit_name.get('href')\n hit_id = hit_url.split('/')[-1]\n name = hit_name.get_text().split(',')[0].title().split()\n\n current_city = search_hit.find(class_='hit-location').get_text().upper()\n\n # Find all Addresses for search result.\n try:\n address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')\n address = list({a.text.upper().replace('.', '') for a in address})\n except AttributeError:\n address = list()\n\n # find the address that is most likely the current main address.\n try:\n address.insert(0, address.pop(address.index(current_city)))\n except ValueError:\n address.insert(0, current_city)\n\n address = [\n {\n '@type': 'PostalAddress',\n 'addressLocality': locality.title(),\n 'addressRegion': region\n } for locality, region in [a.split(', ') for a in address]]\n\n work_location = {'@type': 'Place'}\n try:\n work_location['name'] = search_hit\\\n .find(class_='hit-work')\\\n .find(class_='hit-values')\\\n .get_text()\\\n .title()\n except AttributeError:\n work_location['name'] = ''\n\n alumni_of = {'@type': 'EducationalOrganization'}\n try:\n alumni_of['name'] = search_hit\\\n .find(class_='hit-high-school')\\\n .find(class_='hit-values')\\\n .get_text().title()\n except AttributeError:\n pass\n\n return {\n '@id': hit_id,\n '@type': 'Person',\n 'name': ' '.join(name),\n 'givenName': name[0],\n 'middleName': ' '.join(name[1:-1]),\n 'familyName': name[-1],\n 'url': hit_url,\n 'address': address,\n 'workLocation': work_location,\n 'alumniOf': alumni_of,\n }\n\n def _refine_search(search_str, options):\n \"\"\"\n Takes a list of WebElements and a search string, looks for string in the text of each WebElement, and\n press the option if found. 
Returns Boolean for found status\n\n :param search_str: str of the desired option.\n :param options: list of WebElements from Beautify Soup that represents all of the available options.\n :return:\n \"\"\"\n search_str = search_str.upper()\n logging.info(f'Looking for \\'{search_str}\\'')\n try:\n for option in options:\n option_text = option.text.upper()\n logging.info(f'Option Checked: {option_text}')\n if search_str in option_text:\n option.click()\n time.sleep(2)\n logging.info(f'Option Selected: {option_text}')\n return True\n else:\n return False\n except AttributeError:\n return True\n except StaleElementReferenceException as e:\n ChromeCrash(e)\n\n with self.driver(executable_path=self.DRIVER_DIR) as driver:\n driver.get(self.url)\n\n \"\"\"\n The CSS for the page doesn't show the State nor the City selector options if the page is too narrow,\n so we need to make sure the browser is open wide enough for the CSS to make those options visible. \n \"\"\"\n driver.fullscreen_window()\n\n # Refine the search by State\n address_region = self.person.get('addressRegion', '')\n address_region = STATES.get(address_region.upper(), address_region.upper())\n region_options = driver\\\n .find_element_by_class_name(\"STATE\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_region, region_options):\n return False\n\n # Narrow the search by pressing a City option\n address_locality = self.person.get('addressLocality').title()\n locality_options = driver\\\n .find_element_by_class_name(\"CITY\")\\\n .find_elements_by_class_name(\"refinementList-text\")\n\n if not _refine_search(address_locality, locality_options):\n return False\n\n \"\"\"\n The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to\n be done in steps with a pause between movements to allow for loading. 
\n Here it will first get the current location on the page, attempt to move down the page, and then check to\n see if the location changed.\n \"\"\"\n\n if self.auto_scroll and len(driver.find_elements_by_class_name(\"ais-InfiniteHits-item\")) > 15:\n current_height, new_height = 0, driver.execute_script(\"return document.body.scrollHeight\")\n\n while new_height != current_height:\n # Scroll down to the bottom of the page\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n current_height, new_height = new_height, driver.execute_script(\"return document.body.scrollHeight\")\n\n page_source = driver.page_source\n page_soup = bs(page_source, 'html.parser')\n search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))\n for i, search_result in enumerate(search_results):\n search_results[i] = _clean_search_hit(search_result)\n\n self.data_from_website = pd.DataFrame(search_results)\n self.data_from_website.set_index('@id', inplace=True)\n return True", "async def get(self, entity_type: str):\n u = Utilities()\n\n # Get a dict of paths and information to entity types\n # await u.get_api_root_response()\n\n # Parameters to pass to aiohttp (the OAuth token we got with the oauth_token function)\n headers = {\"Authorization\": \"Bearer \" + self.token}\n #\n # The url to use\n # url = await self.create_url(\"/api/{entity}\")\n url = await u.create_url(await u.entity_url(entity_type))\n\n async with ClientSession() as session:\n async with session.get(url, headers=headers) as resp:\n # Dict with key that contain dict of all entity types as values\n Get.entity_dict = {}\n\n # Nested dictionary magic\n Get.entity_dict[entity_type] = await resp.json()", "def load_raw_data(self):\n if self.trendfile:\n self.raw_data = self.get_ap_file()\n else:\n report_params = self.format_api_request_params()\n report_params['test'] = self.testresults\n\n self.raw_data = self.get_ap_report(params=report_params)", "def make_query(self, api_endpoint: str, data=True) -> dict:\n\n result = utils.get_json(os.path.join(self.api_url, api_endpoint),\n RPKI_Validator_Wrapper.get_headers())\n return result[\"data\"] if data else result", "def receiver():\n def generate(entities_to_proceed):\n \"\"\"Process list of entities populating them with altitude data\"\"\"\n yield \"[\"\n for index, entity in enumerate(entities_to_proceed):\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n logging.debug(\"processing entity : %s\", entity)\n else:\n logging.info(\"processing entity : %s\", entity.get(GUID_STR))\n\n if index > 0:\n yield \",\"\n booking_guid = entity.get(GUID_STR)\n iata = entity.get(IATA_STR)\n api_key = resolve_api_key(API_KEYS, iata)\n\n if not isinstance(api_key, str):\n entity[PROP] = []\n yield json.dumps(entity)\n continue\n url = URL_TEMPLATE.render(entity) + booking_guid + \"?api_key=\" + api_key\n if METHOD == \"get\":\n entity[PROP] = requests.get(url, headers=HEADERS).json()\n else:\n entity[PROP] = requests.request(METHOD, url, data=entity.get(\"payload\"),\n headers=HEADERS).json()\n yield json.dumps(entity)\n yield \"]\"\n\n # get entities from request\n entities = request.get_json()\n\n # create the response\n logging.debug(\"Processing %i entities\", len(entities))\n return Response(generate(entities), mimetype='application/json')", "def fetch(self):\n raise NotImplementedError()", "def get_entities(self, type, offset=0, limit=20):\n # 
url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities?type={}'.format(self.url, type, offset, limit)\n r = requests.get(url, headers=self.headers_with_link)\n return r.json()", "def test_api_predictors_get(self):\n pass", "def retrieve(self,request,*args,**kwargs):\n response=super(RetrieveAPIView,self).retrieve(request,*args,**kwargs)\n return self.fetch_related(request,response,*args,**kwargs)", "def fetch_data():\n if request.method == 'GET':\n return (\"Use this endpoint with POST method to fetch data\", 200)\n elif request.method == 'POST':\n # request data\n app.logger.info(\"Requesting data\")\n data = get_data('regulatorydecision')\n\n # write to file\n app.logger.info(\"Writing to file\")\n write_to_file(data)\n\n # upload to cloud storage\n app.logger.info(\"Uploading to GCS\")\n upload_to_gcs('data.json', 'health-ca-data-staging')\n\n # publish message to pubsub\n app.logger.info(\"Publishing status message to Pubsub\")\n message = \"Data uploaded to GCS\"\n pubsub_publish('projects/health-ca-data/topics/gcs_load', message, \"\")\n\n return (\"Fetching data\", 200)", "def _request(self, endpoint, params=dict(), data=None):\n client_value = \"Python Netinfo\"\n headers = {'X-Request-Client': client_value}\n url = '/'.join([self.url, endpoint])\n kwargs = {'url': url, 'headers': headers, 'timeout': 30,\n 'params': params, 'data': data}\n response = requests.get(**kwargs)\n if response.status_code not in range(200, 299):\n raise RequestFailure(response.status_code, response.content)\n try:\n loaded = json.loads(response.content)\n except Exception as error:\n raise InvalidResponse(error)\n return loaded", "def __get_raw_data(self, address):\n\n query_params = self.api_key_params\n\n # add address to query_params\n query_params[self.query_address_string] = address\n\n # construct URL from baseUrl and query_params\n url_string = self.base_url + \"?\" + urlencode(query_params)\n\n # get response from service provider API\n return urlopen(url_string)", "def fetch_temp_data(url):\n res = requests.get(url)\n return res.json()", "def get_data(self, request, url):\n data = request.get(endpoint=url)\n return data[0], data[1]", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "def get_data():\n pass", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def _Dynamic_Fetch(self, request, response):\n print \"Request:\"\n print (\"Request: {}\").format(request)\n response.set_content(self.mock_response_issue)\n response.set_statuscode(200)\n new_header = response.add_header()\n new_header.set_key('Content-type')\n new_header.set_value('application/json')\n\n response.set_finalurl(request.url)\n response.set_contentwastruncated(False)\n\n # allow to query the object after it is used\n # pylint: disable=attribute-defined-outside-init\n self.request = request\n self.response = response", "def fill_from_api_response(self, api_response):\n pass", "def get(self, endpoint, data=None):\n if endpoint.startswith(\"http\"):\n url = endpoint\n elif endpoint.startswith(\"/\"):\n url = \"{}{}\".format(api_endpoint, endpoint)\n else:\n url = \"{}/{}\".format(api_endpoint, endpoint)\n\n response = requests.get(\n url,\n data=data,\n headers= {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.token,\n }\n )\n\n if 
response.status_code == 200:\n return response.json()\n elif response.status_code == 404:\n print(\"404: Probably invalid endpoint\")\n else:\n print(\"ERROR IN REQUEST: {}\".format(response.content))\n return response", "def _getData(self, entity, params):\n\n res = []\n entity_code = entity.code\n conn = self._connect(entity)\n try:\n conn.create_function(\"INLIST\", 2, self._inlist)\n\n conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n\n if not self.exists(entity_code, cursor):\n self.generate_entity(entity)\n\n my_departments = \"\"\n my_users = \"\"\n for column in entity.definition[\"columns\"]:\n if \"entityFilterByDepartment\" in column or column[\"type\"] == \"departmentSelector\":\n my_departments = self.getMyDepartments()\n if \"entityFilterByUser\" in column or column[\"type\"] == \"userSelector\":\n my_users = self.getMyUsers()\n\n # Create columnames for each column in entity metadata. Adding too related fields\n columnNames = \"A.id\"\n leftJoin = \"\"\n letter = \"B\"\n thisEntityHaveDepartmentFilter = False\n thisEntityHaveUserFilter = False\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\"]:\n columnNames += f\", A.[{column['field']}]\"\n\n elif column[\"type\"] == \"dateTime\":\n columnNames += f\", strftime('%Y-%m-%d',{column['field']}) as [{column['field']}]\"\n\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n columnNames += f\", A.[{column['field']}]\"\n columnNames += f\", {letter}.[{column['entityLabel']}] as {letter}_label\"\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['field']} \"\n\n if \"entityFilterByDepartment\" in column:\n leftJoin += f' AND ( {letter}.departments is null or INLIST({letter}.departments,\"{my_departments}\") = 1 ) '\n if \"entityFilterByUser\" in column:\n leftJoin += f' AND ( {letter}.users is null or INLIST({letter}.users,\"{my_users}\") = 1 ) '\n\n letter = self.getNextLetter(letter)\n\n elif column[\"type\"] == \"departmentSelector\":\n columnNames += f\", A.[departments]\"\n thisEntityHaveDepartmentFilter = True\n\n elif column[\"type\"] == \"userSelector\":\n columnNames += f\", A.[users]\"\n thisEntityHaveUserFilter = True\n\n elif column[\"type\"] == \"relatedEntity\":\n columnNames += f\", {letter}.[{column['entityLabel']}] as {column.field}\"\n if \"relatedColumnRelation\" in column and column[\"relatedColumnRelation\"]:\n left_on = str(column['relatedColumnRelation']).replace(\n \"#entity#\", \"A\").replace(\"#relatedEntity#\", letter)\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {left_on} \"\n else:\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['relatedForeignKey']} \"\n letter = self.getNextLetter(letter)\n\n sortBy = \"A.ID\"\n if \"sortBy\" in params and params[\"sortBy\"]:\n sortBy = f'A.{params[\"sortBy\"]}'\n elif \"sortBy\" in entity.definition and entity.definition[\"sortBy\"]:\n sortBy = f'A.{entity.definition[\"sortBy\"]}'\n where = \"\"\n letter = \"B\"\n\n if thisEntityHaveDepartmentFilter:\n where = f' WHERE ( A.departments is null or INLIST(A.departments,\"{my_departments}\") = 1 ) '\n if thisEntityHaveUserFilter:\n where = f' WHERE ( A.users is null or INLIST(A.users,\"{my_users}\") = 1 ) '\n\n # Add filter for group in related entities\n for column in entity.definition[\"columns\"]:\n if column[\"type\"] in [\"dropdown\", \"remoteDropdown\"] and (\"entityFilterByDepartment\" in column or \"entityFilterByUser\" in column):\n where += \" AND \" 
if where else \" WHERE \"\n where += f'A.{column[\"field\"]} is null or A.{column[\"field\"]} is not null and {letter}.id is not null '\n letter = self.getNextLetter(letter)\n\n param_list = tuple()\n if \"filters\" in params and params[\"filters\"] and len(params[\"filters\"]) > 0:\n for filter_item in params[\"filters\"]:\n if \"values\" in filter_item and filter_item[\"values\"] and len(filter_item[\"values\"]) > 0:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n\n if \".\" in str(filter_item[\"field\"]):\n mm_entity = \"MM\" + str(filter_item[\"field\"]).split(\".\")[0]\n mm_field = str(filter_item[\"field\"]).split(\".\")[1]\n if len(filter_item[\"values\"]) == 1:\n where += f\" {mm_entity}.[{mm_field}] = ?\"\n param_list += (append(filter_item[\"values\"][0]),)\n else:\n where += f\" {mm_entity}.[{mm_field}] IN ({','.join( filter_item['values'])})\"\n\n leftJoin += f\" INNER JOIN [{filter_item['field'].split('.')[0]}] as {mm_entity} ON {mm_entity}.{filter_item['relatedManyToManyKey']} = A.id \"\n else:\n if len(filter_item[\"values\"]) == 1:\n if filter_item[\"useLike\"]:\n where += f\" A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_item['values'][0]}%\",)\n else:\n where += f\" A.[{filter_item['field']}] = ?\"\n param_list += (filter_item[\"values\"][0],)\n else:\n if filter_item[\"useLike\"]:\n where += \" ( 1=2 \"\n for filter_value in filter_item[\"values\"]:\n if filter_value:\n where += f\" OR A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_value}%\",)\n where += \" ) \"\n else:\n where += f\" A.[{filter_item['field']}] IN ({','.join( filter_item['values'])})\"\n\n # Add fixed condition\n if \"condition\" in entity.definition and entity.definition[\"condition\"]:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n where += entity.definition[\"condition\"]\n\n sql = f\"SELECT {columnNames} FROM {entity_code} as A {leftJoin}\"\n if where != \"\":\n sql += where\n\n sql += f\" ORDER BY {sortBy}\"\n\n if \"fromReg\" in params and params[\"fromReg\"] > 0 and \"toReg\" in params and params[\"toReg\"] > 0:\n sql += F\" LIMIT {params['fromReg']-1}, {params['toReg']-params['fromReg']+1} \"\n\n cursor.execute(sql, param_list)\n for row in cursor:\n dic = {\"id\": row[\"id\"]}\n letter = \"B\"\n\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\", \"dateTime\", \"date\"]:\n dic[column[\"field\"]] = row[column[\"field\"]]\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n dic[column[\"field\"]] = f\"{row[column['field']]}|-|{row[f'{letter}_label']}\"\n letter = self.getNextLetter(letter)\n elif column[\"type\"] == \"departmentSelector\":\n dic[\"departments\"] = row[\"departments\"]\n elif column[\"type\"] == \"userSelector\":\n dic[\"users\"] = row[\"users\"]\n elif column[\"type\"] == \"relatedEntity\":\n dic[column[\"field\"]] = row[column[\"field\"]]\n letter = self.getNextLetter(letter)\n\n res.append(dic)\n\n finally:\n conn.close()\n\n return res", "def _import_data(self, data, base_url, endpoint, timezone_offset=None, ignore_alias=False, dataset_id=None,\n dataset_version=None, raw_record_import=False):\n assert self.token, \"Project token required for import!\"\n if self.dataset_id or dataset_version:\n if not (dataset_id and dataset_version):\n Mixpanel.LOGGER.warning('Both dataset_id AND dataset_version are required')\n return\n\n # Create a list of arguments to be used in one of the _prep functions later\n args = [{}, self.token]\n\n 
item_list = Mixpanel._list_from_argument(data)\n if not raw_record_import:\n if endpoint == 'import' or endpoint == 'import-events':\n args.append(timezone_offset)\n elif endpoint == 'engage' or endpoint == 'import-people':\n args.extend(['$set', lambda profile: profile['$properties'], ignore_alias, True])\n else:\n args = None\n\n self._dispatch_batches(base_url, endpoint, item_list, args, dataset_id=dataset_id,\n dataset_version=dataset_version)", "def ez_fetch(auth_token, dataset_id, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_fetch\"\n payload = {\n \"dataset_id\": dataset_id,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def fetch(self):\n # This method also sets self._results_filtered and\n # self._urltable.\n page = self._conn.fetch_page(self._ddg_url.relative())\n\n if logger.isEnabledFor(logging.DEBUG):\n import tempfile\n fd, tmpfile = tempfile.mkstemp(prefix='ddgr-response-')\n os.close(fd)\n with open(tmpfile, 'w', encoding='utf-8') as fp:\n fp.write(page)\n logger.debug(\"Response body written to '%s'.\", tmpfile)\n\n parser = DdgParser(news=self._ddg_url.news)\n parser.feed(page)\n\n self.results = parser.results\n self._results_filtered = parser.filtered\n self._urltable = {}\n for r in self.results:\n self._urltable.update(r.urltable())", "def fetch_details_from_api(self, org_names=None):\n logger.debug('Fetching org details from API...')\n details = {}\n if org_names is None:\n org_names = self._all_page_names(without_namespace=True)\n for org in org_names:\n code = self._code_by_name(org)\n if code is None:\n continue\n data = self._data_by_code(code)\n if data is None:\n continue\n details[org] = data\n # Replace parent code with parent name (preferredLabel)\n parent_code = details[org].get('subOrganizationOf')\n if parent_code:\n parent_name = self._name_by_code(parent_code)\n if parent_name is None:\n parent_name = ''\n details[org]['subOrganizationOf'] = parent_name\n purpose_ids = details[org].get('purpose')\n # Replace purpose ids with purpose (function) names\n if purpose_ids:\n details[org]['purpose'] = ','.join([\n self._purpose_by_id[id_] for id_ in purpose_ids])\n # Replace status with greek translation\n status = details[org].get('status')\n if status:\n details[org]['status'] = self.STATUS_TRANSLATION[status]\n # Replace type id with type name\n type_id = details[org].get('organizationType')\n if type_id:\n details[org]['organizationType'] = self._type_by_id[type_id]\n logger.debug(f'{org} - fetched details')\n logger.debug('Fetched org details.')\n return details", "def table_data(api, start_date: datetime.datetime, end_date: datetime.datetime, 
path: str, column_names: dict = None):\n if column_names is None:\n column_names = {\n \"path\": \"URL\",\n \"method\": \"Method\",\n \"status_code\": \"Status\",\n \"response_time\": \"Duration in [s]\",\n \"date\": \"Date [UTC]\",\n \"remote_address\": \"IP Address\",\n \"user_country_name\": \"Location\",\n \"platform\": \"Operating System\",\n \"browser\": \"Browser\"\n }\n\n requests = api.get_requests_for_path(path, start_date, end_date)\n\n # performance issues?\n # Purpose: Only use specified column_names\n return [\n [vars(request).get(column_name) for column_name in column_names]\n for request in requests\n ], column_names.values()", "def _request_data(self, url):\n connection = httplib.HTTPConnection(self.url)\n connection.request(\"GET\", url)\n response = connection.getresponse()\n\n if response.status != 200:\n raise Exception(response.reason)\n\n data = response.read()\n response.close()\n\n return json.loads(data)", "def fetch(self, data: Any, *args: Any, **kwargs: Any):\n product = None\n next_args = (data, *args)\n next_kwargs = kwargs\n for name, method, outlet, description in self.steps:\n product, new_args, next_kwargs = method(*next_args, **next_kwargs)\n next_args = (product, *new_args)\n if isinstance(product, self.outlet):\n return product\n else:\n raise RuntimeError(\"Process was not completed according to specification.\")", "def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.slug)\n self.__init__(**data)", "def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.id)\n self.__init__(**data)", "def _query(self, mapping, from_date=None, to_date=None, max_count=None,\n offset=None, ascendingly=True, describe=False):\n group, key = mapping.data_var.split(self._data_var_separator)\n\n # build params\n params = 'describe={describe}&keys={key}'.format(describe=str(describe).lower(), key=key)\n if self._api['token'] is not None:\n params += '&apitoken={}'.format(self._api['token'])\n if from_date is not None:\n params += '&from-date={}'.format(from_date.isoformat())\n if to_date is not None:\n params += '&to-date={}'.format(to_date.isoformat())\n\n # build url\n url = '{}{}?{}'.format(self._api['host'], self._api['url'], params).format(group=group)\n\n r = requests.get(url)\n if r.status_code == 200:\n data = json.loads(r.content.decode('utf-8'))\n # return query result\n if not describe:\n # sort\n data = sorted(\n data,\n key=lambda k: k.get(self._timestampkey),\n reverse=(not ascendingly))\n # apply constraints\n if offset is not None:\n data = data[offset:]\n if max_count is not None:\n data = data[:max_count]\n # process to query result\n res = QueryResult(mapping.obs_uri)\n for r in data:\n res.add_row(\n dateutil.parser.parse(r.get(self._timestampkey)),\n r.get(self._valuekey))\n # return\n return res\n # return query result description\n else:\n min = data.get('mindate', None)\n if min is not None:\n min = dateutil.parser.parse(min)\n max = data.get('maxdate', None)\n if max is not None:\n max = dateutil.parser.parse(max)\n return QueryResultDescription(mapping.obs_uri, min, max, data.get('count', 0))\n else:\n # empty/erronous response\n self.pyerr(\"Failed calling API: {}\".format(url))\n if not describe:\n return QueryResult(mapping.obs_uri)\n return QueryResultDescription(mapping.obs_uri, None, None, 0)" ]
[ "0.64704037", "0.6393368", "0.62753636", "0.61291873", "0.6107467", "0.59635717", "0.5958843", "0.5937075", "0.5928641", "0.58827174", "0.5872337", "0.58669746", "0.58654255", "0.58403516", "0.5829332", "0.58139884", "0.58029085", "0.5763503", "0.56696826", "0.5658464", "0.5648578", "0.5644146", "0.56364757", "0.56293327", "0.5604701", "0.5594672", "0.5566977", "0.5559215", "0.55552286", "0.553738", "0.55115086", "0.5502249", "0.5490657", "0.54889363", "0.54889363", "0.54697186", "0.54543537", "0.54398894", "0.5429007", "0.54274845", "0.5418097", "0.5409964", "0.5408401", "0.53996897", "0.5395072", "0.53940105", "0.53865355", "0.5386145", "0.53854483", "0.53854483", "0.5372101", "0.5370517", "0.5368507", "0.53608304", "0.53561866", "0.5352154", "0.5349396", "0.53455937", "0.53289765", "0.5325429", "0.5314452", "0.5311737", "0.5308452", "0.53051984", "0.53041893", "0.52877825", "0.52855736", "0.5279343", "0.5275904", "0.5269918", "0.5265907", "0.5255829", "0.52399415", "0.5235853", "0.5229626", "0.5223876", "0.52108216", "0.52055025", "0.52048874", "0.51991177", "0.51911795", "0.5189821", "0.5185452", "0.51785266", "0.5177714", "0.5175824", "0.51718444", "0.5163134", "0.5161015", "0.51545584", "0.514643", "0.5145029", "0.5144144", "0.5138915", "0.51232773", "0.5122885", "0.5121065", "0.5120775", "0.5117911", "0.5111201", "0.510485" ]
0.0
-1
Unload a config entry.
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_unload_entry(hass, config_entry):\n unload_ok = await hass.config_entries.async_forward_entry_unload(\n config_entry, \"climate\"\n )\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n hass.data.pop(DOMAIN)\n return True", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)\n\n return unload_ok", "async def test_unload_config_entry(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n entry = await init_integration(hass, aioclient_mock)\n assert hass.data[DOMAIN]\n\n await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n assert not hass.data.get(DOMAIN)", "async def async_unload_entry(hass, config_entry):\n hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)\n\n remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(\n config_entry.entry_id)\n remove_listener()\n\n await hass.config_entries.async_forward_entry_unload(\n config_entry, 'sensor')\n\n return True", "async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n unload_ok = bool(\n await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)\n )\n if unload_ok:\n hass.data[DOMAIN].pop(config_entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry):\n unload_ok = await hass.config_entries.async_unload_platforms(\n config_entry, PLATFORMS\n )\n if unload_ok:\n shell = hass.data[DOMAIN]\n shell.remove_entry(config_entry)\n if shell.is_idle():\n # also remove shell if not used by any entry any more\n del hass.data[DOMAIN]\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(\n config_entry, PLATFORMS\n ):\n hass.data[DOMAIN].pop(config_entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n del hass.data[DOMAIN][entry.entry_id]\n if not hass.data[DOMAIN]:\n del hass.data[DOMAIN]\n _LOGGER.debug(\"Unloaded entry for %s\", entry.title)\n return True\n return False", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await 
hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def test_unload_entry(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n unique_id=\"0123456789\",\n data=DEFAULT_CONFIG,\n options=DEFAULT_OPTIONS,\n )\n entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n assert await hass.config_entries.async_unload(entry.entry_id)\n assert not hass.data[DOMAIN]", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n\n _LOGGER.debug(\"Unload entry\")\n unloaded = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n ]\n )\n )\n if unloaded:\n coordinator = hass.data[DOMAIN].pop(entry.entry_id)\n coordinator.unsub()\n\n return True # unloaded", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in AUGUST_COMPONENTS\n ]\n )\n )\n\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n if not hass.data[DOMAIN]:\n hass.data.pop(DOMAIN)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await 
hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):\n unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n opp.data[DOMAIN].pop(entry.entry_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n coordinator = hass.data[DOMAIN][entry.entry_id]\n unloaded = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n if platform in coordinator.platforms\n ]\n )\n )\n if unloaded:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unloaded", "async def test_unload_entry(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n) -> None:\n domain = select.DOMAIN\n config = DEFAULT_CONFIG\n await help_test_unload_config_entry_with_platform(\n hass, mqtt_mock_entry, domain, config\n )", "async def test_unload_entry(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n) -> None:\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n entry = entries[0]\n assert entry.state is ConfigEntryState.LOADED\n\n assert await hass.config_entries.async_unload(entry.entry_id)\n assert entry.state == ConfigEntryState.NOT_LOADED", "async def async_unload_entry(hass: HomeAssistantType, config_entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(config_entry, component)\n for component in SUPPORTED_PLATFORMS\n ]\n )\n )\n if unload_ok:\n hass.data[DOMAIN].pop(config_entry.unique_id)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n if len(hass.config_entries.async_entries(DOMAIN)) == 1:\n hass.services.async_remove(DOMAIN, SERVICE_API_CALL)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n hass.data[DOMAIN].pop(entry.entry_id)\n loaded_entries = [\n entry\n for entry in hass.config_entries.async_entries(DOMAIN)\n if entry.state == ConfigEntryState.LOADED\n ]\n if len(loaded_entries) == 1:\n for service_name in hass.services.async_services()[DOMAIN]:\n hass.services.async_remove(DOMAIN, service_name)\n\n conversation.async_unset_agent(hass, entry)\n\n return True", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n del hass.data[DOMAIN][entry.entry_id]\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n del hass.data[DOMAIN][entry.entry_id]\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: 
ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n del hass.data[DOMAIN][entry.entry_id]\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if unloaded := await hass.config_entries.async_unload_platforms(entry, [PLATFORM]):\n hass.data[DOMAIN].pop(entry.entry_id)\n return unloaded", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n\n # Pop add-on data\n hass.data.pop(ADDONS_COORDINATOR, None)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n\n if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):\n hass.data[DOMAIN].pop(entry.entry_id)\n\n if not hass.data[DOMAIN]:\n hass.data.pop(DOMAIN)\n\n return unload_ok", "async def async_unload_entry(hass, entry):\n # TODO: This is not finished yet\n identifier = entry.data[CONF_IDENTIFIER]\n manager = hass.data[DOMAIN].pop(identifier)\n await manager.disconnect()\n\n await hass.config_entries.async_forward_entry_unload(entry, \"media_player\")\n\n # TODO: unload remote?\n\n return True", "async def async_unload_entry(hass, config_entry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(\n config_entry, PLATFORMS\n )\n for listener in hass.data[DOMAIN][config_entry.entry_id][DATA_LISTENER]:\n listener()\n username = config_entry.title\n if unload_ok:\n hass.data[DOMAIN].pop(config_entry.entry_id)\n _LOGGER.debug(\"Unloaded entry for %s\", username)\n return True\n return False", "async def async_unload_entry(hass, config_entry):\n try:\n await hass.config_entries.async_forward_entry_unload(config_entry, \"sensor\")\n _LOGGER.info(\"Successfully removed sensor from the \" + DOMAIN + \" integration\")\n except ValueError:\n pass\n return True", "async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n _LOGGER.debug(\"Closing Firmata board %s\", config_entry.data[CONF_NAME])\n\n unload_entries = []\n for conf, platform in CONF_PLATFORM_MAP.items():\n if conf in config_entry.data:\n unload_entries.append(\n hass.config_entries.async_forward_entry_unload(config_entry, platform)\n )\n results = []\n if unload_entries:\n results = await asyncio.gather(*unload_entries)\n results.append(await hass.data[DOMAIN].pop(config_entry.entry_id).async_reset())\n\n return False not in results", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n del hass.data[DOMAIN]\n\n return unload_ok", "async def test_unload_entry(hass):\n entry = await init_integration(hass)\n\n assert len(hass.config_entries.async_entries(DOMAIN)) == 1\n assert entry.state == ENTRY_STATE_LOADED\n\n assert await hass.config_entries.async_unload(entry.entry_id)\n await hass.async_block_till_done()\n\n assert entry.state == ENTRY_STATE_NOT_LOADED\n assert not hass.data.get(DOMAIN)", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n if hass.data[DOMAIN].get(DISPATCHERS) is not None:\n for cleanup in hass.data[DOMAIN][DISPATCHERS]:\n cleanup()\n\n if hass.data[DOMAIN].get(DATA_DISCOVERY_INTERVAL) is not None:\n hass.data[DOMAIN].pop(DATA_DISCOVERY_INTERVAL)()\n\n if hass.data.get(DATA_DISCOVERY_SERVICE) is not None:\n hass.data.pop(DATA_DISCOVERY_SERVICE)\n\n results = asyncio.gather(\n 
hass.config_entries.async_forward_entry_unload(entry, CLIMATE_DOMAIN),\n hass.config_entries.async_forward_entry_unload(entry, SWITCH_DOMAIN),\n )\n\n unload_ok = all(await results)\n if unload_ok:\n hass.data[DOMAIN].pop(COORDINATORS, None)\n hass.data[DOMAIN].pop(DISPATCHERS, None)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in PLATFORMS\n ]\n )\n )\n if unload_ok:\n await hass.data[DOMAIN].pop(entry.entry_id).shutdown()\n\n return unload_ok", "async def test_unload_entry(opp, nzbget_api):\n entry = await init_integration(opp)\n\n assert len(opp.config_entries.async_entries(DOMAIN)) == 1\n assert entry.state is ConfigEntryState.LOADED\n\n assert await opp.config_entries.async_unload(entry.entry_id)\n await opp.async_block_till_done()\n\n assert entry.state is ConfigEntryState.NOT_LOADED\n assert not opp.data.get(DOMAIN)", "async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:\n if unload_ok := await hass.config_entries.async_unload_platforms(\n config_entry, PLATFORMS\n ):\n del hass.data[DOMAIN][config_entry.unique_id]\n if not hass.data[DOMAIN]:\n async_unload_services(hass)\n return unload_ok", "def remove_config(name):\n db = dbm.open(config_file, 'c')\n del db[name]\n db.close()", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n return await hass.config_entries.async_unload_platforms(\n entry, (entry.options[\"group_type\"],)\n )", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n\n data: AugustData = hass.data[DOMAIN][entry.entry_id]\n data.async_stop()\n\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unloaded = all(\n await asyncio.gather(\n *[\n hass.config_entries.async_forward_entry_unload(entry, platform)\n for platform in PLATFORMS\n ]\n )\n )\n if unloaded:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unloaded", "async def test_unload_entry(recorder_mock: Recorder, hass: HomeAssistant) -> None:\n config_entry = await init_integration(hass)\n assert config_entry.state == config_entries.ConfigEntryState.LOADED\n\n assert await hass.config_entries.async_unload(config_entry.entry_id)\n await hass.async_block_till_done()\n assert config_entry.state is config_entries.ConfigEntryState.NOT_LOADED", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await opp.data[DOMAIN].async_unload_entry(entry) # type: ignore", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n hap = hass.data[DOMAIN].pop(entry.unique_id)\n hap.reset_connection_listener()\n\n await async_unload_services(hass)\n\n return await hap.async_reset()", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if not unload_ok:\n return False\n\n 
cookie_file = hass.config.path(STORAGE_DIR, f\"verisure_{entry.entry_id}\")\n with suppress(FileNotFoundError):\n await hass.async_add_executor_job(os.unlink, cookie_file)\n\n del hass.data[DOMAIN][entry.entry_id]\n\n if not hass.data[DOMAIN]:\n del hass.data[DOMAIN]\n\n return True", "async def test_unload_entry(hass):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n assert hass.data[AXIS_DOMAIN]\n\n assert await hass.config_entries.async_unload(device.config_entry.entry_id)\n assert not hass.data[AXIS_DOMAIN]", "def del_conf(self, path):\n\t\tself.monitor.removePath(path)\n\t\tself.cache.pop(path, None)", "async def test_unload_entry(_, hass: HomeAssistant) -> None:\n mock_entry_data = {\n \"device\": \"/dev/USB0\",\n \"model\": \"LUGCUH50\",\n \"device_number\": \"12345\",\n }\n mock_entry = MockConfigEntry(\n domain=\"landisgyr_heat_meter\",\n title=\"LUGCUH50\",\n entry_id=\"987654321\",\n data=mock_entry_data,\n )\n mock_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n assert \"landisgyr_heat_meter\" in hass.config.components\n\n assert await hass.config_entries.async_remove(mock_entry.entry_id)", "async def async_unload_entry(hass, entry):\n await hass.config_entries.async_forward_entry_unload(\n entry, 'media_player')\n return True", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n\n # Shutdown a harmony remote for removal\n entry_data = hass.data[DOMAIN][entry.entry_id]\n entry_data[CANCEL_LISTENER]()\n entry_data[CANCEL_STOP]()\n await entry_data[HARMONY_DATA].shutdown()\n\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "def config_exit(self):\n self._master.destroy()\n self._config_status = False # ensure the world wouldn't be built", "def clear(self):\r\n del self.__config\r\n self.__config = {}\r\n self.save()", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)\n\n return unload_ok", "async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):\n\n tasks = []\n\n for platform in KETRA_PLATFORMS:\n tasks.append(hass.config_entries.async_forward_entry_unload(entry, platform))\n\n await asyncio.gather(*tasks)\n\n common_platform = hass.data[DOMAIN][entry.unique_id][\"common_platform\"]\n await common_platform.shutdown()\n\n return True", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)", "async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool:\n return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n component: EntityComponent[SensorEntity] = hass.data[DOMAIN]\n return await component.async_unload_entry(entry)", "def remove_config_object() -> None:\n if G_CONFIG_OBJECT:\n G_CONFIG_OBJECT.clear()", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n loaded_entries = [\n entry\n for entry in hass.config_entries.async_entries(DOMAIN)\n if entry.state == ConfigEntryState.LOADED\n ]\n if 
len(loaded_entries) == 1:\n # If this is the last loaded instance of RainMachine, deregister any services\n # defined during integration setup:\n for service_name in (\n SERVICE_NAME_PAUSE_WATERING,\n SERVICE_NAME_PUSH_FLOW_METER_DATA,\n SERVICE_NAME_PUSH_WEATHER_DATA,\n SERVICE_NAME_RESTRICT_WATERING,\n SERVICE_NAME_STOP_ALL,\n SERVICE_NAME_UNPAUSE_WATERING,\n SERVICE_NAME_UNRESTRICT_WATERING,\n ):\n hass.services.async_remove(DOMAIN, service_name)\n\n return unload_ok", "async def async_unload_entry_gw(hass: HomeAssistant, entry: ConfigEntry):\n unload_ok = all(\n await asyncio.gather(\n *(\n hass.config_entries.async_forward_entry_unload(entry, component)\n for component in GATEWAY_PLATFORMS\n )\n )\n )\n\n hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()\n\n if unload_ok:\n hass.data[DOMAIN].pop(entry.entry_id)\n\n return unload_ok", "async def test_unload(hass: HomeAssistant, ufp: MockUFPFixture, light: Light) -> None:\n\n await init_entry(hass, ufp, [light])\n assert ufp.entry.state == ConfigEntryState.LOADED\n\n await hass.config_entries.async_unload(ufp.entry.entry_id)\n assert ufp.entry.state == ConfigEntryState.NOT_LOADED\n assert ufp.api.async_disconnect_ws.called", "def unloadaddon(self, addonName):\r\n es.unload(\"%s/addons/%s\" % (info.basename, addonName))", "async def async_remove_entry(hass, config_entry):\n try:\n await hass.config_entries.async_forward_entry_unload(\n config_entry, \"binary_sensor\"\n )\n _LOGGER.info(\n \"Successfully removed binary_sensor from the gtasks integration\"\n )\n except ValueError:\n pass\n\n try:\n await hass.config_entries.async_forward_entry_unload(config_entry, \"sensor\")\n _LOGGER.info(\"Successfully removed sensor from the gtasks integration\")\n except ValueError:\n pass", "async def test_unload_entry(hass: HomeAssistant) -> None:\n\n now = dt_util.parse_datetime(\"2021-01-09 12:00:00+00:00\")\n with patch(\"homeassistant.util.dt.now\", return_value=now), patch(\n \"homeassistant.util.dt.utcnow\", return_value=now\n ), requests_mock.mock() as _m:\n aemet_requests_mock(_m)\n\n config_entry = MockConfigEntry(\n domain=DOMAIN, unique_id=\"aemet_unique_id\", data=CONFIG\n )\n config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n assert config_entry.state is ConfigEntryState.LOADED\n\n await hass.config_entries.async_unload(config_entry.entry_id)\n await hass.async_block_till_done()\n assert config_entry.state is ConfigEntryState.NOT_LOADED", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n if entry.data.get(CONF_ID):\n # discovery\n scanner = YeelightScanner.async_get(hass)\n scanner.async_unregister_callback(entry.data[CONF_ID])\n\n data_config_entries = hass.data[DOMAIN][DATA_CONFIG_ENTRIES]\n if entry.entry_id not in data_config_entries:\n # Device not online\n return True\n\n entry_data = data_config_entries[entry.entry_id]\n unload_ok = True\n if entry_data[DATA_PLATFORMS_LOADED]:\n unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)\n\n if DATA_DEVICE in entry_data:\n device = entry_data[DATA_DEVICE]\n _LOGGER.debug(\"Shutting down Yeelight Listener\")\n await device.bulb.async_stop_listening()\n _LOGGER.debug(\"Yeelight Listener stopped\")\n\n data_config_entries.pop(entry.entry_id)\n return unload_ok", "def cleanup(self):\n self.exit_config_mode()", "async def test_load_unload_config_entry(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_whois: 
MagicMock,\n) -> None:\n mock_config_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config_entry.state is ConfigEntryState.LOADED\n assert len(mock_whois.mock_calls) == 1\n\n await hass.config_entries.async_unload(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert not hass.data.get(DOMAIN)\n assert mock_config_entry.state is ConfigEntryState.NOT_LOADED", "async def async_unload_entry(hass, config_entry):\n _LOGGER.info(\"Remove the Spotify token from AIS gate and cloud\")\n try:\n import os\n os.remove(hass.config.path(DEFAULT_CACHE_PATH))\n _LOGGER.info(\"Token from cache file removed\")\n except Exception as e:\n _LOGGER.error(\"Error removing token cache file \" + str(e))\n try:\n ws_resp = aisCloud.delete_key(\"spotify_token\")\n key = ws_resp.json()[\"key\"]\n _LOGGER.info(\"Token from AIS cloud removed \" + str(key))\n except Exception as e:\n _LOGGER.error(\"Error removing token from cloud \" + str(e))\n\n # setup the Spotify\n await async_setup(hass, hass.config)\n return True", "def deleteGRASSEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.GRASS_SECTION, key)", "def delKey(self, key ):\n if key in self.conf:\n del self.conf[key]", "async def test_load_unload_config_entry(\n hass: HomeAssistant, mock_config_entry: MockConfigEntry, tmpdir: str\n) -> None:\n testfile = f\"{tmpdir}/file.txt\"\n create_file(testfile)\n hass.config.allowlist_external_dirs = {tmpdir}\n mock_config_entry.add_to_hass(hass)\n hass.config_entries.async_update_entry(\n mock_config_entry, unique_id=testfile, data={CONF_FILE_PATH: testfile}\n )\n await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config_entry.state is ConfigEntryState.LOADED\n\n await hass.config_entries.async_unload(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert not hass.data.get(DOMAIN)\n assert mock_config_entry.state is ConfigEntryState.NOT_LOADED", "def acShutdown():\n # Update config if necessary\n if cfg.update_cfg:\n cfg.save()", "def deconfigure(self):\n\n pass", "async def test_load_unload_config_entry(\n hass: HomeAssistant,\n mock_config_entry: MockConfigEntry,\n mock_jellyfin: MagicMock,\n) -> None:\n mock_config_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config_entry.entry_id in hass.data[DOMAIN]\n assert mock_config_entry.state is ConfigEntryState.LOADED\n\n await hass.config_entries.async_unload(mock_config_entry.entry_id)\n await hass.async_block_till_done()\n assert mock_config_entry.entry_id not in hass.data[DOMAIN]\n assert mock_config_entry.state is ConfigEntryState.NOT_LOADED", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n unload_ok = await hass.config_entries.async_unload_platforms(\n entry, SUPPORTED_PLATFORMS\n )\n\n if unload_ok:\n matter_entry_data: MatterEntryData = hass.data[DOMAIN].pop(entry.entry_id)\n matter_entry_data.listen_task.cancel()\n await matter_entry_data.adapter.matter_client.disconnect()\n\n if entry.data.get(CONF_USE_ADDON) and entry.disabled_by:\n addon_manager: AddonManager = get_addon_manager(hass)\n LOGGER.debug(\"Stopping Matter Server add-on\")\n try:\n await addon_manager.async_stop_addon()\n except AddonError as err:\n LOGGER.error(\"Failed to stop the Matter Server add-on: %s\", err)\n return 
False\n\n return unload_ok", "def remove_stored_config(self):\n stored_config_filename = self.stored_config_filename\n if stored_config_filename.exists():\n stored_config_filename.remove()\n self._stored_cmake_generator = self._stored_config.cmake_generator", "def _del(self, entry):\n entry.key = dummy\n entry.value = None\n self.used -= 1", "def remove_prompt(name, delete_config):\n\n with open(DATABASE_FILE_PATH) as f:\n config = json.load(f)\n path = config[name]\n del config[name]\n\n with open(DATABASE_FILE_PATH, 'w') as f:\n json.dump(config, f)\n\n if delete_config:\n os.remove(path)" ]
[ "0.697284", "0.6888074", "0.6779855", "0.6747459", "0.6689002", "0.6657831", "0.66162205", "0.6603433", "0.65925974", "0.65595686", "0.65411645", "0.6507643", "0.6507643", "0.6507643", "0.6507643", "0.64977276", "0.64931643", "0.6486601", "0.6486601", "0.6486601", "0.6486601", "0.6469397", "0.6469397", "0.6462678", "0.64596915", "0.64530563", "0.64530563", "0.64530563", "0.64530563", "0.64405274", "0.6437986", "0.6433246", "0.64269835", "0.64135826", "0.639899", "0.63978153", "0.639415", "0.639415", "0.639415", "0.63931185", "0.63878036", "0.63742703", "0.6371073", "0.6331371", "0.6280038", "0.62670827", "0.6265014", "0.6256049", "0.6246551", "0.62452537", "0.6242181", "0.6221915", "0.61827", "0.6181224", "0.6150584", "0.61350876", "0.611984", "0.60854065", "0.60852206", "0.60818183", "0.6057508", "0.6048585", "0.6042227", "0.6040048", "0.6024285", "0.59791756", "0.5943831", "0.5922388", "0.5889516", "0.58745533", "0.5864566", "0.58642447", "0.58642447", "0.58630556", "0.5833692", "0.5775617", "0.5764828", "0.5734671", "0.5719406", "0.5716535", "0.57139283", "0.5683943", "0.56723887", "0.56712943", "0.56245375", "0.56229323", "0.5614622", "0.5606286", "0.5589333", "0.5531308", "0.55294573", "0.5523446", "0.5517512", "0.54772425", "0.54116964" ]
0.6459733
29
Creates a BuienRadar class
def __init__(self): self.buienradar_rpc = {"rain_at": self.rain_at, "rain_max": self.rain_max } multiprocessing.Process.__init__(self) self.name = 'buienradar' self.shutdown = False self._sched = None self._rain = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_radar_chart(num_vars, frame='polygon', **kwargs):\n theta = _theta(num_vars)\n\n def draw_poly_patch(self):\n verts = _unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n # unit circle centered on (0.5, 0.5)\n return plt.Circle((0.5, 0.5), 0.5)\n\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n class RadarAxes(PolarAxes):\n \"\"\"\n Projection class for a radar chart\n \"\"\"\n\n name = 'radar'\n size = num_vars\n # use 1 line segment to connect specified points\n RESOLUTION = 1\n # define draw_frame method\n shape = frame\n draw_patch = patch_dict[frame]\n\n def set_rscale(self, top, bottom=0, round_up=False):\n \"\"\"Scale the radar chart\n If circle chart then this function just sets the ylim of the polar ax.\n If polygon chart then ylim will be set to fit a circle with radius h\n completely inside it (distance from center to midpoint of polygon \n edge will be h.\n \"\"\"\n if self.shape == 'circle':\n r = top\n elif self.shape == 'polygon':\n angle_of_slice = 2 * np.pi / self.size\n r = top / np.cos(angle_of_slice / 2.)\n if round_up:\n r = np.ceil(r)\n else:\n # this should never happen since this is checked for in class\n # creation\n raise ValueError('unknown value for `frame`: %s' % self.shape)\n self.set_ylim(bottom, r)\n\n def fill(self, *args, **kwargs):\n \"\"\"Override fill so that line is closed by default\"\"\"\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\n\n def plot(self, *args, **kwargs):\n \"\"\"Override plot so that line is closed by default\"\"\"\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n # FIXME: markers at x[0], y[0] get doubled-up\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n \"\"\"Label the radial axes\"\"\"\n self.set_thetagrids(np.degrees(theta) % FULL_CIRCLE_DEG, labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n # The following is a hack to get the spines (i.e. 
the axes frame)\n # to draw correctly for a polygon frame.\n\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\n spine_type = 'circle'\n verts = _unit_poly_verts(theta)\n # close off polygon by repeating first vertex\n verts.append(verts[0])\n path = Path(verts)\n\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n\n register_projection(RadarAxes)\n \n # if subplot_kw argument is given, overwrite projection field\n # TODO: maybe throw error when projection is given?\n if 'subplot_kw' in kwargs:\n kwargs['subplot_kw']['projection'] = 'radar'\n else:\n kwargs['subplot_kw'] = {'projection': 'radar'}\n fig, axes = plt.subplots(**kwargs)\n\n return fig, axes", "def radar_factory(num_vars, frame='circle'):\r\n # calculate evenly-spaced axis angles\r\n theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)\r\n # rotate theta such that the first axis is at the top\r\n theta += np.pi/2\r\n\r\n def draw_poly_patch(self):\r\n verts = unit_poly_verts(theta)\r\n return plt.Polygon(verts, closed=True, edgecolor='k')\r\n\r\n def draw_circle_patch(self):\r\n # unit circle centered on (0.5, 0.5)\r\n return plt.Circle((0.5, 0.5), 0.5)\r\n\r\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\r\n if frame not in patch_dict:\r\n raise ValueError('unknown value for `frame`: %s' % frame)\r\n\r\n class RadarAxes(PolarAxes):\r\n\r\n name = 'radar'\r\n # use 1 line segment to connect specified points\r\n RESOLUTION = 1\r\n # define draw_frame method\r\n draw_patch = patch_dict[frame]\r\n\r\n def fill(self, *args, **kwargs):\r\n \"\"\"Override fill so that line is closed by default\"\"\"\r\n closed = kwargs.pop('closed', True)\r\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\r\n\r\n def plot(self, *args, **kwargs):\r\n \"\"\"Override plot so that line is closed by default\"\"\"\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)\r\n\r\n def _close_line(self, line):\r\n x, y = line.get_data()\r\n # FIXME: markers at x[0], y[0] get doubled-up\r\n if x[0] != x[-1]:\r\n x = np.concatenate((x, [x[0]]))\r\n y = np.concatenate((y, [y[0]]))\r\n line.set_data(x, y)\r\n\r\n def set_varlabels(self, labels):\r\n self.set_thetagrids(np.degrees(theta), labels)\r\n\r\n def _gen_axes_patch(self):\r\n return self.draw_patch()\r\n\r\n def _gen_axes_spines(self):\r\n if frame == 'circle':\r\n return PolarAxes._gen_axes_spines(self)\r\n # The following is a hack to get the spines (i.e. 
the axes frame)\r\n # to draw correctly for a polygon frame.\r\n\r\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\r\n spine_type = 'circle'\r\n verts = unit_poly_verts(theta)\r\n # close off polygon by repeating first vertex\r\n verts.append(verts[0])\r\n path = Path(verts)\r\n\r\n spine = Spine(self, spine_type, path)\r\n spine.set_transform(self.transAxes)\r\n return {'polar': spine}\r\n\r\n register_projection(RadarAxes)\r\n return theta", "def __init__(self):\n raise NotImplementedError('cannot create independent arc')", "def _create_rain(self):\n r_calc = self._calculate_spacing()\n # Create the full screen of raindrops.\n for raindrop_y in range(r_calc[3]):\n self._create_raindrops_y(raindrop_y)", "def __init__(self,x_pos, y_pos, velocity, kind, fillcolor = 'red'):\n self._velocity = velocity\n self._kind = kind\n super().__init__(x = x_pos, y=y_pos, width = BOLT_WIDTH, \\\n height = BOLT_HEIGHT, fillcolor=fillcolor)", "def create_barn_door(self):\n light_shape = self.light.getShape()\n inputs = light_shape.inputs(type='aiBarndoor')\n if inputs:\n self.barn_door = inputs[0]\n else:\n self.barn_door = pm.createNode('aiBarndoor')\n self.barn_door.attr('message') >> \\\n light_shape.attr('aiFilters').next_available", "def radar_factory(num_vars, frame='circle'):\n # calculate evenly-spaced axis angles\n theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)\n # rotate theta such that the first axis is at the top\n theta += np.pi/2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n # unit circle centered on (0.5, 0.5)\n return plt.Circle((0.5, 0.5), 0.5)\n\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n class RadarAxes(PolarAxes):\n\n name = 'radar'\n # use 1 line segment to connect specified points\n RESOLUTION = 1\n # define draw_frame method\n draw_patch = patch_dict[frame]\n\n def fill(self, *args, **kwargs):\n \"\"\"Override fill so that line is closed by default\"\"\"\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\n\n def plot(self, *args, **kwargs):\n \"\"\"Override plot so that line is closed by default\"\"\"\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n # FIXME: markers at x[0], y[0] get doubled-up\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(np.degrees(theta), labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n # The following is a hack to get the spines (i.e. 
the axes frame)\n # to draw correctly for a polygon frame.\n\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n # close off polygon by repeating first vertex\n verts.append(verts[0])\n path = Path(verts)\n\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n\n register_projection(RadarAxes)\n return theta", "def __init__(self, thresh=0.1, ar_depth=1, rnd_key=random.PRNGKey(42)):\n self.thresh = thresh\n self.ar_depth = ar_depth\n\n params = random.normal(rnd_key, shape=(ar_depth+1, ))\n self.did_fit = False\n super().__init__('RainDay', params)", "def __init__(self, r,g,b):\n self.__r = r; self.__g = g; self.__b = b", "def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10", "def __init__(self, width, height):\r\n super().__init__(width, height)\r\n\r\n self.rifle = Rifle()\r\n self.score = 0\r\n\r\n self.bullets = []\r\n\r\n # TODO: Create a list for your targets (similar to the above bullets)\r\n self.targets = []\r\n\r\n arcade.set_background_color(arcade.color.WHITE)", "def __init__(self, BridgeObj, speed=0.005):\n self.speed = speed\n self.BridgeObj = BridgeObj\n print(\"[RainbowAll] Mode Initialized. Speed : \" + str(speed))", "def __init__(self, rings=False, branches=False):\n self.rings = rings\n self.branches = branches", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def __init__(self,make,model,year):\n super().__init__(make,model,year)\n # adicionando atributo especifico dessa classe\n self.batery_size = Batery(100)", "def __init__(self, fromPort = 0, toPort = 0, Type = 'PowerBond'):\n BGcomponent.__init__(self)\n Graph.__init__(self)\n self.id = next(self.id_generator)\n self.__id = BGbond.__ID\n BGbond.__ID += 1\n self.__id += 1\n self.__fromPort = fromPort\n self.__toPort = toPort\n self.__causalityStroke = 1\n self.__directionArrow = 1\n self.__type = Type", "def _create_raindrop(self, raindrop_x, raindrop_y):\n raindrop = Raindrop(self)\n r_calc = self._calculate_spacing()\n x_math = r_calc[4] + 2 * r_calc[4] * raindrop_x\n x_math_high = x_math + raindrop_x // 2\n x_math_low = x_math - raindrop_x // 2\n y_math_low = 2 * raindrop.rect.height * raindrop_y\n y_math_high = 2 * raindrop.rect.height * raindrop_y\n overlap_chk = raindrop\n overlap_counter = 0\n sprite_collision = pygame.sprite.spritecollideany(\n raindrop, self.rain, collided=None\n )\n\n while overlap_chk is not None:\n raindrop.x = x_math\n raindrop.y = y_math_high\n raindrop.rect.x = raindrop.x\n raindrop.rect.y = raindrop.y\n if sprite_collision is not None and overlap_counter < 30:\n overlap_counter += 1\n continue\n else:\n break\n\n self.rain.add(raindrop)", "def __init__(self, x = 140, y = 140):\r\n super(Ball, self).__init__(image = Ball.image,\r\n x = 600, y = 240,\r\n dx = -3, dy = 1)", "def arm2_maker(size=60, color='0x9C661F'):\n arm = GArc(size, size+30, 245, 200)\n arm.filled = True\n arm.fill_color = color\n return arm", "def draw_radar(self, screen):\n self.get_data()\n for radar in self.radars:\n position, _ = radar\n pg.draw.line(screen, RADAR_COLOR, self.center, position, 1)\n pg.draw.circle(screen, RADAR_COLOR, position, 2)", "def arm1_maker(size=60, color='0x9C661F'):\n arm = GArc(size, size+30, 100, 200)\n arm.filled = True\n arm.fill_color = color\n return arm", "def __init__(self, coordinates): 
\n\t\tsuper().__init__(coordinates)\n\t\tself.type = 'drain'", "def __init__(self, name, type_name, length, barrel_diameter, barrel_type, array_type, rod_number, rod_diameter, pitch,\n orientation):\n self.name = name\n self.type = type_name\n self.length = length\n self.barrel_type = barrel_type\n self.barrel_diameter = barrel_diameter\n self.array_type = array_type\n self.rod_diameter = rod_diameter\n self.pitch = pitch\n self.orientation = orientation\n self.rod_number = rod_number", "def __init__(self, minRA, maxRA, minDec, maxDec, radius_RA, radius_Dec):\n\n self.RA = np.mean([minRA, maxRA])\n self.Dec = np.mean([minDec, maxDec])\n self.radius_RA = radius_RA\n self.radius_Dec = radius_Dec\n\n # define the polygon attached to this area\n \"\"\"\n self.area_poly = areap(self.RA-radius_RA/2.,\n self.RA+radius_RA/2.,\n self.Dec-radius_Dec/2.,\n self.Dec+radius_Dec/2.)\n \"\"\"\n self.area_poly = areap(minRA, maxRA, minDec, maxDec)\n all_patches = self.getpatches(minRA, maxRA, minDec, maxDec)\n\n self.patches = self.inside(all_patches)", "def __init__(self,x,y,r,vx,vy):\n self.x = x\n self.y = y\n self.r = r\n self.vx = vx\n self.vy = vy", "def __init__(self, radius):\n self.radius = radius", "def __init__(self, *args, **kwargs):\n super(Ball, self).__init__(*args, **kwargs)\n self.speed = kwargs.get('speed', 5)\n self.ball_image = pyglet.image.load(os.path.join(config.ASSETS_DIR, 'ball.png'))\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = pyglet.sprite.Sprite(self.ball_image, self.x, self.y)\n self.x_direction = 1\n self.y_direction = 1\n\n print('Ball Created')", "def createRadarSims(self):\n import anwp.sims\n # remove old sims if any\n self.removeRadarSims()\n # create resource sims\n self.radarSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n imageFileName = ''\n \n if systemDict['intelReport']['round'] == self.game.currentRound:\n # there is a current report from this system indicate with green radar\n imageFileName = '%sradar_green.png' % self.game.app.genImagePath\n \n if systemDict['myEmpireID'] == self.game.myEmpireID:\n if systemDict['radarStrength'] > 0:\n # players system has radar indicate with blue radar\n imageFileName = '%sradar_blue.png' % self.game.app.genImagePath\n \n if imageFileName <> '':\n # create sim\n sim = RadarEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'radar'))\n \n # add sim to world\n self.radarSims.append(sim)\n x = systemDict['x']-46\n y = systemDict['y']+65\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)", "def __init__(self, yil=None, ay=None, gun=None, tarih=None):\n pass", "def __init__(self,naca_name,N,plt=True):\n # Name of the airfoil\n self.name = naca_name\n # Number of points\n self.N = N/2\n # [m] Points of the airfoil (_u: upper, _l: lower)\n self.xu = np.linspace(0,1,N)\n self.xl = np.linspace(0,1,N)\n self.x = np.concatenate((self.xu, np.flip(self.xl)))\n self.yu = np.empty((N))\n self.yl = np.empty((N))\n self.ytu = np.empty((N))\n self.ytl = np.empty((N))\n self.z = np.concatenate((self.yu, np.flip(self.yl)))\n self.y = np.zeros((2*N))\n self.dyc = np.zeros((N))\n # Camber line y position (_u: upper, _l: lower)\n self.yc = np.zeros((N))\n # [m] Chord length\n self.c = 1\n # Macximum camber in chord length\n self.m = 0.01*int(naca_name[-4:-3])\n # Distance of the maximum camber from the leading edge\n self.p = 0.1*int(naca_name[-3:-2])\n # Thickness of the airfoil\n self.t = 
0.01*int(naca_name[-2:])\n # Leading edge radius\n self.r = 1.019*self.t**2 / self.c\n # Plotting variable\n self.plot = plt", "def createSimpleRibbon(name='noodle', totalJoints=6):\r\n # create a ribbonNurbsPlane:\r\n ribbonNurbsPlane = cmds.nurbsPlane(name=name+\"RibbonNurbsPlane\", constructionHistory=False, object=True, polygon=0, axis=(0, 1, 0), width=1, lengthRatio=8, patchesV=totalJoints)[0]\r\n # get the ribbonNurbsPlane shape:\r\n ribbonNurbsPlaneShape = cmds.listRelatives(ribbonNurbsPlane, shapes=True, children=True)[0]\r\n # make this ribbonNurbsPlane as template, invisible and not renderable:\r\n cmds.setAttr(ribbonNurbsPlane+\".template\", 1)\r\n cmds.setAttr(ribbonNurbsPlane+\".visibility\", 0)\r\n setNotRenderable([ribbonNurbsPlaneShape])\r\n # make this ribbonNurbsPlane as not skinable from dpAR_UI:\r\n cmds.addAttr(ribbonNurbsPlane, longName=\"doNotSkinIt\", attributeType=\"bool\", keyable=True)\r\n cmds.setAttr(ribbonNurbsPlane+\".doNotSkinIt\", 1)\r\n # create groups to be used as a root of the ribbon system:\r\n ribbonGrp = cmds.group(ribbonNurbsPlane, n=name+\"_RibbonJoint_grp\")\r\n # create joints:\r\n jointList, jointGrpList = [], []\r\n for j in range(totalJoints+1):\r\n # create pointOnSurfaceInfo:\r\n infoNode = cmds.createNode('pointOnSurfaceInfo', name=name+\"_POSI\"+str(j))\r\n # setting parameters worldSpace, U and V:\r\n cmds.connectAttr(ribbonNurbsPlaneShape + \".worldSpace[0]\", infoNode + \".inputSurface\")\r\n cmds.setAttr(infoNode + \".parameterV\", ((1/float(totalJoints))*j) )\r\n cmds.setAttr(infoNode + \".parameterU\", 0.5)\r\n # create and parent groups to calculate:\r\n posGrp = cmds.group(n=name+\"Pos\"+str(j)+\"_grp\", empty=True)\r\n upGrp = cmds.group(n=name+\"Up\"+str(j)+\"_grp\", empty=True)\r\n aimGrp = cmds.group(n=name+\"Aim\"+str(j)+\"_grp\", empty=True)\r\n cmds.parent(upGrp, aimGrp, posGrp, relative=True)\r\n # connect groups translations:\r\n cmds.connectAttr(infoNode + \".position\", posGrp + \".translate\", force=True)\r\n cmds.connectAttr(infoNode + \".tangentU\", upGrp + \".translate\", force=True)\r\n cmds.connectAttr(infoNode + \".tangentV\", aimGrp + \".translate\", force=True)\r\n # create joint:\r\n cmds.select(clear=True)\r\n joint = cmds.joint(name=name+str(j)+\"_jnt\")\r\n jointList.append(joint)\r\n cmds.addAttr(joint, longName='dpAR_joint', attributeType='float', keyable=False)\r\n # parent the joint to the groups:\r\n cmds.parent(joint, posGrp, relative=True)\r\n jointGrp = cmds.group(joint, name=name+\"Joint\"+str(j)+\"_grp\")\r\n jointGrpList.append(jointGrp)\r\n # create aimConstraint from aimGrp to jointGrp:\r\n cmds.aimConstraint(aimGrp, jointGrp, offset=(0, 0, 0), weight=1, aimVector=(0, 1, 0), upVector=(0, 0, 1), worldUpType=\"object\", worldUpObject=upGrp, n=name+\"Ribbon\"+str(j)+\"_aimConstraint\" )\r\n # parent this ribbonPos to the ribbonGrp:\r\n cmds.parent(posGrp, ribbonGrp, absolute=True)\r\n return [ribbonNurbsPlane, ribbonNurbsPlaneShape, jointGrpList, jointList]", "def __init__(self, gamepad, location):\n self.gamepad = gamepad\n self.id = gamepad.get_id()\n self.original_barrel = TURRET.subsurface((0,0,150,150))\n self.barrel = self.original_barrel.copy()\n self.base = TURRET.subsurface((300,0,150,150))\n self.rect = self.barrel.get_rect(center=location)\n self.base_rect = self.rect.copy()\n self.angle = 0", "def __init__(self):\n super(Grasshopper, self).__init__()\n # self.world.gravity = (0.0,0.0)\n\n # Initialize all of the objects\n ground = self.world.CreateBody(position=(0, 20))\n 
ground.CreateEdgeChain(\n [ (-20,-20),\n (-20, 20),\n ( 20, 20),\n ( 20,-20),\n (-20,-20) ]\n )\n\n # Initialize sliders\n self.settings.altitude_p = pid_values['altitude'].p * 100\n self.settings.altitude_d = pid_values['altitude'].d * 100\n self.settings.lateral_p = pid_values['lateral drift'].p * 20\n self.settings.lateral_d = pid_values['lateral drift'].d * 20\n self.settings.attitude_p = pid_values['attitude'].p * 100\n self.settings.attitude_d = pid_values['attitude'].d * 100\n\n # Rocket\n self.ship=self.world.CreateDynamicBody(\n position=(0,6), angle=0.1,\n angularDamping=0, linearDamping=0)\n\n # And add a box fixture onto it (with a nonzero density, so it will move)\n box=self.ship.CreatePolygonFixture(box=self.ship_dimensions, density=self.ship_mass/(self.ship_dimensions[0]*self.ship_dimensions[1]), friction=0.3)", "def crear_arbol(self):\n\n padre = GrupoRaizFactory(id=100)\n cabeza_red1 = GrupoFactory(id=200, parent=padre, red__nombre='matrimonio')\n cabeza_red2 = GrupoFactory(id=300, parent=padre)\n cabeza_red3 = GrupoFactory(id=400, parent=padre, red__nombre='adultos')\n\n hijo1_cb2 = GrupoHijoFactory(id=500, parent=cabeza_red2)\n hijo2_cb2 = GrupoHijoFactory(id=800, parent=cabeza_red2)\n hijo11_cb2 = GrupoHijoFactory(id=600, parent=hijo1_cb2)\n\n hijo1_cb3 = GrupoHijoFactory(id=700, parent=cabeza_red3)\n\n self.lista_arbol_completo = [\n padre, [cabeza_red1, cabeza_red2, [hijo1_cb2, [hijo11_cb2], hijo2_cb2], cabeza_red3, [hijo1_cb3]]\n ]\n\n self.lista_arbol_cb2 = [\n cabeza_red2, [hijo1_cb2, [hijo11_cb2], hijo2_cb2]\n ]", "def create_rink():\n\n # RINK\n coords = OFFSET, OFFSET, OFFSET+22*SCALE, OFFSET+22*SCALE\n canvas.create_arc(coords, start=90, extent=90, fill=WHITE, outline=\"\")\n coords = OFFSET, HEIGHT-OFFSET-22*SCALE, OFFSET+22*SCALE, HEIGHT-OFFSET\n canvas.create_arc(coords, start=180, extent=90, fill=WHITE, outline=WHITE)\n coords = WIDTH-OFFSET-22*SCALE, HEIGHT-OFFSET-22*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET\n canvas.create_arc(coords, start=270, extent=90, fill=WHITE, outline=WHITE)\n coords = WIDTH-OFFSET-22*SCALE, OFFSET, WIDTH-OFFSET, OFFSET+22*SCALE\n canvas.create_arc(coords, start=0, extent=90, fill=WHITE, outline=WHITE)\n coords = OFFSET+11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_polygon(coords, fill=WHITE, outline=WHITE)\n coords = OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET-11*SCALE, OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_polygon(coords, fill=WHITE, outline=WHITE)\n\n # CENTER CIRCLE\n coords = WIDTH/2-15*SCALE, HEIGHT/2-15*SCALE, WIDTH/2+15*SCALE, HEIGHT/2+15*SCALE\n canvas.create_oval(coords, outline=BLUE, width=2, fill=WHITE)\n\n # HALF CENTER CIRCLE\n coords = WIDTH/2-10*SCALE, HEIGHT-OFFSET-10*SCALE, WIDTH/2+10*SCALE, HEIGHT-OFFSET+10*SCALE\n canvas.create_arc(coords, outline=RED, width=2, start=0, extent=180)\n\n # GOAL AREA\n # - Left\n # - - Crease\n coords = OFFSET+5*SCALE, HEIGHT/2-6*SCALE, OFFSET+17*SCALE, HEIGHT/2+6*SCALE\n canvas.create_arc(coords, fill=LIGHT_BLUE, start=318, extent=84, outline=\"\")\n canvas.create_arc(coords, outline=RED, start=318, extent=84, style=ARC)\n coords = OFFSET+11*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.5*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.5*SCALE, HEIGHT/2+4*SCALE, OFFSET+11*SCALE, HEIGHT/2+4*SCALE\n canvas.create_polygon(coords, fill=LIGHT_BLUE, outline=\"\")\n coords = OFFSET+11*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.2*SCALE+1, HEIGHT/2-4*SCALE\n canvas.create_line(coords, 
fill=RED)\n coords = OFFSET+15.2*SCALE+1, HEIGHT/2+4*SCALE, OFFSET+11*SCALE, HEIGHT/2+4*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Restricted Area\n coords = OFFSET, HEIGHT/2-14*SCALE, OFFSET+11*SCALE, HEIGHT/2-9*SCALE\n canvas.create_line(coords, fill=RED)\n coords = OFFSET, HEIGHT/2+14*SCALE, OFFSET+11*SCALE, HEIGHT/2+9*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Goal\n coords = OFFSET+8*SCALE, HEIGHT/2-3*SCALE, OFFSET+11*SCALE, HEIGHT/2-3*SCALE, OFFSET+11*SCALE, HEIGHT/2+3*SCALE, OFFSET+8*SCALE, HEIGHT/2+3*SCALE\n canvas.create_polygon(coords, fill=GRAY, outline=RED)\n # - Right\n # - - Crease\n coords = WIDTH-(OFFSET+5*SCALE), HEIGHT/2-6*SCALE, WIDTH-(OFFSET+17*SCALE), HEIGHT/2+6*SCALE\n canvas.create_arc(coords, fill=LIGHT_BLUE, start=138, extent=84, outline=\"\")\n canvas.create_arc(coords, outline=RED, start=138, extent=84, style=ARC)\n coords = WIDTH-(OFFSET+11*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.5*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.5*SCALE), HEIGHT/2+4*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+4*SCALE\n canvas.create_polygon(coords, fill=LIGHT_BLUE, outline=\"\")\n coords = WIDTH-(OFFSET+11*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.2*SCALE+1), HEIGHT/2-4*SCALE\n canvas.create_line(coords, fill=RED)\n coords = WIDTH-(OFFSET+15.2*SCALE+1), HEIGHT/2+4*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+4*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Restricted Area\n coords = WIDTH-OFFSET, HEIGHT/2-14*SCALE, WIDTH-OFFSET-11*SCALE, HEIGHT/2-9*SCALE\n canvas.create_line(coords, fill=RED)\n coords = WIDTH-OFFSET, HEIGHT/2+14*SCALE, WIDTH-OFFSET-11*SCALE, HEIGHT/2+9*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Goal\n coords = WIDTH-(OFFSET+8*SCALE), HEIGHT/2-3*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2-3*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+3*SCALE, WIDTH-(OFFSET+8*SCALE), HEIGHT/2+3*SCALE\n canvas.create_polygon(coords, fill=GRAY, outline=RED)\n\n # LINES\n # - Left Baseline\n coords = OFFSET+11*SCALE, OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=1.5)\n # - Right Baseline\n coords = WIDTH-OFFSET-11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=1.5)\n # - Left Blueline\n coords = OFFSET+70*SCALE, OFFSET, OFFSET+70*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLUE, width=7)\n # - Right Blueline\n coords = WIDTH-(OFFSET+70*SCALE), OFFSET, WIDTH-(OFFSET+70*SCALE), HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLUE, width=7)\n # - Redline\n coords = WIDTH/2, OFFSET, WIDTH/2, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=7)\n coords = WIDTH/2, OFFSET, WIDTH/2, HEIGHT-OFFSET\n canvas.create_line(coords, fill=WHITE, width=5, dash=(9,9))\n\n # RINK OUTLINE\n coords = OFFSET, OFFSET, OFFSET+22*SCALE, OFFSET+22*SCALE\n canvas.create_arc(coords, start=90, extent=90, outline=BLACK, style=ARC, width=2)\n coords = OFFSET, HEIGHT-OFFSET-22*SCALE, OFFSET+22*SCALE, HEIGHT-OFFSET\n canvas.create_arc(coords, start=180, extent=90, outline=BLACK, style=ARC, width=2)\n coords = WIDTH-OFFSET-22*SCALE, HEIGHT-OFFSET-22*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET\n canvas.create_arc(coords, start=270, extent=90, outline=BLACK, style=ARC, width=2)\n coords = WIDTH-OFFSET-22*SCALE, OFFSET, WIDTH-OFFSET, OFFSET+22*SCALE\n canvas.create_arc(coords, start=0, extent=90, outline=BLACK, style=ARC, width=2)\n coords = OFFSET+11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, OFFSET\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = WIDTH-OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, 
HEIGHT-OFFSET-11*SCALE\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = OFFSET, OFFSET+11*SCALE, OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_line(coords, fill=BLACK, width=2)\n\n\n # CENTER DOT\n coords = WIDTH/2-1*SCALE-1, HEIGHT/2-1*SCALE-1, WIDTH/2+1*SCALE+1, HEIGHT/2+1*SCALE+1\n canvas.create_oval(coords, outline=WHITE, fill=BLUE)\n\n # FACEOFF\n # - Top Left\n # - - Ticks\n coords = OFFSET+29.5*SCALE, HEIGHT/2-39*SCALE, OFFSET+29.5*SCALE, HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+32.5*SCALE, HEIGHT/2-39*SCALE, OFFSET+32.5*SCALE, HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = OFFSET+16*SCALE, HEIGHT/2-37*SCALE, OFFSET+46*SCALE, HEIGHT/2-7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = OFFSET+30*SCALE, HEIGHT/2-23*SCALE, OFFSET+32*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = OFFSET+25*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+25*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Bottom Left\n # - - Ticks\n coords = OFFSET+29.5*SCALE, HEIGHT/2+39*SCALE, OFFSET+29.5*SCALE, HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+32.5*SCALE, HEIGHT/2+39*SCALE, OFFSET+32.5*SCALE, HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = OFFSET+16*SCALE, HEIGHT/2+37*SCALE, OFFSET+46*SCALE, HEIGHT/2+7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = OFFSET+30*SCALE, HEIGHT/2+23*SCALE, OFFSET+32*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = OFFSET+25*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+25*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Top Right\n # - - Ticks\n coords = WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2-39*SCALE, WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2-39*SCALE, WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = WIDTH-(OFFSET+16*SCALE), HEIGHT/2-37*SCALE, WIDTH-(OFFSET+46*SCALE), HEIGHT/2-7*SCALE\n canvas.create_oval(coords, 
outline=RED, width=2, fill=WHITE)\n coords = WIDTH-(OFFSET+30*SCALE), HEIGHT/2-23*SCALE, WIDTH-(OFFSET+32*SCALE), HEIGHT/2-21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Bottom Right\n # - - Ticks\n coords = WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2+39*SCALE, WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2+39*SCALE, WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = WIDTH-(OFFSET+16*SCALE), HEIGHT/2+37*SCALE, WIDTH-(OFFSET+46*SCALE), HEIGHT/2+7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = WIDTH-(OFFSET+30*SCALE), HEIGHT/2+23*SCALE, WIDTH-(OFFSET+32*SCALE), HEIGHT/2+21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n\n # NEUTRAL ZONE FACEOFF\n # - Top Left\n coords = WIDTH/2-21*SCALE, HEIGHT/2-23*SCALE, WIDTH/2-19*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Bottom Left\n coords = WIDTH/2-21*SCALE, HEIGHT/2+23*SCALE, WIDTH/2-19*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Top Right\n coords = WIDTH/2+21*SCALE, HEIGHT/2-23*SCALE, WIDTH/2+19*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Bottom Right\n coords = WIDTH/2+21*SCALE, HEIGHT/2+23*SCALE, WIDTH/2+19*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n\n\n canvas.grid(row=1, columnspan=5)", "def __init__(self):\n # initialize a bird to default values.\n self.set_instance_vars()\n\n # randomize some parameters, such as starting height\n self.pos_y = self.random_height()\n\n # tag each bird\n\n self.identifier = Bird.num_birds\n\n # create ai net for each bird\n self.initialize_ai()\n\n # increment Bird counter\n Bird.num_birds += 1\n\n # remember time of birth\n self.birth_time = 0", "def 
__init__(self,name,speed,depth_of_view,view_angle,x_coor = \"\",y_coor = \"\"):\n self.name = name\n self.speed = speed # That will the instantenous speed of the robot\n self.depth_of_view = depth_of_view # That will the instantenous depth of view of the robot\n self.view_angle = view_angle # That will the instantenous view angle of the robot\n self.type = \"Robot\" #Specift the object type\n self.x = x_coor # store the position of the robot\n self.y = y_coor # store the position of the robot\n self.kind = name #Store its kind to give the GUI", "def __init__(self, x, y, width, height,skin):\n self.rect = pygame.Rect(x, y, width, height)\n # la idea es eventualmente cambiar esto con image.get_rect() cuando hayan sprites, por eso las llamadas en #position\n\n # position\n self.rect.x = x\n self.rect.y = y\n\n # movement\n self.speed_x = 9 # esta es constante\n self.speed_y = 0 # esta varía\n self.left = False\n self.right = False\n self.rising = False\n self.falling = False\n self.push = False\n self.moveRight = False\n self.count = 10\n self.moveCount = 0\n self.fallingCount = 0\n\n #poderes\n self.forcePush = False\n self.doubleJump = False\n self.shield = False\n self.clock = 0\n self.clockStart = 0\n\n #arbol\n self.tree = None", "def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up", "def __init__(self, distance_from_goal, drill_name=None, goalie_name=None):\n\n super().__init__()\n self.run_drill = True\n\n # Flag for the first ball\n self.first_ball = True\n\n # Create and connect to Bluetooth button\n self.threaded_bt_helper = threaded_bt_helper()\n self.threaded_bt_helper.bt_button_click.connect(self.bt_button_click)\n\n self.drill_name = drill_name\n self.goalie_name = goalie_name\n self.distance_from_goal = distance_from_goal\n\n if self.drill_name is not None:\n # Get drill information and save it\n self.drill_info = self.get_profile_info()\n\n # Acquire Rate of Fire (ROF) of the drill\n self.rof = int(self.drill_info['1'][2])\n\n # Initialize Trajectory Algorithm Helper\n self.trajectory_algo = trajectory_algorithm.TrajectoryAlgorithm(\n self.distance_from_goal)\n\n # Initialize all motors\n self.bfm = motor_ball_feed_vel.MotorBallFeed()\n self.bqm = motor_ball_queue_turn_once.MotorBallQueue()\n self.fmt = motor_flywheel_top.MotorFlywheelTop()\n self.fmb = motor_flywheel_bottom.MotorFlywheelBottom()\n self.pm = motor_pitch.MotorPitch()\n self.ym = motor_yaw.MotorYaw()\n\n # Stores previous shot location\n self.prev_shot_loc = \"CM\"", "def __init__(self,size):\n self.bricks = []\n self.size = size\n for x in range(self.BRICK_GAPS[0],self.size[0]-(self.BRICK_SIZE[0]+self.BRICK_GAPS[0]),self.BRICK_SIZE[0]+self.BRICK_GAPS[0]):\n for y in range(self.BRICK_GAPS[1],int(self.size[1]/2.0),self.BRICK_SIZE[1]+self.BRICK_GAPS[1]):\n brick_color = pygame.Color(random.randrange(0,256),random.randrange(0,256),random.randrange(0,256))\n new_brick = Brick(x,y,self.BRICK_SIZE[0],self.BRICK_SIZE[1],brick_color)\n self.bricks.append(new_brick)\n self.paddle = Paddle(self.size[0]/2.0,self.size[1]-40.0,100,20)\n self.ball = 
Ball(self.paddle.x+self.paddle.width/2.0,self.paddle.y - (self.BALL_RADIUS+10),self.BALL_RADIUS,0.0,-1.0)", "def __init__(self, vx = 0, vy = 0, a = 500, pos = 'u', x_bul=0, y_bul=0, r_bul = 2):\n \n self.vx, self.vy, self.a, self.pos, self.r_bul = \\\n vx, vy, a, pos, r_bul\n self.tank = Block((0,255,255), 80, 80)\n self.player_list = pygame.sprite.Group()\n self.player_list.add(self.tank)\n\tself.x = 100.0\n\tself.y = 100.0\n self.tank.rect.x = self.x\n self.tank.rect.y = self.y\n self.refresh_color()", "def __init__(self):\n self.x = int(constants.SCREEN_WIDTH/2)\n self.y = int(constants.SCREEN_HEIGHT/2)\n self.DX = self.getRandSpeed()\n self.DY = self.getRandSpeed()\n self.RADIUS = 5", "def __init__(self, x, y , planet):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.planet = planet\n\t\tself.battery = 100", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(RadarObject309, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.sensorId is None:\n self.sensorId = 0\n if self.id is None:\n self.id = 0\n if self.length is None:\n self.length = 0\n if self.width is None:\n self.width = 0\n if self.measstat is None:\n self.measstat = 0\n if self.existprob is None:\n self.existprob = 0\n if self.dynprop is None:\n self.dynprop = 0\n if self.latdisp is None:\n self.latdisp = 0.\n if self.longdisp is None:\n self.longdisp = 0.\n if self.relxdot is None:\n self.relxdot = 0.\n if self.relxddot is None:\n self.relxddot = 0.\n if self.latspeed is None:\n self.latspeed = 0.\n if self.obsprob is None:\n self.obsprob = 0\n if self.rollcount is None:\n self.rollcount = 0\n if self.rcs is None:\n self.rcs = 0.\n else:\n self.header = std_msgs.msg.Header()\n self.sensorId = 0\n self.id = 0\n self.length = 0\n self.width = 0\n self.measstat = 0\n self.existprob = 0\n self.dynprop = 0\n self.latdisp = 0.\n self.longdisp = 0.\n self.relxdot = 0.\n self.relxddot = 0.\n self.latspeed = 0.\n self.obsprob = 0\n self.rollcount = 0\n self.rcs = 0.", "def __init__(self,height=7,width=7):\n self.__cars = []\n self.__height = height\n self.__width = width", "def __init__(self, r, g, b, a=1.0):\n self.value = [r, g, b, a]", "def __init__(self, skin_position: str, /):", "def buildRiver(*args):\n\n # Query parameters\n width = cmds.floatSliderGrp(RoadRiverTab.roadWidth, query=True, value=True)\n quality = cmds.intSliderGrp(RoadRiverTab.roadQuality, query=True, value=True)\n\n # Create using RoadCreation.py\n RC.createRiver(width, quality)", "def create_barco(x0, y0,largo_1,largo_2,alto):\n # Defining the location and colors of each vertex of the shape\n vertices = [\n # positions colors\n x0, y0, 0.0, 0.7, 0.25, 0.2, #Punto uno linea superior\n x0 + largo_1, y0, 0.0, 0.7, 0.25, 0.2, #Punto dos linea superior\n x0 + largo_1 + largo_2, y0, 0.0, 0.5, 0.25, 0.0, #Punto tres linea superior\n x0 + (largo_1)*2 + largo_2, y0, 0.0, 0.5, 0.25, 0.0, #Punto cuatro linea superior\n\n x0 + largo_1, y0 - alto, 0.0, 0.7, 0.25, 0.2, #Punto uno linea inferior\n x0 + largo_1 + largo_2, y0 - alto, 0.0, 0.5, 0.25, 0.0] #Punto dos linea inferior\n\n # Defining connections among vertices\n # We have a triangle every 3 indices specified\n indices = [0, 1, 4,\n 4, 1, 2,\n 2, 4, 5,\n 5, 3, 2]\n\n return Shape(vertices, indices)", "def __init__(self, posn_x, posn_y, velocity_x, velocity_y, kula): \n self.posn_x = posn_x # x position of box containing the ball (bottom). 
\n self.posn_y = posn_y # x position of box containing the ball (left edge). \n self.velocity_x = velocity_x # amount of x-movement each cycle of the 'for' loop. \n self.velocity_y = 100.0 # amount of y-movement each cycle of the 'for' loop. \n self.color = kula # color of the ball \n\n self.ball_width = 20.0 # size of ball - width (x-dimension). \n self.ball_height = 20.0 # size of ball - height (y-dimension). \n self.coef_restitution = 0.90", "def __init__(self,r):\n self.radius = r\n self.uc_centered_a = r\n self.uc_centered_b = r*np.sqrt(3.0)", "def _create_ml_obj(radar, ml_pos_field='melting_layer_height'):\n ml_obj = deepcopy(radar)\n\n # modify original metadata\n ml_obj.range['data'] = np.array([0, 1], dtype='float64')\n ml_obj.ngates = 2\n\n ml_obj.gate_x = np.zeros((ml_obj.nrays, ml_obj.ngates), dtype=float)\n ml_obj.gate_y = np.zeros((ml_obj.nrays, ml_obj.ngates), dtype=float)\n ml_obj.gate_z = np.zeros((ml_obj.nrays, ml_obj.ngates), dtype=float)\n\n ml_obj.gate_longitude = np.zeros(\n (ml_obj.nrays, ml_obj.ngates), dtype=float)\n ml_obj.gate_latitude = np.zeros(\n (ml_obj.nrays, ml_obj.ngates), dtype=float)\n ml_obj.gate_altitude = np.zeros(\n (ml_obj.nrays, ml_obj.ngates), dtype=float)\n\n # Create field\n ml_obj.fields = dict()\n ml_dict = get_metadata(ml_pos_field)\n ml_dict['data'] = np.ma.masked_all((ml_obj.nrays, ml_obj.ngates))\n ml_obj.add_field(ml_pos_field, ml_dict)\n\n return ml_obj", "def __init__(self):\n # Screen settings\n self.screen_width = 400\n self.screen_height = 300\n self.bg_color = (230, 230, 230)\n\n self.rocket_speed_factor= 1.5", "def create_asteroid_r(self):\n self.create_asteroid_common(8, self.colors[0], self.colors[0], enemy=True)", "def __init__(self,circlePos,circleRad,circleVel):\n self.circlePos=circlePos\n self.circleRad=circleRad\n self.circleVel=circleVel", "def __init__(self,Number=100,ContainerRadius=60e-10,BallRadius=53e-12,temperature=2980,ballmass=1.67e-27,speed=1012):\r\n self.__cont=Container(ContainerRadius)\r\n self.__ContainerRad=ContainerRadius\r\n self.__ballList=[]\r\n self.__relativetemp=(temperature/298)\r\n self._bmass=ballmass\r\n randx=[np.sqrt(self.__relativetemp)*k*np.sqrt(speed) for k in np.random.normal(0,50,Number)]\r\n randy=[np.sqrt(self.__relativetemp)*k*np.sqrt(speed) for k in np.random.normal(0,50,Number)]\r\n \r\n r=BallRadius\r\n n=1\r\n Angle=0\r\n for i in range(Number):\r\n self.__ballList.append(Ball(ballmass,BallRadius,[r*np.cos(Angle),r*np.sin(Angle)],[randx[i],randy[i]]))\r\n circumference=np.pi*2*r\r\n distAngle=(np.pi*2)/(circumference/(3*BallRadius))\r\n Angle+=distAngle\r\n if (Angle+distAngle)>2*np.pi:\r\n r+=3*BallRadius\r\n n+=1\r\n Angle=0\r\n scale=self.__ContainerRad/(n*3*BallRadius)\r\n for ball in self.__ballList:\r\n ball.scalepos(scale)\r\n \r\n self.__text0 = None", "def __init__(self):\n self.maze = [['#','#','#','#','#','#','#','#','#','#','#',],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','^','/',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ','@',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#']]\n self.diamonds = 1\n self.width = 10\n self.height = 12\n 
self.crates = 1", "def create_rgr(self, **kwargs):\n rgr = models.Raingaugereading.objects.create(\n LocationID=kwargs.get('aloc', self.aloc),\n LocationDescription=self.aloc.loc_name,\n deviceID=kwargs.get('adevice', self.adevice),\n SystemType='Metric',\n datetime=\"2020-06-01T21:10:24.000Z\",\n rain=0.2,\n AccumulatedRain=45.6,\n )\n return rgr", "def __init__(self, world_map, GRID_LOCK, coordinates=None):\n\n ''' Take parameters, and Sprite Constants '''\n super(BeesSprite, self).__init__(world_map, BeesSprite.IMAGE, GRID_LOCK,\n BeesSprite.HEALTH_BAR, BeesSprite.AVG_SPEED,\n BeesSprite.VISION, coordinates)\n\n self.type = \"bees\"\n self.prey = [\"plant\"]", "def __init__(self, pos, radius):\n super().__init__(pos, radius)", "def __init__(self,):\r\n self.g = 9.81\r\n self.l = 0.5\r\n self.m1 = 1.0\r\n self.m2 = 1.0\r\n self.m3 = 1.0\r\n self.r1 = 1.0\r\n self.r2 = 1.0\r\n self.tau = 0.001\r\n self.theta1 = 1.0\r\n self.theta2 = 1.0\r\n self.theta3 = 1.0", "def __init__(self, arena_size, orb_count, tick_rate):\n self.arena_size = arena_size\n self.orb_count = orb_count\n self.tick_rate = tick_rate", "def __init__(self):\n\n # Screen's settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Bluebee Settings\n self.bb_speed = 1.0\n\n # Moving test.\n self.counter = 0\n self.max_left = 400\n self.max_up = 300\n self.max_right = 400\n self.max_down = 300", "def __init__(self, roidb, num_classes):\n self._roidb = roidb\n self._num_classes = num_classes\n self._shuffle_roidb_inds()", "def __init__(self, roidb, num_classes):\n self._roidb = roidb\n self._num_classes = num_classes\n self._shuffle_roidb_inds()", "def __init__(self, type='lg_asteroid', dataset=obstacles, **kwargs):\n super().__init__(type=type, dataset=dataset, **kwargs)\n self.obj_type = \"asteroid\"", "def __init__(self):\n self.quadrants_count = 4\n self.quadrant_positions_count = 9\n self.play_area = [Quadrant(self.quadrant_positions_count) for _ in range(self.quadrants_count)]", "def __init__(self, r, r_2, pie_d, pie_d_2, lmb_d, lmb_d_2, pie_s, pie_s_2, b_s, b_s_2):\n lma_d=(pie_d-(1+r))\n lma_d_2=(pie_d_2-(1+r_2))\n a_s=((1+r)-pie_s)\n a_s_2=((1+r_2)-pie_s_2)\n \n self.lma_d, self.lma_d_2, self.lmb_d, self.lmb_d_2, self.a_s, self.a_s_2, self.b_s, self.b_s_2 = lma_d , lma_d_2 , lmb_d , lmb_d_2, a_s, a_s_2 , b_s, b_s_2\n if lma_d < a_s :\n raise ValueError('Insufficient demand.')\n elif lma_d_2 < a_s_2 :\n raise ValueError('Insufficient demand on 2.')", "def __init__(self, class_type=1, windows=True, radius=3):\n if windows:\n self.radius = radius\n else:\n self.radius = None\n self.class_type = class_type\n self._choose_regions()\n self._make_features()", "def __init__(self, x_coor, x_speed, y_coor, y_speed, direction):\n self.__x_coor = x_coor\n self.__x_speed = x_speed\n self.__y_coor = y_coor\n self.__y_speed = y_speed\n self.__direction = direction\n self.__radius = self.TORPEDO_RADIUS", "def __init__(self, r: float, i: float = 0):\n self.r = r\n self.i = i", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def __init__(self, x_pos, y_pos, radius, colour, moving = False):\n\t\t\n\t\tself.x_pos = x_pos\n\t\tself.y_pos = y_pos\n\t\tself.radius = radius\n\t\tself.diameter = 
2*radius\n\t\tself.colour = colour\n\t\tself.moving = moving\n\t\tself.x_vec = 0\n\t\tself.y_vec = 0", "def __init__(self):\r\n self.radius = BALL_RADIUS\r\n self.center_x = BALL_START_X\r\n self.center_y = BALL_START_Y\r\n self.velocity = BALL_SPEED\r\n self.angle = - math.pi / 2\r\n self.rectangle = pygame.Rect(self.center_x - self.radius, self.center_y - self.radius, 2 * self.radius, 2 * self.radius)\r\n self.color = \"white\"\r\n self.save_pos = (self.center_x, self.center_y)", "def __init__(self):\n #Configuracion de pantalla\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230,230,230)\n \n #Configuracion de la nave\n self.ship_speed_factor = 1.5\n self.ship_limit = 2\n\n #Configuracion de las balas\n self.bullet_speed_factor = 2\n self.bullet_width = 300\n self.bullet_height = 15\n self.bullet_color = 220,20,60\n self.bullets_allowed = 3\n\n #Configuracion de aliens\n self.alien_speed_factor = 1\n self.fleet_drop_speed = 10\n #fleet_direction = 1 representa derecha; -1 representa izquierda\n self.fleet_direction = 1\n\n #Que tan rapido el juego aumenta de velocidad\n self.speedup_scale = 1.1\n #Que tan rapido aumenta el valor de los puntos\n self.score_scale = 1.5\n self.initialize_dynamic_settings()", "def __init__(self, boolee):\n\n super(BinaryColor, self).__init__(1)\n self.boolee = bool(boolee)", "def __init__(self, rouge_types):\n\n self.rouge_types = rouge_types", "def __init__(self):\n\n self.HBridges = dict()\n\n HBridges['x'] = HBridge.HBridge(config.X_AXIS_H_BRIDGE_DIRECTION_PORT_1,\n config.X_AXIS_H_BRIDGE_DIRECTION_PORT_2,\n config.X_AXIS_H_BRIDGE_PWM_PORT_1,\n config.X_AXIS_H_BRIDGE_PWM_PORT_2)\n\n HBridges['y'] = HBridge.HBridge(config.Y_AXIS_H_BRIDGE_DIRECTION_PORT_1,\n config.Y_AXIS_H_BRIDGE_DIRECTION_PORT_2,\n config.Y_AXIS_H_BRIDGE_PWM_PORT_1,\n config.Y_AXIS_H_BRIDGE_PWM_PORT_2)\n \n HBridges['z'] = HBridge.HBridge(config.Z_AXIS_H_BRIDGE_DIRECTION_PORT_1,\n config.Z_AXIS_H_BRIDGE_DIRECTION_PORT_2,\n config.Z_AXIS_H_BRIDGE_PWM_PORT_1,\n config.Z_AXIS_H_BRIDGE_PWM_PORT_2)\n \n self.direction = {'x' : 0, 'y' : 0, 'z' : 0}\n\n self.lock = threading.Lock()", "def create_village(tick):\n village_template = nbt.TAG_Compound()\n\n village_template['Doors'] = nbt.TAG_List(Banana)\n village_template['Players'] = nbt.TAG_List(Banana)\n village_template['ACX'] = nbt.TAG_Int(0)\n village_template['ACY'] = nbt.TAG_Int(0)\n village_template['ACZ'] = nbt.TAG_Int(0)\n\n village_template['CX'] = nbt.TAG_Int(0)\n village_template['CY'] = nbt.TAG_Int(0)\n village_template['CZ'] = nbt.TAG_Int(0)\n\n village_template['Golems'] = nbt.TAG_Int(0)\n village_template['MTick'] = nbt.TAG_Int(0)\n village_template['PopSize'] = nbt.TAG_Int(1)\n village_template['Radius'] = nbt.TAG_Int(32)\n village_template['Stable'] = nbt.TAG_Int(tick)\n village_template['Tick'] = nbt.TAG_Int(tick)\n return Village(village_template)", "def __init__(self, home_robots=[], away_robots=[], ball=Ball(75, 65)):\n \n self.home_robots = home_robots\n self.away_robots = away_robots\n self.ball = ball\n self.field = Field(150, 130)\n\n self.status = 0\n self.home_goals = 0\n self.away_goals = 0", "def make_ball():\n ball = Ball()\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n\n # Speed and direction of rectangle\n ball.change_x = random.randrange(-2, 2)\n ball.change_y = random.randrange(-2, 2)\n\n return 
ball", "def newForCircle(p: Tuple[float, float], r: float) -> \"BB\":\n\n bb_ = lib.cpBBNewForCircle(p, r)\n return BB(bb_.l, bb_.b, bb_.r, bb_.t)", "def __init__(self):\r\n\t\t# Publishers\r\n\t\tself._pub_rate = rospy.Publisher('robot/joint_state_publish_rate', UInt16, queue_size=10)\r\n\t\tself.image_pub = rospy.Publisher(\"baxter_view\",Image,queue_size=4)\r\n\t\tself._obj_state = rospy.ServiceProxy(\"/gazebo/set_model_state\",SetModelState)\r\n\t\t\r\n\t\t# Link with baxter interface\r\n\t\tself._left_arm = baxter_interface.limb.Limb(\"left\")\r\n\t\tself._right_arm = baxter_interface.limb.Limb(\"right\")\r\n\t\tself._left_joint_names = self._left_arm.joint_names()\r\n\t\tself.grip_left = baxter_interface.Gripper('left', CHECK_VERSION)\r\n\r\n\t\tprint(\"Getting robot state... \")\r\n\t\tself._rs = baxter_interface.RobotEnable(CHECK_VERSION)\r\n\t\tself._init_state = self._rs.state().enabled\r\n\t\tprint(\"Enabling robot... \")\r\n\t\tself._rs.enable()\r\n\t\t\r\n\t\t# Control parameters\r\n\t\tself._rate = 500.0 # Hz\r\n\t\tself._pub_rate.publish(self._rate)\r\n\t\tself.bridge = CvBridge()\r\n\t\tself._left_arm.set_joint_position_speed(0.3)\r\n\t\tself._object_type = 0\r\n\t\tself.object_position = Point(x=0.0, y=0.0, z=0.0)\r\n\t\tself.object_v = 0.0", "def generate_villan(self, diff=1):\n vrad = 10\n v_dx = 3\n v_dy = 3\n v_mag = 5\n v_color = (255,0,0)\n \n villan = Ball(random.random()*(self.frame_width-vrad*diff), -vrad, \n random.random()*v_dx*diff - v_dx/2, random.random()*v_dy*diff+1, \n vrad*diff, v_mag*diff, v_color)\n self.villans.append(villan)", "def __init__(self, pos, radius):\n self.pos = pos\n self.radius = radius", "def __init__(self):\n self.Robot = Robot()\n self.Omega = matrix()\n # self.Omega.value[0][0] = 1.0\n # self.Omega.value[1][1] = 1.0\n self.Xi = matrix()\n # Xi.value[0][0] = 0.0\n # Xi.value[1][0] = 0.0\n self.measure = {}\n self.landMarkCount = 0\n self.init = False\n self.bearing = 0\n self.x = 0\n self.y = 0\n \n # TODO", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}", "def __init__(\n self,\n front_left_vertex,\n front_right_vertex,\n back_left_vertex,\n back_right_vertex,\n strength,\n ):\n\n self.front_left_vertex = front_left_vertex\n self.front_right_vertex = front_right_vertex\n self.back_left_vertex = back_left_vertex\n self.back_right_vertex = back_right_vertex\n self.strength = strength\n\n # Initialize the line vortices that make up the ring vortex.\n self.front_leg = LineVortex(\n origin=self.front_right_vertex,\n termination=self.front_left_vertex,\n strength=self.strength,\n )\n self.left_leg = LineVortex(\n origin=self.front_left_vertex,\n termination=self.back_left_vertex,\n strength=self.strength,\n )\n self.back_leg = LineVortex(\n origin=self.back_left_vertex,\n termination=self.back_right_vertex,\n strength=self.strength,\n )\n self.right_leg = LineVortex(\n origin=self.back_right_vertex,\n termination=self.front_right_vertex,\n strength=self.strength,\n )\n\n # Initialize a variable to hold the centroid of the ring vortex.\n self.center = ps.geometry.centroid_of_quadrilateral(\n self.front_left_vertex,\n self.front_right_vertex,\n self.back_left_vertex,\n self.back_right_vertex,\n )", "def __make_carriage(self):\n # Create base rectangle\n length = 
self.parameters['carriage_length']\n width = self.parameters['carriage_width']\n height = self.parameters['carriage_height']\n carriage = fso.Box(x=length, y=width, z=height)\n\n # Subtract slide from carraige\n slide_width = self.parameters['slide_width'] + 2*self.parameters['slide_tolerance']\n slide_height = self.parameters['slide_height'] + 2*self.parameters['slide_tolerance']\n slide_negative = fso.Box(x=2*length, y=slide_width, z=slide_height)\n carriage = carriage - slide_negative\n\n # Create mounting holes\n radius = 0.5*self.parameters['carriage_screw_size']\n base_hole = fso.Cylinder(r=radius,l=2*height)\n hole_list = []\n for i in (-1,1):\n for j in (-1,1):\n xpos = i*0.5*self.parameters['carriage_screw_dL']\n ypos = j*0.5*self.parameters['carriage_screw_dW']\n hole = base_hole.copy()\n hole.translate([xpos,ypos,0])\n hole_list.append(hole)\n # Remove hole material\n # print hole_list\n carriage -= hole_list\n carriage.set_color(self.carriage_color,recursive=True)\n self.carriage = carriage", "def __init__(self):\n pygame.init()\n self.rain_settings = RSettings()\n\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.rain_settings.screen_width = self.screen.get_rect().width\n self.rain_settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Raindrops\")\n\n self.rain = pygame.sprite.Group()\n\n self._create_rain()", "def __init__(self, pos, radius=0):\n super().__init__(pos, radius)", "def make_ball(id):\n ball = Ball()\n\n ball.id = id\n\n # Size of the ball\n # ball.size = random.randrange(10, 30)\n ball.size = 10\n\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(ball.size, WINDOW_WIDTH - ball.size)\n ball.y = random.randrange(ball.size, WINDOW_HEIGHT - ball.size)\n\n # Speed and direction\n ball.speed(DEFAULT_SPEED)\n\n # Color\n ball.color = (0, 0, random.randrange(128,256))\n\n return ball", "def __init__(\n self,\n ai=None,\n index=None,\n resources=None,\n coords=None,\n ramps=None,\n is_ours=False,\n is_enemies=False,\n ):\n self.ai = ai\n self.grid = self.ai.game_info.placement_grid.data_numpy\n self.index = str(index)\n self.resources = resources\n self.coords: Point2 = coords\n self.turret_positions = [[]]\n self.turret_queue = None\n self.grid_points = [[]]\n x_offset = Point2((20.0, 0.0))\n y_offset = Point2((0.0, 20.0))\n # need to assert the points are in the map actually\n self.top_left = Point2(self.coords.offset(-x_offset).offset(y_offset))\n self.bottom_right = Point2(self.coords.offset(x_offset).offset(-y_offset))\n self.ramps: Union[List[RampExt]] = ramps\n self.is_ours: bool = is_ours\n self.is_enemies: bool = is_enemies\n\n self.borders = []\n for ramp in self.ramps:\n ramp.name += f\"+ of {self}\"", "def __init__(self):\n self._pos = Vector2(250, 250)\n self._color = (randint(0, 255), randint(0, 255), randint(0, 255), 255)\n\n self._ticks_alive = 0\n self._dead = False", "def __init__(self, name, type_name, diameter, radius_of_curvature, elbow_angle, orientation, surface_roughness):\n self.name = name\n self.type = type_name\n self.diameter = diameter\n self.radius_of_curvature = radius_of_curvature\n self.orientation = orientation\n self.surface_roughness = surface_roughness\n self.elbow_angle = elbow_angle\n self.RperD = radius_of_curvature / diameter\n self.surface_roughnessratio = surface_roughness / diameter", "def __init__(self, shape, r=2, d=-1):\n self.radius = r\n if d == -1:\n self.stride = 2*r+1\n else:\n 
self.stride = d\n self.image_shape = shape\n self.patch_shape = ( r*2+1, 2*r+1 )", "def __init__(self, pos, breed='black', caste='worker'):\n self.name = 'ant'\n self.type = 'insect'\n self.breed = breed\n self.caste = caste\n self.carry_food = False\n\n self.pos = pos\n\n self.speed = {'black': 80.0, 'red': 80.0}[breed]\n self.width = {'black': 10, 'red': 5}[breed]\n self.height = {'black': 4, 'red': 2}[breed]\n self.mass = {'black': 5, 'red': 2}[breed]\n self.color = {\n 'black': (128, 128, 128, 128),\n 'red': (255, 0, 0, 128)\n }[breed]\n\n if caste == 'queen':\n self.width *= 2.5\n self.height *= 2.5\n self.speed /= 2.0\n self.mass *= 5\n\n self.rect = Rect(self.pos[0] - self.width // 2,\n self.pos[1] - self.height // 2,\n self.width, self.height)\n\n self.orientation = rnd.uniform(0, 2 * math.pi)\n\n # ---------\n self.epsilon = 0.99\n self.s = None\n self.a = None\n self.r = 0\n self.s_ = None\n self.p = None", "def fill(self, *args, **kwargs):\r\n closed = kwargs.pop('closed', True)\r\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)", "def __init__(self, _pendown=1, gridmode=False, gridsize=50, homeX = 50 + 25 + 5, homeY = 50 + 25 + 5, canvWidth = 400, canvHeight = 200, \\\n turtleMainColor=\"#00A651\", turtleAccentColor=\"#FFF600\", speed = 5, rotspeed = 5, pencolor = 'red', penwidth=3):\n self._turtleMainColor = turtleMainColor\n self._turtleAccentColor = turtleAccentColor\n self._speed = speed\n self._rotspeed = rotspeed\n self._pendown = _pendown\n self._pencolor = pencolor\n self._penwidth = penwidth\n self._rotation = 90\n self._gridsize = gridsize\n self._gridmode = gridmode\n \n if(gridmode and homeX == 80):\n homeX = 0\n homeY = 0\n \n self._x = homeX\n self._y = homeY\n self._homeX = homeX\n self._homeY = homeY\n \n self._canvWidth = canvWidth\n self._canvHeight = canvHeight\n self._actions = []\n self._levelDataString = [] \n \n self._walls = []\n self._lava = []\n \n self._appendCurrentState();" ]
[ "0.61111647", "0.5954283", "0.591362", "0.5875578", "0.58498406", "0.583478", "0.5827427", "0.57784045", "0.5732358", "0.56992537", "0.56636226", "0.5654638", "0.5638714", "0.5612462", "0.5602551", "0.56001383", "0.5584329", "0.5574126", "0.55410784", "0.55343825", "0.5528675", "0.55267155", "0.5524589", "0.5501198", "0.54686195", "0.54641235", "0.54561025", "0.5422895", "0.5417655", "0.5405508", "0.53951347", "0.5384531", "0.53793436", "0.5378757", "0.5373945", "0.53597236", "0.5356042", "0.53513765", "0.5350981", "0.53481275", "0.5347474", "0.53452617", "0.5341433", "0.533647", "0.5334654", "0.53249055", "0.5324839", "0.5320247", "0.532021", "0.5319753", "0.5318274", "0.5318111", "0.5316378", "0.5302483", "0.5300432", "0.52930814", "0.5288417", "0.52875984", "0.5286676", "0.5285968", "0.5283442", "0.5283275", "0.52674466", "0.52631515", "0.5263096", "0.5263096", "0.5258699", "0.5256982", "0.525263", "0.523827", "0.5236002", "0.52335495", "0.52290845", "0.52275133", "0.5224799", "0.52190876", "0.5215487", "0.52107257", "0.520989", "0.5208652", "0.52073884", "0.5200429", "0.5200136", "0.5200006", "0.5195725", "0.51839435", "0.51830363", "0.51778746", "0.5174319", "0.51721805", "0.51709604", "0.5170168", "0.51587546", "0.51346666", "0.51285166", "0.5119543", "0.5110616", "0.5107047", "0.51066923", "0.51050127" ]
0.54834235
24
Initializes the scheduler to poll every five minutes and starts it
def _init_scheduler(self): self._sched = BackgroundScheduler() self._sched.add_job(self._check_rain, trigger='cron', minute='*/5') self._sched.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_scheduler(self):\n scheduler = BackgroundScheduler()\n scheduler.add_job(self.do, 'interval', minutes=1)\n scheduler.start()\n self.do()", "def initialize_scheduler(self):\n scheduler = BackgroundScheduler()\n scheduler.add_job(self.do, 'interval', minutes=1)\n scheduler.start()\n self.do()", "def schedule_start(self):\n self.initialize_scheduler()", "def schedule_start(self):\n print(\"Scheduler for monitoring request is running\")\n self.initialize_scheduler()", "def __run_schedules():\n while True:\n __scheduler.run()", "def initialize_scheduler():\n\n with SCHED_LOCK:\n\n # Check if scheduler should be started\n start_jobs = not len(SCHED.get_jobs())\n\n # Update check\n github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0\n\n schedule_job(versioncheck.checkGithub, 'Check GitHub for updates',\n hours=0, minutes=github_minutes, seconds=0)\n\n # Our interval should never be less than 30 seconds\n monitor_seconds = CONFIG.MONITORING_INTERVAL if CONFIG.MONITORING_INTERVAL >= 30 else 30\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(plextv.get_real_pms_url, 'Refresh Plex server URLs',\n hours=12, minutes=0, seconds=0)\n schedule_job(pmsconnect.get_server_friendly_name, 'Refresh Plex server name',\n hours=12, minutes=0, seconds=0)\n\n schedule_job(activity_pinger.check_recently_added, 'Check for recently added items',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.NOTIFY_RECENTLY_ADDED))\n schedule_job(activity_pinger.check_server_response, 'Check for Plex remote access',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.MONITOR_REMOTE_ACCESS))\n schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',\n hours=12 * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)\n\n # If we're not using websockets then fall back to polling\n if not CONFIG.MONITORING_USE_WEBSOCKET or POLLING_FAILOVER:\n schedule_job(activity_pinger.check_active_sessions, 'Check for active sessions',\n hours=0, minutes=0, seconds=monitor_seconds)\n\n # Refresh the users list and libraries list\n user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12\n library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12\n\n if CONFIG.PMS_TOKEN:\n schedule_job(plextv.refresh_users, 'Refresh users list',\n hours=user_hours, minutes=0, seconds=0)\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(pmsconnect.refresh_libraries, 'Refresh libraries list',\n hours=library_hours, minutes=0, seconds=0)\n\n backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6\n\n schedule_job(database.make_backup, 'Backup PlexPy database',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n schedule_job(config.make_backup, 'Backup PlexPy config',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n\n # Start scheduler\n if start_jobs and len(SCHED.get_jobs()):\n try:\n SCHED.start()\n except Exception as e:\n logger.info(e)\n\n # Debug\n #SCHED.print_jobs()", "async def run_scheduler(self):\n while True:\n interval = 60\n for s in await self.get_service('data_svc').locate('schedules'):\n now = datetime.now().time()\n diff = datetime.combine(date.today(), now) - datetime.combine(date.today(), s.schedule)\n if interval > diff.total_seconds() > 0:\n self.log.debug('Pulling %s off the scheduler' % s.name)\n sop = copy.deepcopy(s.task)\n sop.set_start_details()\n await 
self._services.get('data_svc').store(sop)\n self.loop.create_task(self.run_operation(sop))\n await asyncio.sleep(interval)", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def AutonomousPeriodic(self):\n Scheduler.GetInstance().Run()", "def setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60, scheduled_task.s(), name='A scheduled task')", "def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)", "async def _start_cron_task(self):\n pass", "def tasks_start(sender, **kwargs):\n sender.add_periodic_task(5.0, get_heartbeat.s())\n sender.add_periodic_task(5.0, monitor_resource_util.s())", "def start_scheduler():\n from security_monkey import scheduler\n scheduler.setup_scheduler()\n scheduler.scheduler.start()", "def run_scheduled_tasks(self) -> None:\n self.scheduler.run(False)", "def scheduler(self):\n while True:\n if self.sch.empty():\n self.log.info(\"No scheduled jobs detected. Entering idle state\")\n bits = bitarray()\n # generate random 7B bitarrays\n for _ in range(pow(self.cube_dim,3)):\n bits.append(bool(random.getrandbits(1)))\n self.sch.enter(self.transmit_freq, 4, self.transmit, argument=(0, bits), kwargs={})\n else:\n try:\n self.log.info(\"Scheduled jobs detected. Serving through scheduler runner\")\n self.sch.run()\n except IOError as exc:\n self.log.exception(\"\"\"Scheduler runner encountered an error while executing the \n top level event: %s\"\"\", exc)\n sys.exit(1) # exit with status code 1", "async def start_periodically_refresh_appointments(): # pylint: disable=invalid-name\n await asyncio.sleep(60)\n await app[\"snct_scrapper\"].refresh_appointments_every_minutes()", "def __init__(self, scheduler_name, task, interval, delay=0):\n\n self.scheduler_name = scheduler_name\n self.task = task\n self.interval = interval\n self.delay = delay\n self.scheduler = sched.scheduler(time.time, time.sleep)\n self.__running = False\n super(Scheduler, self).__init__(name=self.scheduler_name)\n self.setDaemon(True)", "async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def start_engine():\r\n traffic = TrafficCollector()\r\n weather = WeatherController()\r\n client = MongoClient()\r\n db = client.jam_forecaster\r\n\r\n scheduler = BlockingScheduler()\r\n scheduler.add_job(get_data, trigger='cron', hour='6-22', minute='*/5', second='0', 
max_instances=10, args=[traffic, weather, db])\r\n scheduler.start()", "def startSchedule(self):\n DPxStartDinSched()", "async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def __init__(self, *args, time_frame=3, **kargs):\n super(Scheduler, self).__init__(*args, **kargs)\n self.time_frame = time_frame\n self.running_jobs = queue.Queue()\n self.scheduler_manager = []\n self.task_manager = None", "def __init__(self, interval=1.0):\n\n super(VirtualTimeSyncScheduler, self).__init__()\n self.interval = interval", "def run(self):\n self.timer.start()\n \n while not Status.is_final(self.status):\n if self.request:\n self.handle_request()\n \n if self.status == Status.RUNNING:\n # Clean up orphaned schedules and undead schedulers.\n # Schedule.objects.orphaned().update(scheduler=None)\n # CronSchedule.objects.orphaned().update(scheduler=None)\n \n cron = CronSchedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n simple = Schedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n for schedule in itertools.chain(cron, simple):\n self.log.info('Claiming %s.' 
% schedule)\n schedule.scheduler = self\n schedule.save()\n self.add(schedule)\n if not Status.is_final(self.status):\n self.wait()\n self.request = Scheduler.objects.get(pk=self.pk).request", "def __init__(self):\n\n super(VirtualTimeScheduler, self).__init__()\n self.event_queue = Queue.PriorityQueue()", "def runScheduler(self):\n\n for source in self.sources:\n intervals = [\n int(self.sources[source]['metrics'][x]['interval']) for x\n in range(0, len(self.sources[source]['metrics']))]\n sourceInterval = self.gcd(intervals)\n self.sources[source]['sourceInterval'] = sourceInterval\n self.logger.debug(self.sources[source]['metrics'])\n\n self.scheduler.add_job(\n self.getDriverData, 'interval', args=[\n self.sources[source]['metrics']],\n seconds=sourceInterval)", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def _configure_scheduler(self, scheduler: Scheduler, callback: Callable[[], None]) -> None:\n if self.is_cron:\n # Scheduler always executes at the exact minute to check for cron triggering\n scheduler.every().minute.at(\":00\").do(callback)\n else:\n # Only activate when an interval is specified\n # If not the only way is to trigger the poll by the api `trigger` endpoint\n if self._poll_interval:\n # Scheduler executes every interval seconds to execute the poll\n scheduler.every(self._poll_interval).seconds.do(callback)", "def start_updater(self, interval, clbk):\n self._scheduler = BlockingScheduler(executors={\n 'default': {'type': 'threadpool', 'max_workers': 1}\n })\n\n def job():\n clbk(self.check_feeds())\n\n self._scheduler.add_job(job, trigger='interval', minutes=interval)\n self._scheduler.start()", "def __init__(self, *args, **kwargs):\n BaseScheduler.__init__(self, *args, **kwargs)", "def autonomousInit(self):\n #self.timer.reset()\n #self.timer.start()\n pass", "def post_scheduler_start(self):\n pass", "def __init__(self):\n self._update_scheduled = False", "def start(self):\n gevent.spawn_later(self._period, self._run)", "def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()", "def _run_scheduled_daily_tasks():\n worker.add_task(daily.run)", "def _start_polling(self):\n self._handle = asyncio.get_event_loop().create_task(self._poll())", "def start(self):\r\n return self.schedule()", "async def test_create_interval(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # assert that the schedule type is interval\n interval_schedule = IntervalSchedule()\n assert interval_schedule.schedule_type == Schedule.Type.INTERVAL\n\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n\n await scheduler.save_schedule(interval_schedule)\n\n await self.stop_scheduler(scheduler)", "def start(self):\r\n thread = threading.Thread(target=self.run)\r\n try:\r\n thread.start()\r\n except RuntimeError as e:\r\n raise SchedulerError(f\"Failed to start worker '{self.WORKER_ID}': \" + str(e))", "def startService(self):\n super(_SiteScheduler, self).startService()\n self._transientSchedule(self.now(), self.now())", "def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)", "def _initJobs(self):\n 
super(DigestManager, self)._initJobs()\n conf = self.config.container_manager\n\n job4 = LoopingCall(self.performRequestedScan)\n job4.start(float(conf.activescan_interval))\n self.jobs.append(job4)", "def run(self):\n # starting program, run hello feeds\n self.do_jobs(self.run_start)\n\n while not self.terminate:\n now = time.localtime()\n now_time = now.tm_hour * 60 + now.tm_min\n\n # next run is at most 30sec away\n next_run = 30\n\n # button hold triggered\n if self.button_hold:\n self.button_hold = False\n self.do_jobs(self.run_hold)\n\n # button tap triggered\n if self.button_tap:\n self.button_tap = False\n self.do_jobs(self.run_tap)\n\n # look for scheduled feeds to run\n when_tasks = []\n for t in self.run_when:\n if t['when'] <= now_time:\n if not t['ran_today']:\n t['ran_today'] = True\n when_tasks.append(t)\n else:\n t['ran_today'] = False\n self.do_jobs(when_tasks)\n\n # look for interval feeds to run\n interval_tasks = []\n for t in self.run_interval:\n if t['next'] <= time.mktime(now):\n t['next'] = time.mktime(now) + t['interval']\n interval_tasks.append(t)\n if time.mktime(now) - t['next'] < next_run:\n next_run = time.mktime(now) - t['next']\n\n self.do_jobs(interval_tasks)\n\n # wait until we have work to do\n if next_run >= 1:\n signal.alarm(next_run)\n signal.pause()\n else:\n time.sleep(0.25)\n\n # quitting program, run stop feeds\n self.do_jobs(self.run_stop)", "def __init__(self):\n\n super(Scheduler, self).__init__()\n self.num_steps = 0\n self.current_time = 0.0\n self.components = []", "def start(self):\n while True:\n self.pull_accounts_rolls()\n sleep(PULL_FREQUENCY_SECONDS)", "def pump(self):\n # Current implementation polls every second; later implementation may\n # switch to inotify in which case this will have to change.\n self.runtime.dispatcher.pump()\n sched = self.runtime.getScheduleService()\n sched.advance(1.0)\n sched.pump()\n self.runtime.dispatcher.pump()", "def start(self):\n \n if not self.is_running:\n self._timer = threading.Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True", "def timer_setup(self):\n pass", "def _start_scheduled(self, method, interval: int, wait_until_ready: bool):\n async def run_method():\n if wait_until_ready:\n await self.bot.wait_until_ready()\n\n while not self.bot.is_closed():\n await asyncio.sleep(interval)\n\n try:\n await method()\n except Exception as e:\n self.bot.dispatch('scheduled_error', method.__name__, e)\n\n task = self.loop.create_task(run_method())\n self._scheduled_tasks.append(task)", "def setup_periodic_tasks(sender, **kwargs):\n # Schedule Task Every Day at 12:00 AM UTC Time\n sender.add_periodic_task(\n crontab(hour=0, minute=0),\n to_do_fehrist_tasks_reminder.s(),\n )\n # Reference add_periodic_table call method via s method\n # https://docs.celeryproject.org/en/stable/userguide/periodic-tasks.html\n # Setting these up from within the on_after_configure handler means that\n # we’ll not evaluate the app at module level when using test.s()", "def start(self):\n if self._offset == 0 and self._interval == 0:\n raise ValueError(\"timer will not fire because offset and interval are both zero\")\n \n self._apply_schedule()\n self._started = True", "def set_scheduler(self, scheduler):\n self.scheduler = scheduler", "def __init__(self) -> None:\n\n self._local = CurrentThreadScheduler._Local()", "def scheduler(self):\n\n while not self.stop.is_set():\n # Getting job from the schedule queue\n for job in self.job_gen():\n executor = threading.Thread(target=self.executor, 
args=(job,))\n executor.start()\n self.running_jobs.put((executor, job))\n\n time.sleep(SCHEDULER.FINEDELAY)", "def _run_scheduled_weekly_tasks():\n worker.add_task(weekly.run)", "def run_cron(self):\n logging.info(\n 'Starting with the \"cron\" parameter, we will run once and then exit.'\n )\n self.request_messages()\n logging.info(\"Nothing more to be done, we will exit.\")\n exit(0)", "async def test_interval_none_repeat(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # assert that the schedule type is interval\n interval_schedule = IntervalSchedule()\n assert interval_schedule.schedule_type == Schedule.Type.INTERVAL\n\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(1)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await asyncio.sleep(12)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)", "def register_auto_refresh(self):\n self._auto_refresh_task = asyncio.create_task(self._auto_refresh())", "def run_checks():\n while True:\n if datetime.now() > core.misc_data.check_date+timedelta(minutes=45):\n for stuff in stuff_to_do:\n threading.Thread(target=stuff).start()\n core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every\n time.sleep(5*60*60)", "def __init__(self, scheduler):\n self._scheduler = scheduler\n self._result = None\n self._timeouts = None", "def cron(self):\n return", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def start(self):\n self.monitor_lc.start(self.interval)", "def start(self):\n self.__data[\"status\"] = TASK.RUNNING # Set status running dor task\n self.__data[\"last_run\"] = time.time() # Update last run\n self.task_changed([\"status\", \"last_run\"]) # Send changed event", "def start_monitor():\n monitor_enabled = config_json[env]['MONITOR_ENABLED']\n monitor_trigger_interval_s = int( config_json[env]['MONITOR_TRIGGER_INTERVAL_S'] )\n\n # IF SCHEDULE IS ENABLED IN CONFIG:\n if monitor_enabled == \"1\":\n\n print(\"\\nSpace Weather Service Monitor: ENABLED (running every %s seconds)\" % monitor_trigger_interval_s)\n\n # RUN INITIAL CHECK SPACE WEATHER\n processes.process_check_space_weather()\n\n # CREATE SCHEDULER W/ INTERVAL TRIGGER AND START\n scheduler = BackgroundScheduler()\n scheduler.add_job(\n func = processes.process_check_space_weather,\n trigger = IntervalTrigger( seconds = monitor_trigger_interval_s ),\n id = 'check_space_weather',\n name = 'Checking Space Weather Every 30 Seconds')\n scheduler.start()\n atexit.register( lambda: scheduler.shutdown() )\n else:\n print(\"\\nSpace Weather Service Monitor: DISABLED\")", "async def add_schedulers(self):\n # first update\n tasks = [charger.schedules_async_refresh() for charger in self.chargers_data]\n if tasks:\n await asyncio.wait(tasks)\n self.hass.async_add_job(self.refresh_sites_state)\n self.hass.async_add_job(self.refresh_equalizers_state)\n\n # Add interval refresh for site state interval\n 
async_track_time_interval(\n self.hass,\n self.refresh_sites_state,\n timedelta(seconds=SCAN_INTERVAL_STATE_SECONDS),\n )\n\n # Add interval refresh for equalizer state interval\n async_track_time_interval(\n self.hass,\n self.refresh_equalizers_state,\n timedelta(seconds=SCAN_INTERVAL_EQUALIZERS_SECONDS),\n )\n\n # Add interval refresh for schedules\n async_track_time_interval(\n self.hass,\n self.refresh_schedules,\n timedelta(seconds=SCAN_INTERVAL_SCHEDULES_SECONDS),\n )\n\n # Add interval refresh for consumption sensors\n async_track_time_interval(\n self.hass,\n self.refresh_consumption_sensors,\n timedelta(seconds=SCAN_INTERVAL_CONSUMPTION_SECONDS),\n )", "def populate_scheduler(program: dict) -> sched.scheduler:\n scheduler = sched.scheduler(time.time, time.sleep)\n\n now = datetime.now(tz=tzlocal()).timestamp()\n for switch_id in switches.keys():\n current_state = 'off'\n for tm, command in program[switch_id].items():\n if tm < now:\n current_state = command\n else:\n logging.info(f'Scheduling: Switch {switch_id}: {datetime.fromtimestamp(tm, tz=tzlocal()).strftime(\"%H:%M:%S %z\")}: {command}')\n scheduler.enterabs(tm, 1, send_command_to_switch, argument=(switch_id, command,))\n logging.info(f'Setting current state for Switch {switch_id}: {current_state}')\n send_command_to_switch(switch_id, current_state)\n return scheduler", "async def inbound_task_call(self):\n from cocotb.triggers import Timer\n await Timer(0, units=\"ps\")", "def run(self):\n rate = rospy.Rate(self._run_rate)\n while not rospy.is_shutdown():\n try:\n rate.sleep()\n except:\n break", "def run(self):\n self.logger.info(f'Running {self.__class__.__name__}')\n while True:\n last_check = time.time()\n self.collect_new_events()\n while time.time() - last_check < self._check_for_new_events_interval:\n self.logger.debug('Waiting for new events collection: new collection in {}s'.format(\n self._check_for_new_events_interval - (time.time() - last_check)))\n time.sleep(1)", "def TeleopPeriodic(self):\n Scheduler.GetInstance().Run()\n LiveWindow.Run()", "def schedule_start(self):\n if not self.is_scheduled():\n loop = asyncio.get_event_loop()\n self._virt_path_task = loop.run_in_executor(None, self._handle_event)\n self.status = EventTaskStatus.EXEC_QUEUED", "def __init__(self, meta, pid):\r\n # Parse workers\r\n self.workers = []\r\n workers = meta.get(\"workers\", None)\r\n if workers is None:\r\n raise SchedulerError(f\"Requires 'workers' field\")\r\n \r\n if not isinstance(workers, list):\r\n raise SchedulerError(f\"Expected 'workers' as a list\")\r\n\r\n for worker in workers:\r\n name = worker.get('type', None)\r\n background = worker.get('async', False)\r\n args = worker.get('args', {})\r\n if name is None:\r\n raise SchedulerError(f\"Requires 'type' field\")\r\n\r\n if not isinstance(background, bool):\r\n raise SchedulerError(f\"Expected 'async' as a bool\")\r\n\r\n try:\r\n worker = WorkerFactory.build(name, args, pid, background)\r\n except FactoryError as e:\r\n raise SchedulerError(f\"Error building '{name}': {str(e)}\")\r\n\r\n self.workers.append(worker)\r\n\r\n # Parse mode\r\n self.mode = None\r\n self.time = None\r\n schedule = meta.get('schedule', None)\r\n if schedule is None:\r\n self.mode = self.MODE_INSTANT\r\n else:\r\n mode = schedule.get('mode', None)\r\n if mode is None:\r\n raise SchedulerError(f\"'schedule' requires 'mode' field\")\r\n \r\n self.mode = self.MODE_MAP.get(mode, None)\r\n if self.mode is None:\r\n raise SchedulerError(f\"Unrecognized value for 'mode': {mode}\")\r\n\r\n # Get the 
delay\r\n if self.mode == self.MODE_DELAY:\r\n delay = schedule.get('delay', None)\r\n if delay is None:\r\n raise SchedulerError(f\"'schedule' requires 'seconds' field when in the specified mode\")\r\n\r\n if not isinstance(delay, (int, float)):\r\n raise SchedulerError(f\"Expected 'seconds' as a float or int\")\r\n\r\n self.time = delay\r\n elif self.mode == self.MODE_ALARM:\r\n time = schedule.get('time', None)\r\n if time is None:\r\n raise SchedulerError(f\"'schedule' requires 'time' field when in the specified mode\")\r\n \r\n try:\r\n trigger_time = datetime.strptime(time, '%m/%d/%y %H:%M:%S')\r\n except Exception as e:\r\n print(e)\r\n raise SchedulerError(f\"Failed to parse '{time}' as a datetime object\")\r\n\r\n self.time = trigger_time", "def run(self):\n r = rospy.Rate(100)\n while not rospy.is_shutdown():\n r.sleep()", "def _initScheduler(self) -> torch.optim.lr_scheduler.ReduceLROnPlateau:\n\n return torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.optimizer, \n mode=cfg.training.scheduler_mode,\n factor=cfg.training.scheduler_factor,\n patience=cfg.training.scheduler_patience,\n threshold=cfg.training.scheduler_threshold\n )", "def __init__(self, reschedule_on_failure=True):\n self.reschedule_on_failure = reschedule_on_failure\n super().__init__()\n \n def _run_job(self, job):\n try:\n super()._run_job(job)\n except Exception:\n logger.error(format_exc())\n job.last_run = datetime.datetime.now()\n job._schedule_next_run()", "def run_hourly_hygienist(self):\n self.ensure_timebox_trackers_accurate()\n self.copy_tasks_with_schedule_string()", "def __init__(self, database_manager=DataBaseManager(), emailer=EmailSender()):\n self.database_manager = database_manager\n self.emailer = emailer\n # Set available timeslots\n self.initial_time_slots = ['09:00:00',\n '10:00:00',\n '11:00:00',\n '12:00:00',\n '13:00:00',\n '14:00:00',\n '15:00:00',\n '16:00:00',\n '17:00:00']", "def getSchedulers():", "def run(self):\n IScheduler(self.subStore).tick()", "def start(self, *_):\n try:\n box_configurations = self.bc_dao.run_query(QUERY_PROCESSES_FOR_BOX_ID(self.box_id))\n\n for box_config in box_configurations:\n handler = RepeatTimer(TRIGGER_INTERVAL, self.manage_process, args=[box_config.process_name])\n self.thread_handlers[box_config.process_name] = handler\n handler.start()\n self.logger.info(f'Started Supervisor Thread for {box_config.process_name}, '\n f'triggering every {TRIGGER_INTERVAL} seconds')\n except LookupError as e:\n self.logger.error(f'Supervisor failed to start because of: {e}')", "def __init__(self, pool, params = None):\n\n # initialize thread\n Thread.__init__(self)\n\n # store link to threads pool\n self.pool = pool\n\n # set control parameteres\n self.threadsWorking = 0\n try:\n self.delay = params['delay']\n except KeyError:\n self.delay = 30\n try:\n self.maxJobs = params['jobsToPoll']\n except KeyError:\n self.maxJobs = 100\n\n self.sessionPool = params['sessionPool']\n self.groupsUnderProcessing = Set([])\n self.jobPerTask = None\n\n # start scheduler thread\n self.setDaemon(1)\n self.start()", "def new_scheduler(self) -> BaseScheduler:\n scheduler = BackgroundScheduler()\n scheduler.start()\n atexit.register(lambda: scheduler.shutdown())\n return scheduler", "def start_task():\n get_results_from_message_queue()\n test_all_servers_connection()", "def scheduler(self):\n return self.__scheduler", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = 
read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def start(self):\n\n def send_forever():\n while True:\n if self.stop_event.is_set():\n return\n\n start = time.time()\n for task in self.tasks:\n try:\n if start - task.last_call_succeeded_time >= task.interval_s:\n if task.last_ref:\n ready_refs, _ = ray.wait([task.last_ref], timeout=0)\n if len(ready_refs) == 0:\n continue\n data = task.task_func()\n task.last_call_succeeded_time = time.time()\n if task.callback_func and ray.is_initialized():\n task.last_ref = task.callback_func(\n data, send_timestamp=time.time()\n )\n except Exception as e:\n logger.warning(\n f\"MetricsPusher thread failed to run metric task: {e}\"\n )\n\n # For all tasks, check when the task should be executed\n # next. Sleep until the next closest time.\n least_interval_s = math.inf\n for task in self.tasks:\n time_until_next_push = task.interval_s - (\n time.time() - task.last_call_succeeded_time\n )\n least_interval_s = min(least_interval_s, time_until_next_push)\n\n time.sleep(max(least_interval_s, 0))\n\n if len(self.tasks) == 0:\n raise ValueError(\"MetricsPusher has zero tasks registered.\")\n\n self.pusher_thread = threading.Thread(target=send_forever)\n # Making this a daemon thread so it doesn't leak upon shutdown, and it\n # doesn't need to block the replica's shutdown.\n self.pusher_thread.setDaemon(True)\n self.pusher_thread.start()", "def basicSetup(self):\n stats = self.default_statistics()\n sched, _ = self.schedulerSetup(stats[\"max_trials\"])\n\n self.assertEqual(len(sched._hyperbands), 1)\n self.assertEqual(sched._cur_band_filled(), True)\n\n filled_band = sched._hyperbands[0]\n for bracket in filled_band:\n self.assertEqual(bracket.filled(), True)\n return sched", "def start(self, exceptions):\n if not self._interval:\n return\n with self._lock:\n self._running.set()\n self._threshold = 0\n self._reads_since_check = 0\n self._writes_since_check = 0\n self._exceptions = exceptions\n LOGGER.debug('Heartbeat Checker Started')\n self._start_new_timer()", "def schedule(self):\r\n n = self.next()\r\n if n is not None:\r\n if self.clock:\r\n self.cl = self.clock.callLater(n, self.run)\r\n else:\r\n self.cl = core.call_later(n, self.run)\r\n else:\r\n self.cl = None", "def lr_scheduler(self, lr_init, global_step):\n pass", "def __init__(self, interval=1.0):\n\n self.last_input_time = -1.0\n self.last_output_time = -1.0\n self.last_spent = -1.0\n self.last_dt = -1.0\n\n super(RealTimeSyncScheduler, self).__init__()\n self.set_interval(interval)", "def start(self):\n self.timer.start(500)", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler" ]
[ "0.7595585", "0.7595585", "0.7579649", "0.749416", "0.7138583", "0.7092702", "0.67534935", "0.6750422", "0.67337924", "0.6687412", "0.65673363", "0.65599716", "0.6548511", "0.652253", "0.6520828", "0.64790255", "0.6437538", "0.6316568", "0.631423", "0.6258985", "0.62449056", "0.6239972", "0.6224071", "0.6185637", "0.61843276", "0.6071042", "0.6060284", "0.60583925", "0.60531706", "0.6035131", "0.60272217", "0.60143346", "0.6011336", "0.6005967", "0.59758836", "0.5973129", "0.59567463", "0.59522766", "0.5929192", "0.5899517", "0.58827496", "0.5864269", "0.5851452", "0.5838672", "0.5837631", "0.57981765", "0.579357", "0.5783234", "0.577846", "0.5767245", "0.5765897", "0.5765882", "0.5765526", "0.57624006", "0.57343656", "0.57138574", "0.5704992", "0.5695665", "0.56951946", "0.5673155", "0.5671108", "0.56663954", "0.5665928", "0.5657905", "0.5636895", "0.5635682", "0.56239873", "0.56178707", "0.56082904", "0.5606867", "0.5595726", "0.55943394", "0.5575338", "0.55687165", "0.5567197", "0.5562936", "0.55533594", "0.5546801", "0.55324715", "0.55237633", "0.5519936", "0.5518782", "0.55123365", "0.5508152", "0.5505865", "0.55013543", "0.5492495", "0.54902893", "0.5485947", "0.548593", "0.548222", "0.5481159", "0.54771304", "0.54768556", "0.54747957", "0.5469092", "0.5469092", "0.5469092", "0.5469092", "0.5469092" ]
0.775051
0
Calculate the maximum amount of rain between now and now+minute
Remote procedure to be called by the core of Domos
def rain_max(self, key=None, name=None, lat=None, lon=None, minute=0): self.logger.info("added sensor for rain max %s : %s for %s minutes" % (lat, lon, minute)) if key and lat and lon and minute: try: minute = int(minute) except: return False new_rain = Rain(key, lat, lon, minute, self._max) self._rain.append(new_rain) return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def rain_rate(self, value):\n if not value:\n return 0\n return await self.rain(value * 60)", "def max_humidity(self):\n return 60", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def getRemainingRunTime(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SYST:BATT:ARTT?\")\n ret = self.myFieldFox.read()\n else:\n ret = 60\n return ret", "def max(self):\n\n return time_stat(self, stat=\"max\")", "def evaluate(self, time) -> float:\n ...", "def minutesSinceLastUpdate(self):\n if self.seenTimes == []:\n return 0\n latestTime = max(self.seenTimes)\n return int(self.timeCode())-int(latestTime)", "def end_time(self) -> float:\r\n ...", "def wall_time(self):", "def normalized_total_time(p, max_time=3600000):\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def remaining_ms():", "def event_based_r_factor(self):\n # assign variables\n rain_energy = 'rain_energy'\n rain_volume = 'rain_volume'\n erosivity = 'erosivity'\n r_factor = 'r_factor'\n\n # derive rainfall energy (MJ ha^-1 mm^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_energy}\"\n \"=0.29*(1.-(0.72*exp(-0.05*{rain_intensity})))\".format(\n rain_energy=rain_energy,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive rainfall volume\n \"\"\"\n rainfall volume (mm)\n = rainfall intensity (mm/hr)\n * (rainfall interval (min)\n * (1 hr / 60 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_volume}\"\n \"= {rain_intensity}\"\n \"*({rain_interval}\"\n \"/60.)\".format(\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive event erosivity index (MJ mm ha^-1 hr^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosivity}\"\n \"=({rain_energy}\"\n \"*{rain_volume})\"\n \"*{rain_intensity}\"\n \"*1.\".format(\n erosivity=erosivity,\n rain_energy=rain_energy,\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive R factor (MJ mm ha^-1 hr^-1 yr^1)\n \"\"\"\n R factor (MJ mm ha^-1 hr^-1 yr^1)\n = EI (MJ mm ha^-1 hr^-1)\n / (rainfall interval (min)\n * (1 yr / 525600 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n 
expression=\"{r_factor}\"\n \"={erosivity}\"\n \"/({rain_interval}\"\n \"/525600.)\".format(\n r_factor=r_factor,\n erosivity=erosivity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_energy',\n 'rain_volume',\n 'erosivity'],\n flags='f')\n\n return r_factor", "def workflow(now, realtime):\n szx = 7000\n szy = 3500\n # Create the image data\n imgdata = np.zeros((szy, szx), 'u1')\n sts = now - datetime.timedelta(minutes=2)\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': now.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'product': 'a2m',\n 'units': '0.02 mm'}\n\n gribfn = mrms.fetch('PrecipRate', now)\n if gribfn is None:\n print((\"mrms_rainrate_comp.py NODATA for PrecipRate: %s\"\n ) % (now.strftime(\"%Y-%m-%dT%H:%MZ\"),))\n return\n\n # http://www.nssl.noaa.gov/projects/mrms/operational/tables.php\n # Says units are mm/hr\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n os.unlink(gribfn)\n\n val = grb['values']\n # Convert into units of 0.1 mm accumulation\n val = val / 60.0 * 2.0 * 50.0\n val = np.where(val < 0., 255., val)\n imgdata[:, :] = np.flipud(val.astype('int'))\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n\n # Create Image\n png = Image.fromarray(np.flipud(imgdata))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n routes = \"c\" if realtime else \"\"\n prefix = 'a2m'\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.wld GIS/mrms/%s_%s.wld wld' %s.wld\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.png GIS/mrms/%s_%s.png png' %s.png\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n if realtime:\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/%s.tif GIS/mrms/%s_%s.tif tif' %s.tif\"\n \"\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/%s.json GIS/mrms/%s_%s.json json' \"\n \"%s.json\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n if os.path.isfile(\"%s.%s\" % (tmpfn, suffix)):\n os.unlink('%s.%s' % (tmpfn, suffix))\n\n os.close(tmpfp)\n os.unlink(tmpfn)", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space 
time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n 
gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def max_time(self):\n return self.time[np.argmax(self.flux)]", "def get_heater_rod_status(self, simulation, el_load_file, actual_time, 
start_datetime, start_sim_inh, end_sim_inh):\n if simulation:\n # file based simulation - values are read from the file\n # file based simulation - values are read from the file\n # hour_of_year = 1\n simtime = int(math.floor(((actual_time - start_datetime).seconds / (60.0 * 15.0)) + start_sim_inh * 60.0 / 15.0)) # simulationstime in quarters = 15 minutes slots\n if (simtime >= 35040): # actual time exceeds the first year (there are 35 040 slots of 15 minutes in a year)\n simtime = simtime - math.floor(simtime / 35040) * 35040\n line1 = utils.get_significant_parts(el_load_file[simtime].rstrip().split(\" \"))\n y1 = float(utils.get_ith_column(2, line1))\n return y1 # as load from 0 to 1\n else:\n # real time calculation - values are received via MQTT? - dead for now\n return 0", "def get_max_temp(self):\n self.max_temp = self.domain[1] * 2", "def _get_time_interval_in_minutes(self):\n return self.visa.get_request_interval_in_minutes()", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n 
initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = 
evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def solar(self, _mask, _target, _args):\n return self.get_sensor(\"pv_yield_now\")", "def _psp_max_time(rise, decay, rise_power):\n return rise * np.log(1 + (decay * rise_power / rise))", "def last_5_mins(conn,from_time):\n durTot = 0\n time = '{}'.format(from_time)\n query = ''' SELECT sum(duration) FROM events WHERE event_type = 'Cycle End' AND unix_time > ?'''\n c = conn.cursor()\n c.execute(query,(time,))\n (data, ) = c.fetchone()\n try:\n \t durTot = round(data,2)\n except:\n\tpass\n return durTot", "def endTime(self) -> float:\n try: return self.times[-1]\n except IndexError: return 0.0", "def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime", "def get_current_simulated_time(self):\n\n query = \"SELECT MAX(time) FROM patient_signal_values\"\n\n return self.mysql_obj.fetch_value(query)", "def _calculate_monomer(self, raw=False):\n ta = self.TimeAxis\n # transition frequency\n om = self.system.elenergies[1]-self.system.elenergies[0]\n # transition dipole moment\n dm = self.system.dmoments[0,1,:]\n # dipole^2\n dd = numpy.dot(dm,dm)\n # natural life-time from the dipole moment\n gama = [0.0] #[-1.0/self.system.get_electronic_natural_lifetime(1)]\n sbi = self.system.get_SystemBathInteraction(self.TimeAxis)\n reorg = sbi.CC.get_reorganization_energy(0,0)\n \n if self.system._has_system_bath_coupling:\n # correlation function\n ct 
= self.system.get_egcf((0,1)) \n gt = self._c2g(ta,ct.data)\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"ct\":ct,\"gt\":gt,\"gg\":gama,\"fwhm\":0.0}\n else:\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"gg\":gama,\"fwhm\":0.0}\n \n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n\n tr[\"re\"] = reorg\n\n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n\n # calculates the one transition of the monomer \n data = numpy.real(self.one_transition_spectrum_abs(tr))\n data_fl = numpy.real(self.one_transition_spectrum_fluor(tr))\n\n \n for ii in range(2,self.system.Nb[1]+1):\n \n # transition frequency\n om = self.system.elenergies[ii]-self.system.elenergies[0]\n # transition dipole moment\n dm = self.system.dmoments[0,ii,:]\n # dipole^2\n dd = numpy.dot(dm,dm)\n # natural life-time from the dipole moment\n gama = [0.0] #[-1.0/self.system.get_electronic_natural_lifetime(ii)]\n \n if self.system._has_system_bath_coupling:\n # correlation function\n ct = self.system.get_egcf((0,ii)) \n gt = self._c2g(ta,ct.data)\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"ct\":ct,\"gt\":gt,\"gg\":gama,\"fwhm\":0.0}\n else:\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"gg\":gama,\"fwhm\":0.0}\n\n if self._gauss_broad: \n tr[\"fwhm\"] = self.gauss\n \n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n \n data += numpy.real(self.one_transition_spectrum_abs(tr))\n\n # we only want to retain the upper half of the spectrum\n Nt = len(self.frequencyAxis.data)//2 \n do = self.frequencyAxis.data[1]-self.frequencyAxis.data[0]\n st = self.frequencyAxis.data[Nt//2]\n # we represent the Frequency axis anew\n axis = FrequencyAxis(st,Nt,do)\n\n # multiply the spectrum by frequency (compulsory prefactor)\n if not raw:\n data = axis.data*data\n data_fl = (axis.data**3)*data_fl\n\n \n spect_abs = LinSpectrum(axis=axis, data=data)\n fluor_spect = LinSpectrum(axis=axis, data=data_fl)\n \n return {\"abs\": spect_abs, \"fluor\": fluor_spect}", "def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> Optional[int]:\n return pulumi.get(self, \"traffic_restoration_time_to_healed_or_new_endpoints_in_minutes\")", "def callback_max_wall_time_reached(self, event):\n self.perform_final_actions()\n self._max_wall_time_reached = True", "def get_utilization(self, current_time):\n\n # If the server is not serving, not online, and was not serving this\n # time period, move the anchor.\n if (not self.is_serving) and \\\n (not self.online) and \\\n (self.utilization == 0) and \\\n len(self.queue) == 0:\n self.utilization_anchor = current_time\n\n # If the server is serving or has people waiting...\n elif self.is_serving or len(self.queue) != 0:\n if current_time == self.utilization_anchor:\n self.utilization = 1\n else:\n self.utilization = self.utilization + (\n (1-self.utilization) /\n ((current_time-self.utilization_anchor)*1.0))\n\n # If the server is online but is not doing anything...\n elif self.online and \\\n (not self.is_serving) and \\\n len(self.queue) == 0:\n if current_time == self.utilization_anchor:\n self.utilization = 0\n else:\n self.utilization = self.utilization + (\n (0-self.utilization) /\n ((current_time-self.utilization_anchor)*1.0))\n\n # If we are on the hour and the server has been online,\n # we flush the results and reset the utilization.\n if current_time != 0 and \\\n (current_time + 1) % _get_sec(\"01:00:00\", spd_factor) == 0 and \\\n self.online:\n self.utilization_series[_get_ttime(\n current_time + 1 - _get_sec(\"01:00:00\", spd_factor), \n spd_factor)] = 
self.utilization\n\n\n #self.output_queue.server_statistics.append(\n # [self.id,\n # self.utilization,\n # _get_ttime(current_time, spd_factor)])\n\n self.utilization = 0\n self.utilization_anchor = current_time + 1", "def max_retire_time(self):\n return self._max_retire_time", "def max_temp(self):\n return 30", "def rain(walls):\n if type(walls) is not list or walls is None:\n return 0\n if len(walls) <= 1:\n return 0\n\n total = 0\n for i in range(1, len(walls) - 1):\n le = max(walls[:i])\n ri = max(walls[i + 1:])\n mini = min(le, ri)\n if walls[i] < mini:\n total += mini - walls[i]\n return total\n # total = 0\n # n = len(walls)\n # for i in range(1, n-1):\n # # left max\n # left = walls[i]\n # for j in range(i):\n # left = max(left, walls[j])\n # # right max\n # right = walls[i]\n # for j in range(i+1, n):\n # right = max(right, walls[j])\n # total += min(left, right) - walls[i]\n # return total", "def lifetime_max_drawdown(daily_drawdown):\n\n return round(daily_drawdown.min()*100, 2)", "def risetime_calc(self):\n\n # given the transmitter's 20%-80% risetime, and assuming a\n # Gaussian impulse response, calculate the 10%-90% risetime\n # cell G3\n\n #self.tx_1090_rise = 1.518*self.tx_2080_rise #Fix 1 : Formula not same as in Cell T7\n self.tx_1090_rise = 329*1000/self.tx_2080_rise\n \n # calculate the effective risetimes for the fiber channel, given\n # the bandwidths calculated in the previous section, assuming\n # a Gaussian impulse response model\n self.cd_1090_rise = 0.48E6 / self.bw_cd\n self.md_1090_rise = 0.48E6 / self.bw_md\n\n # calculate the risetime for the link receiver, given its\n # bandwidth and assuming a single pole impulse response\n # Cell T7\n self.rx_1090_rise = 0.329E6/self.rx_bw\n\n # calculate the risetime for the test receiver used for transmitter\n # eye displays, given its bandwidth and assuming a single pole\n # response\n self.rx_txeye_1090_rise = 0.329E6 / self.txeye_rx_bw\n\n # calculate Te from column H and Tc from column I\n tr_tx_2 = self.tx_1090_rise**2*self.l_1\n tr_rx_2 = self.rx_1090_rise**2*self.l_1\n tr_cd_2 = np.square(self.cd_1090_rise)\n tr_md_2 = np.square(self.md_1090_rise)\n self.te = np.sqrt(tr_cd_2 + tr_md_2 + tr_tx_2) # column H\n \n self.tc = np.sqrt(tr_cd_2 + tr_md_2 + tr_tx_2 + tr_rx_2) # column I\n \n\n # end of GbE10..risetime_calc", "def evaluateY(self, time) -> float:\n ...", "def max_intensity(self, time):\n ti = np.where(time == self.times)[0][0]\n return self.timesteps[ti].max()", "def freq_minutes(self):\n return 5", "def shared_runners_minutes_limit(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"shared_runners_minutes_limit\")", "def getMaxSimTime(self):\n return self.max_simsecs_value", "def TimeToRefill(self):\n # Get current timestamp in miliseconds from unix epoch\n now = int(time.time() * 1000)\n timeatrefile = self.status['timestamp'] + self.status['refillIn']\n\n timetorefil = timeatrefile - now + 1000 # plus one second fudge factor\n if timetorefil < 0:\n timetorefil = 0\n\n # Return value in seconds\n return timetorefil / 1000.0", "def GetMonotime():\n return float(open(PROC_UPTIME).read().split()[0])", "def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / 
v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration", "async def raindelay(self) -> dict:\n return await self._request(\"get\", \"restrictions/raindelay\")", "def surface_runoff_flux(runoff, drain):\n return runoff - drain", "def rain(walls):\n if not walls:\n return 0\n res = 0\n size = len(walls)\n\n for i in range(1, size - 1):\n left = walls[i]\n for j in range(i):\n left = max(left, walls[j])\n right = walls[i]\n\n for j in range(i + 1, size):\n right = max(right, walls[j])\n res = res + (min(left, right) - walls[i])\n\n return res", "def maTail(self):\n return self.maCruise * sqrt(self.speedRatio)", "def min_humidity(self):\n return 0", "def litres(time):\n return int(time / 2)", "def findMaximumDeviationLoop(junctions, wires, resistances, voltages, currents):\n raise NotImplementedError", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def ra_dec_calculate(self) -> dict:\n for sec in range(self.delta_time):\n if 0 < self.ra_start + self.one_sec_walk_ra < 360 * 3600:\n self.ra = self.ra_start + self.one_sec_walk_ra\n self.ra_start = self.ra\n else:\n self.ra = self.ra_start + self.one_sec_walk_ra - 360 * 3600\n self.ra_start = self.ra\n if self.ra_dec_min < self.ra < self.ra_dec_max:\n self.dec = self.dec_start - self.one_sec_walk_dec\n self.dec_start = self.dec\n else:\n self.dec = self.dec_start + self.one_sec_walk_dec\n self.dec_start = self.dec\n\n ra_res = f'{int(self.ra // (3600 * 15))}:{int((self.ra % 3600) // 60)}:' \\\n f'{round(float((self.ra % 3600) % 60), 1)}'\n dec_res = f'{int(self.dec // 3600)}:{int((self.dec % 3600) // 60)}:' \\\n f'{round(float((self.dec % 3600) % 60), 1)}'\n moon = {\n 'ra': ra_res,\n 'dec': dec_res\n }\n return moon", "def handleReturnTime(rtt):\n pass", "def monitor(self, rms):\n pass", "def _handle_time_limits(self, calculation):\n from aiida.common.exceptions import NotExistent\n\n # if previous calculation failed for the same reason, do not restart\n try:\n prev_calculation_remote = calculation.base.links.get_incoming().get_node_by_label('parent_folder')\n prev_calculation_status = prev_calculation_remote.creator.exit_status\n if prev_calculation_status in FleurCalculation.get_exit_statuses(['ERROR_TIME_LIMIT']):\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True)\n except NotExistent:\n pass\n\n self.report('FleurCalculation failed due to time limits, I restart it from where it ended')\n\n # increase wallclock time\n propose_wallclock = self.ctx.inputs.metadata.options['max_wallclock_seconds'] * 2\n if propose_wallclock > self.ctx.max_queue_wallclock_sec:\n propose_wallclock = self.ctx.max_queue_wallclock_sec\n self.ctx.inputs.metadata.options['max_wallclock_seconds'] = propose_wallclock\n\n # increase number of nodes\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n\n remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')\n\n # resubmit providing inp.xml and cdn from the remote folder\n self.ctx.is_finished = False\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n\n 
return ProcessHandlerReport(True)", "def minutes(self):\n return int((self.end - self.start).total_seconds()) / 60", "def get_time(self) -> float:\n self.rocket.update()\n return self.rocket.time", "def returnGlobalTimer(self):\n self.globalTime = (time.time() - self.globalStartRef) + self.addedTime #Reports time in minutes, addedTime is for population reboot.\n return self.globalTime/ 60.0", "def rate_last(self):\n diff = (self.time - self.lasts[0][0]).total_seconds()\n try:\n return (self.pos - self.lasts[0][1]) / FAC / diff\n except ZeroDivisionError:\n return 0.0", "def _get_cpu_interval(self):\n self._polling_execute_frequency = int(self._plugin_conf[u'main'][u'polling_frequency'])\n\n if 5 <= self._polling_execute_frequency < 60:\n return cpmCPUTotalMonIntervalValue # replaces cpmCPUTotal5SecRev\n elif 60 <= self._polling_execute_frequency < 300:\n return cpmCPUTotal1minRev\n elif 300 <= self._polling_execute_frequency:\n return cpmCPUTotal5minRev\n else:\n return cpmCPUTotal1minRev", "def calculate_minutes(time):\n return int(time / 60)", "def rrtime(self):\n if len(self.data.peaks):\n diff = ((self.data._masked[:-1] + self.data._masked[1:])\n / (2 * self.data.fs))\n return diff.compressed()", "def get_rmax(self):\n return self.rmax", "def lastTick():", "def generate_fire_recurrence(self):\r\n \r\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\r\n return self.time_to_next_fire", "def calculate():\n con = mdb.connect(constants.sql_.IP, constants.sql_.USER, constants.sql_.PASS,\n constants.sql_.DB)\n# dicti = {}\n liste = mdb_get_table(constants.sql_tables.cron.name)\n# with con:\n# cur = con.cursor()\n# sql = 'SELECT * FROM '+constants.sql_tables.cron.name\n# cur.execute(sql)\n# results = cur.fetchall()\n# field_names = [i[0] for i in cur.description]\n# j = 0\n# for row in results:\n# for i in range(0, len(row)):\n# dicti[field_names[i]] = row[i]\n# liste.append(dicti)\n# dicti = {}\n# j = j + 1\n# con.close\n time = localtime()\n HOME.date = strftime(\"%Y-%m-%d 00:00:00\", time)\n # check for daylight saving\n if getattr(localtime(), 'tm_isdst') > 0:\n delta = 2\n else:\n delta = 1\n sunrise = ((HOME.next_rising(ephem.Sun())).datetime() +\n datetime.timedelta(hours=delta, minutes=0, seconds=0))\n sunset = ((HOME.next_setting(ephem.Sun())).datetime() +\n datetime.timedelta(hours=delta, minutes=0, seconds=0))\n for eintrag in liste:\n dynamic = False\n for setting in eintrag:\n if setting == \"Sonne\" and str(eintrag.get(\"Sonne\")) <> \"None\":\n dynamic = True\n if str(eintrag.get(\"Sonne\")) == \"rise\":\n time = sunrise.replace(second=0)\n else:\n time = sunset.replace(second=0)\n elif setting == \"Rohtime\" and str(eintrag.get(\"Rohtime\")) <> \"None\":\n dynamic = True\n time = eintrag.get(\"Rohtime\")\n for setting in eintrag:\n if setting == \"offset\" and str(eintrag.get(\"offset\")) <> \"None\":\n time = time + datetime.timedelta(hours=0, minutes=int(eintrag.get(\"offset\")),\n seconds=0)\n if setting == \"Zufall\" and str(eintrag.get(\"Zufall\")) <> \"None\":\n time = (time +\n datetime.timedelta(hours=0,\n minutes=random.randrange(int(eintrag.get(\"Zufall\"))),\n seconds=0))\n if dynamic:\n with con:\n #time = time - datetime.timedelta(seconds=int(str(time)[6:]))\n cur = con.cursor()\n sql = ('UPDATE %s SET Time = \"%s\" WHERE Id = \"%s\"'\n % (constants.sql_tables.cron.name, str(time), str(eintrag.get(\"Id\"))))\n cur.execute(sql)\n con.close\n return True", "def max_pwm(self):\r\n return self._max_pwm", "def 
lunarperigee(time):\n dtor = np.pi / 180\n t1 = 1 + time\n t2 = t1 * t1\n t3 = t2 * t1\n perigee = (\n 334.329653 * dtor\n + 4069.0340329575 * dtor * t1\n - 0.010325 * dtor * t2\n - 1.2e-5 * dtor * t3\n )\n return perigee", "def virtual_round(self):\n return math.ceil(self.virtual_time())", "def LocalUpdate(self):\n\n # Get current timestamp in miliseconds from unix epoch\n t = int(time.time() * 1000)\n\n # Number of times refill has occured\n lstrefil = self.status['timestamp'] - (60000 - self.status['refillIn'])\n nrefil = (t - lstrefil) / 60000.0\n\n if nrefil > 1:\n self.status['tokensLeft'] += self.status['refillRate'] * \\\n int(nrefil)\n\n if self.status['tokensLeft'] > 60 * self.status['refillRate']:\n self.status['tokensLeft'] = 60 * self.status['refillRate']\n\n # Update timestamps\n self.status['timestamp'] = t\n self.status['refillIn'] = int((1 - nrefil % 1) * 60000)", "def evaluateTime(self, *args):\n return _osgAnimation.Motion_evaluateTime(self, *args)", "def logging_time(self, cur_res_val=0):\n self.fixed_val = self.new_val\n self.minutes_val += 1\n \n if cur_res_val:\n if self.cur_hour == 23:\n self.time_counter[str(0)] = 0\n else:\n self.time_counter[str(self.cur_hour+1)] = 0\n if cur_res_val < 30:\n self.time_counter[str(self.time_hour)] = self.minutes_val\n self.minutes_val = 0\n self.new_hour_flag = False\n elif cur_res_val >= 30:\n if self.time_hour - self.cur_hour:\n self.time_counter[str(self.cur_hour)] = self.minutes_val\n self.minutes_val = 0\n self.new_hour_flag = False\n print(self.time_counter)", "def rain(walls):\n if not walls:\n return 0\n if len(walls) <= 2:\n return 0\n rn = 0\n for r in range(1, len(walls) - 1):\n left = max(walls[:r])\n right = max(walls[r + 1:])\n minn = min(left, right)\n if walls[r] < minn:\n rn += minn - walls[r]\n return rn", "def raw_rain_sensor_temp(self) -> int:\n self._update_analog_value_cache()\n return self.analog_cache.rain_sensor_temp", "def completedPrecoveryMaxDate(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Fetch the maximum MJD precovery has processed.\n sql = 'select max(epoch_mjd) from detections d, tracklet_attrib ta '\n sql += 'where d.det_id = ta.det_id and ta.tracklet_id in '\n sql += '(select tracklet_id from history_precoveries)'\n nRes = cursor.execute(sql)\n return(cursor.fetchone()[0])", "def ResMax(max_lev_upstream, oceanside_maxes, datetime_maxes):\r\n y_res_maxes = []\r\n deltaT_maxes = []\r\n for row in range(len(oceanside_maxes)-1):\r\n peak_lag = (datetime_maxes.iloc[row+1]-datetime_maxes.iloc[row]).seconds/3600\r\n if ~np.isnan(oceanside_maxes.iloc[row]) & (peak_lag>=2.0) & (peak_lag<=4.0):\r\n y_res_maxes.append(max_lev_upstream.iloc[row+1])\r\n deltaT_maxes.append(datetime_maxes.iloc[row+1]-datetime_maxes.iloc[row])\r\n elif ~np.isnan(oceanside_maxes.iloc[row]): # if peak lag is more than 4 hours or less than 1, erroneous data\r\n y_res_maxes.append(np.nan)\r\n deltaT_maxes.append(np.nan)\r\n # if last value in oceanside array is not nan, append another nan on the return arrays\r\n if (datetime_maxes.iloc[row+1]==datetime_maxes.iloc[-1]) & ~np.isnan(oceanside_maxes.iloc[row+1]): \r\n y_res_maxes.append(np.nan)\r\n deltaT_maxes.append(np.nan)\r\n y_res_maxes = np.array(y_res_maxes)\r\n deltaT_maxes = np.array(deltaT_maxes)\r\n return deltaT_maxes, y_res_maxes", "def averageTime(self):\n \n pass", "def SearchMaxElongation(body, startTime):\n if body == Body.Mercury:\n s1 = 50.0\n s2 = 85.0\n elif body == Body.Venus:\n s1 
= 40.0\n s2 = 50.0\n else:\n raise InvalidBodyError()\n syn = _SynodicPeriod(body)\n iter = 1\n while iter <= 2:\n plon = EclipticLongitude(body, startTime)\n elon = EclipticLongitude(Body.Earth, startTime)\n rlon = _LongitudeOffset(plon - elon) # clamp to (-180, +180]\n\n # The slope function is not well-behaved when rlon is near 0 degrees or 180 degrees\n # because there is a cusp there that causes a discontinuity in the derivative.\n # So we need to guard against searching near such times.\n if rlon >= -s1 and rlon < +s1:\n # Seek to the window [+s1, +s2].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = +s1.\n rlon_lo = +s1\n # Search forward for the time t2 when rel lon = +s2.\n rlon_hi = +s2\n elif rlon > +s2 or rlon < -s2:\n # Seek to the next search window at [-s2, -s1].\n adjust_days = 0.0\n # Search forward for the time t1 when rel lon = -s2.\n rlon_lo = -s2\n # Search forward for the time t2 when rel lon = -s1.\n rlon_hi = -s1\n elif rlon >= 0.0:\n # rlon must be in the middle of the window [+s1, +s2].\n # Search BACKWARD for the time t1 when rel lon = +s1.\n adjust_days = -syn / 4.0\n rlon_lo = +s1\n rlon_hi = +s2\n # Search forward from t1 to find t2 such that rel lon = +s2.\n else:\n # rlon must be in the middle of the window [-s2, -s1].\n # Search BACKWARD for the time t1 when rel lon = -s2.\n adjust_days = -syn / 4.0\n rlon_lo = -s2\n # Search forward from t1 to find t2 such that rel lon = -s1.\n rlon_hi = -s1\n\n t_start = startTime.AddDays(adjust_days)\n t1 = SearchRelativeLongitude(body, rlon_lo, t_start)\n if t1 is None:\n return None\n\n t2 = SearchRelativeLongitude(body, rlon_hi, t1)\n if t2 is None:\n return None\n\n # Now we have a time range [t1,t2] that brackets a maximum elongation event.\n # Confirm the bracketing.\n m1 = _neg_elong_slope(body, t1)\n if m1 >= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n m2 = _neg_elong_slope(body, t2)\n if m2 <= 0.0:\n raise InternalError() # there is a bug in the bracketing algorithm!\n\n # Use the generic search algorithm to home in on where the slope crosses from negative to positive.\n tx = Search(_neg_elong_slope, body, t1, t2, 10.0)\n if tx is None:\n return None\n\n if tx.tt >= startTime.tt:\n return Elongation(body, tx)\n\n # This event is in the past (earlier than startTime).\n # We need to search forward from t2 to find the next possible window.\n # We never need to search more than twice.\n startTime = t2.AddDays(1.0)\n iter += 1", "def max_time(self) -> float:\r\n if(len(self.operations_by_name) == 0):\r\n return -1\r\n return max(map(lambda x: x[\"time_step\"], self.operations_by_name.values()))", "def rain(walls):\n waterTotal = 0\n\n for i in range(1, len(walls) - 1):\n\n leftWall = walls[i]\n\n for j in range(i):\n leftWall = max(leftWall, walls[j])\n\n rightWall = walls[i]\n\n for j in range(i + 1, len(walls)):\n rightWall = max(rightWall, walls[j])\n\n waterTotal += min(leftWall, rightWall) - walls[i]\n\n return waterTotal", "def millis() -> int:", "def part1() -> int:\n longest_sleeper = max(sleep_times, key=lambda g: len(sleep_times[g]))\n sleepiest_minute = max(\n sleep_times[longest_sleeper], key=sleep_times[longest_sleeper].count)\n\n return longest_sleeper * sleepiest_minute", "def rain_approximation(\n pr: xr.DataArray,\n tas: xr.DataArray,\n thresh: str = \"0 degC\",\n method: str = \"binary\",\n):\n prlp = pr - snowfall_approximation(pr, tas, thresh=thresh, method=method)\n prlp.attrs[\"units\"] = pr.attrs[\"units\"]\n return prlp", "def 
max_return(cookies, cps, time_left, item_cost, item_cps):\n time = float((item_cost - cookies) / cps)\n\n if time > time_left:\n time = time_left\n\n ratio = (item_cps / time) * 2.15 ** ((time_left - time) / time_left)\n\n return ratio", "def evaluate_time(self, pid, edge):\n return self.get_process_speed(pid) * edge.get_time()", "def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss", "def run(ts):\n nc = netCDF4.Dataset(('/mesonet/data/iemre/%s_mw_mrms_daily.nc'\n '') % (ts.year,), 'a')\n offset = iemre.daily_offset(ts)\n ncprecip = nc.variables['p01d']\n\n # We want this mrms variable to replicate the netcdf file, so the\n # origin is the southwestern corner\n ts += datetime.timedelta(hours=24)\n gmtts = ts.astimezone(pytz.timezone(\"UTC\"))\n\n gribfn = gmtts.strftime((\"/mnt/a4/data/%Y/%m/%d/mrms/ncep/\"\n \"RadarOnly_QPE_24H/\"\n \"RadarOnly_QPE_24H_00.00_%Y%m%d-%H%M00.grib2.gz\"))\n if not os.path.isfile(gribfn):\n print(\"merge_mrms_q3.py MISSING %s\" % (gribfn,))\n return\n\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n lats, _ = grb.latlons()\n os.unlink(tmpfn)\n\n val = grb['values']\n # Anything less than zero, we set to zero\n val = np.where(val < 0, 0, val)\n\n # CAREFUL HERE! The MRMS grid is North to South\n # set top (smallest y)\n y0 = int((lats[0, 0] - iemre.NORTH) * 100.0)\n y1 = int((lats[0, 0] - iemre.SOUTH) * 100.0)\n x0 = int((iemre.WEST - mrms.WEST) * 100.0)\n x1 = int((iemre.EAST - mrms.WEST) * 100.0)\n # print 'y0:%s y1:%s x0:%s x1:%s' % (y0, y1, x0, x1)\n ncprecip[offset, :, :] = np.flipud(val[y0:y1, x0:x1])\n # m = MapPlot(sector='midwest')\n # x, y = np.meshgrid(nc.variables['lon'][:], nc.variables['lat'][:])\n # m.pcolormesh(x, y, ncprecip[offset,:,:], range(10), latlon=True)\n # m.postprocess(filename='test.png')\n # (fig, ax) = plt.subplots()\n # ax.imshow(mrms)\n # fig.savefig('test.png')\n # (fig, ax) = plt.subplots()\n # ax.imshow(mrms[y0:y1,x0:x1])\n # fig.savefig('test2.png')\n nc.close()", "def maxResolution(self,wave = None):\n\n d = 2000.0*self.height*math.tan(self.angle/2) # Max pathlength in microns.\n dn = self.n.getDerivative(wave) # dn/dy of materail\n return d*dn #", "def max_age(self):\n return 120 if self.realtime else 1800", "def realtime(self):", "def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) * fmwt * (1 - Decimal('1.07') * dor) + 
Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)", "def test_first_ten_minutes_are_ignored():\n m = monitor.Monitor(warmup_interval_s=60)\n t = datetime(2010, 1, 1, 0, 0)\n m.set_outside_temperature(10, t)\n assert m.temperature_update(20, t) == None\n m.boiler_on(t)\n assert m.temperature_update(21, t + timedelta(seconds=120)) == None\n assert m.temperature_update(23, t + timedelta(seconds=1320)) == (11, 6.0)", "def max_return(self) -> Optional[float]:\n return pulumi.get(self, \"max_return\")", "def getCurrentAnimRange():\n return int(oma.MAnimControl.minTime().value), int(oma.MAnimControl.maxTime().value)", "def read_line(self):\n self.read_calibrated()\n\n avg = 0\n summ = 0\n online = False\n\n for i in range(0, self.NUM_SENSORS):\n val = self.sensorValues[i]\n if val > 500: online = True\n if val > 50:\n multiplier = i * 1000\n avg += val * multiplier\n summ += val\n\n if online == False:\n if self.lastValue < (self.NUM_SENSORS-1)*1000/2:\n return 0\n else:\n return (self.NUM_SENSORS-1)*1000\n\n self.lastValue = avg/summ\n return self.lastValue", "def get_maximum_heating_output(region: int, q_rtd_h: float) -> np.ndarray:\n\n # outdoor temperature, degree C, (8760 times)\n theta_ex = read_conditions.read_temperature(region)\n\n # absolute humidity, kg/kgDA, (8760 times)\n x_ex = read_conditions.read_absolute_humidity(region)\n\n # relative humidity, %, (8760 times)\n h_ex = read_conditions.get_relative_humidity(theta_ex, x_ex)\n\n # coefficient for defrosting, (8760 times)\n c_df_h = np.where((theta_ex < 5.0) & (h_ex >= 80.0), 0.77, 1.0)\n\n alpha_max_h = 1.0\n\n return q_rtd_h * c_df_h * 3600 * 10**(-6) * alpha_max_h", "def get_rain():\n global 
rain\n\n # Report rain only if the condition is 'rainy' (and not always).\n if weather_condition == CONDITION_RAINY and random.random() > 0.7:\n rain += round(random.random(), 2)\n return rain", "def get_mc_livetime(n_gen, γ=-1.6, f_crab=3.39*10**-11, r_max=35000, e_min=0.01, e_max=30):\n return n_gen*(γ+1) / (f_crab * r_max**2 * np.pi * (e_max**(γ+1) - e_min**(γ+1)))", "def last_seen_minutes(self):\n return (self.last_seen.seconds % 3600) / 60", "def max_time(self):\n return self._max_time" ]
[ "0.59764725", "0.5706513", "0.5661375", "0.5645471", "0.5608999", "0.56051016", "0.55437905", "0.55279684", "0.5525821", "0.5406212", "0.53948325", "0.5393251", "0.5372555", "0.5347693", "0.5315714", "0.5262582", "0.5261224", "0.5259729", "0.52450776", "0.524259", "0.52310866", "0.52239656", "0.52212614", "0.5218735", "0.5208346", "0.52048355", "0.5193882", "0.51810914", "0.5163793", "0.5154705", "0.51544684", "0.5151587", "0.51387113", "0.5137898", "0.51346505", "0.5132707", "0.51258254", "0.5114292", "0.51041716", "0.5089379", "0.5077521", "0.50752246", "0.5072589", "0.5041844", "0.5040088", "0.5036159", "0.50317585", "0.50194836", "0.50181246", "0.4988247", "0.49873453", "0.49861476", "0.49752593", "0.4974172", "0.49599782", "0.49562532", "0.4943095", "0.49407452", "0.49401072", "0.49391115", "0.492983", "0.49297884", "0.49260175", "0.4924803", "0.492223", "0.4918249", "0.49166614", "0.49128476", "0.49094382", "0.490169", "0.4899665", "0.48969588", "0.4896905", "0.48892978", "0.4889243", "0.48856544", "0.48833278", "0.48740402", "0.48734292", "0.4871782", "0.48705956", "0.48704287", "0.48679793", "0.48632297", "0.4860873", "0.4855392", "0.4844518", "0.484347", "0.48416892", "0.4839028", "0.48329058", "0.48323467", "0.4828383", "0.4828001", "0.48222134", "0.48216555", "0.4819694", "0.48178846", "0.48158848", "0.48150146" ]
0.5800529
1
Returns all the session names
def all (self): sparql_results = self.query (""" select distinct ?rs ?session ?name ?number ?pid ?sitename where { ?rs rdf:type austalk:RecordedSession . ?rs olac:speaker ?participant . ?participant austalk:id ?pid . ?participant austalk:recording_site ?site . ?site rdfs:label ?sitename . ?rs austalk:prototype ?session . ?session austalk:name ?name . ?session austalk:id ?number . } ORDER BY ?name""") results = [] for result in sparql_results["results"]["bindings"]: results.append (Session ( client = self.client, identifier = result["rs"]["value"], prototype = result["session"]["value"], name = result["name"]["value"], number = result["number"]["value"], site = result["sitename"]["value"], participantId = result["pid"]["value"])) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())", "def name_list(qbo_session):\n\n return qbo_session.name_list()", "def sessions(self):\n return utils.listItems(self, '/status/sessions')", "def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str", "def sessions(self):\n return list(Session.get_sessions(self))", "def list():\n cmd_output = None\n\n try:\n cmd_output = tmux_exec('ls')\n except CalledProcessError:\n return []\n\n sessions = cmd_output.strip().split('\\n')\n sessions = map(lambda session: session.split(':')[0], sessions)\n\n return sessions", "def get_sessions(self):\n\n return self.all_sessions", "def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()", "def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body", "def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]", "def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200", "def search_session_providers(name: str) -> List[str]:\n from renku.core.plugin.session import get_supported_session_providers\n\n name = name.lower()\n return [p.name for p in get_supported_session_providers() if p.name.lower().startswith(name)]", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def sessions(self):\n return self.rpc.compatiblesessions(self.modulename)", "def get_sessions(self):\n return self.current_sessions", "def sessions(self):\n\n return File.session_choices", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids", "def fusion_api_get_active_sessions(self):\n return self.loginsession.get_active_sessions()", "def sessions(self):\n return self._sessions", "def _sessions(self):\n return self.__sessions", "def iter_sessions():\n return iter(_session_stack)", "def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def get_names(self):\n return self.names", "def get_all_sessions(self) -> list:\n sessions = list()\n for stream_id in self.streams.keys():\n tcpsession, session_position, network_tuple = self.streams[stream_id]\n sessions.append(tcpsession.get_session(session_position - 1))\n return sessions", "def get_player_names(self):\n names = 
[user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def getSessionId(self) -> List[int]:\n return self.pool.getSessionId()", "def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match", "def get_names(self):\n\n return self.mod_suites.keys()", "def getNames(self) -> List[unicode]:\n ...", "def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions", "def get_names(self):\n return self.__names", "def names(self) -> list[str]:", "def tmux_sessions_infos():\n\tpipe = os.popen(\"tmux ls\")\n\toutput = pipe.read()\n\tfor line in output.splitlines():\n\t\tid, remaining = line.split(\": \", 1)\n windows, remaining = remaining.split(\" (created \", 1)\n time, remaining = remaining.split(\") \")\n status = 'detached'\n if remaining.find(')') >= 0:\n status = remaining.split(\"(\")[-1].strip(\")\")\n yield (id, windows, time, status)", "def print_sessions(self):\n print(\"[Printing Sessions]\")\n for key in self.sessions.keys():\n print(f\"{key}:\\n\\t{self.sessions[key]}\")", "def keys(self):\n tuples = self._execute(\"SELECT name FROM users\")\n ret = [tup[0] for tup in tuples]\n return ret", "def names(self):\n return [x for x in self._dict.keys()]", "def get_users_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .filter(User.is_admin==False)\n .all()\n )\n return users", "def server_names(self):\n return self._server_names", "def names(self):\n return self._names", "def names(self):\n return self._names", "def names(self):\n return self._names", "def namelist(self):\n return []", "def session_strings(self) -> AbstractSet[tuple[str, str]]:\n try:\n return strings if (strings := self._parser[constants.SESSION_STRINGS_SECTION].items()) else None\n except configparser.NoSectionError:\n raise configparser.Error(f\"Конфигурационный файл сессии не содержит секции \"\n f\"{constants.SESSION_STRINGS_SECTION}, пожалуйста, добавьте её\")", "def names(cls):\n return cls.__by_name.keys()", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "async def get_cache_names(self) -> list:\n conn = await self.random_node()\n return await cache_get_names_async(conn)", "def getNames(self):\n return self._Names", "def names(self):\n return self.__names", "def _getNames(self):\n return self._items.keys()", "def getnames(self) -> List[Dict[str, Any]]:\n # NOTE: warning this does not yet support pagination\n return self.rpc_call(\"getnames\")", "def filtered_sessions(self):\n return self.stage.filtered_sessions", "def return_names(self):\n return self.__name_list", "def names():\n code = \"\"\"repeat with ss in screen savers\n log (name of ss as text)\nend repeat\"\"\"\n return applescript.tell.app(\"System Events\", code).err.splitlines()", "def _getSessionsBySpeaker(self, request):\n # Ensure that the speaker key is valid and that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Return all of the speaker's sessions\n return ndb.get_multi(speaker.sessions)", "def 
names(self):\n\t\treturn", "def names(self) -> List:\n ...", "def sessions(self, *args, **kwargs):\r\n return self._get('Sessions', *args, **kwargs)", "def get_sessions(self, network_tuple: NetworkTuple) -> list:\n tcpsession = self.sessions[network_tuple]\n session_list = tcpsession.get_sessions_list()\n return session_list", "def get_sessions(url: str, token: str) -> List[Session]:\n sessions_url = f'{url}api/sessions'\n response = requests.get(sessions_url, params={'token': token})\n assert(response.status_code == 200)\n sessions_raw = json.loads(response.text)\n sessions = []\n for session_raw in sessions_raw:\n session = Session(\n path = session_raw['path'],\n last_activity = dateutil.parser.isoparse(session_raw['kernel']['last_activity']),\n execution_state = session_raw['kernel']['execution_state']\n )\n assert(session['execution_state'] in valid_execution_states)\n sessions.append(session)\n\n sessions.sort(key=lambda session: session['last_activity'], reverse=True)\n return sessions", "def get_profile_names():\n import botocore.session\n return botocore.session.get_session().full_config.get('profiles', {}).keys()", "def getOqiNames( self ):\n\n if self.oqiNames:\n return self.oqiNames.keys()\n\n n = self.adb.get( \"nSrss\" )\n for indx in xrange( n ):\n name = self.adb.get( \"srsName\", indx )\n self.oqiNames[ name ] = indx\n\n return self.oqiNames.keys()", "def get_speaker_sessions(self, request):\n return self.session_service.get_speaker_sessions(\n request.websafeSpeakerKey)", "def getStationsName(self) :\n names = []\n for sts in self._stations :\n names.append(sts.getName())\n\n return names", "def full_names(self) -> List[str]:\n self.names = [\n \".\".join(prod)\n for prod in product(*self._namespaces, self.terminals)\n ]\n return self.names", "def getPuttyConnections():\n psessions = []\n os.system(r'regedit /a /e \"%userprofile%\\desktop\\putty-registry.reg\" HKEY_CURRENT_USER\\Software\\Simontatham')\n pdef = os.path.join(winshell.desktop(), \"putty-registry.reg\")\n r = open(pdef, 'r').read().splitlines()\n prefix = \"[HKEY_CURRENT_USER\\Software\\Simontatham\\PuTTY\\Sessions\"\n for l in r:\n if l.startswith(prefix):\n psessions.append(l[len(prefix) + 1:-1])\n return psessions", "def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()", "def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names", "def facenames ( self ):\n self._facenames = []\n self.EnumerateFacenames()\n return self._facenames", "def findSessions(self, channel):\n found = []\n for ss in self.sessions:\n try:\n _channel = channel.decode(ss.encoding)\n if _channel == ss.name:\n found.append(ss)\n if ss.matchNick(_channel):\n found.append(ss)\n except UnicodeDecodeError:\n continue\n if found == []:\n found = [self.defaultSession]\n return found", "def names(cls) -> List[str]:", "def list_unique_names(self):\n return [os.path.splitext(x)[0] for x in os.listdir(self._event_dir)]", "def get_names(self):\n\n # log.debug(str(inspect.stack()[1][3]) + \" --> OC.get_names()\")\n return [x.options['name'] for x in self.get_list()]", "def list():\n rino.login.list()", "def compatible_sessions(self, mname):\n return self.rpc.call(MsfRpcMethod.ModuleCompatibleSessions, [mname])", "def describe_sessions(StackName=None, FleetName=None, UserId=None, NextToken=None, Limit=None, AuthenticationType=None):\n pass", "def servicenames(self):\n\t\tnames = []\n\t\tfor k,v in 
self.services.items():\n\t\t\tnames += [v.servicename]\n\t\treturn names", "def list_remote_access_sessions(arn=None, nextToken=None):\n pass", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def name_servers(self) -> Sequence[str]:\n return pulumi.get(self, \"name_servers\")", "def users():\n access_token = session['access_token']\n return \"%s\" % list_users(access_token)", "def getTokens(self):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n user = self.getLoggedInUser()\n sessionTokens = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.can_expire.is_(False)) \\\n .all()\n\n result = []\n for t in sessionTokens:\n result.append(SessionTokenData(\n t.token,\n t.description,\n str(t.last_access)))\n\n return result", "def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]", "def get_token_names(self) -> List[str]:\n return list(self._tokens.keys())", "def names():\n pass", "def get_cryptomatte_names(self):\n return [self.cryptomattes[x][\"name\"] for x in self.cryptomattes]", "def keys(self):\n if self.db == None:\n raise AssertionError(\"DB not open\")\n\n self.lock.acquire()\n try:\n usernames = list(self.db.keys())\n finally:\n self.lock.release()\n usernames = [u for u in usernames if not u.startswith(\"--Reserved--\")]\n return usernames", "def sessions(self):\n for session_id in self.get_sessions(): \n session = Session(self.session_cache, self.sid, session_id)\n yield session", "def show_event_names(self):\n print(\"Names: {}\".format(\" \".join(self.list_unique_names())))", "def obs_names(self):\n return self._obs_names", "def dump_sessions(self):\n logger.info(\"sessions:\")\n with self._sessions_lock:\n for session_id in self.sessions:\n logger.info(session_id)\n\n # def set_session_master(self, handler):\n # \"\"\"\n # Call the setmaster() method for every session. Returns True when\n # a session having the given handler was updated.\n # \"\"\"\n # found = False\n #\n # with self._sessions_lock:\n # for session_id in self.sessions:\n # found = self.sessions[session_id].set_master(handler)\n # if found is True:\n # break\n #\n # return found", "def tab_names(self):\n return self.tab_ctrl.tab_names", "def node_name_list(self):\n return list(self._node_reg.keys())", "def get_sessions(sessions, time_feat_dict):\n filt = Session.filter_time_func(time_feat_dict)\n return [s for s in sessions if filt(shortstr2time(s['start']))]", "def keys(self):\r\n if self.db == None:\r\n raise AssertionError(\"DB not open\")\r\n\r\n self.lock.acquire()\r\n try:\r\n usernames = self.db.keys()\r\n finally:\r\n self.lock.release()\r\n usernames = [u for u in usernames if not u.startswith(\"--Reserved--\")]\r\n return usernames" ]
[ "0.80101985", "0.70593977", "0.7002973", "0.699865", "0.6964485", "0.6819654", "0.6812653", "0.67430043", "0.67428505", "0.673906", "0.6594309", "0.6576731", "0.6563603", "0.6563603", "0.65445304", "0.653696", "0.65162706", "0.64749575", "0.64749575", "0.6448738", "0.64160097", "0.64039236", "0.63763416", "0.63452107", "0.63057655", "0.6284386", "0.62463266", "0.6244077", "0.6218941", "0.61965686", "0.6181647", "0.617921", "0.61675423", "0.6160163", "0.6145285", "0.6139143", "0.61258", "0.61136645", "0.60897285", "0.6052575", "0.60516757", "0.6046088", "0.60434353", "0.60365605", "0.60365605", "0.60365605", "0.6031289", "0.603016", "0.5990661", "0.5974328", "0.5974328", "0.5966443", "0.5961204", "0.59492946", "0.59409285", "0.5940909", "0.5931274", "0.59290105", "0.59273076", "0.592639", "0.5925848", "0.5888865", "0.58709663", "0.58615655", "0.5844538", "0.58440036", "0.58159274", "0.57698584", "0.5767974", "0.57629055", "0.57584536", "0.5758423", "0.5755347", "0.5751727", "0.574873", "0.5743701", "0.5733324", "0.57233703", "0.5715678", "0.57077146", "0.57068884", "0.5703611", "0.5698728", "0.56946915", "0.5682491", "0.567546", "0.56751215", "0.5673191", "0.5670837", "0.5664538", "0.56639236", "0.5658127", "0.5657542", "0.56518584", "0.56515414", "0.5648101", "0.5647671", "0.5646381", "0.5644061", "0.5638071" ]
0.60177344
48
Returns all the session names for a participant
def filter_by_participant (self, participant): sparql_results = self.query (""" select distinct ?rs ?session ?name ?number ?pid ?sitename where { BIND (<%s> AS ?participant) ?rs rdf:type austalk:RecordedSession . ?rs olac:speaker ?participant . ?participant austalk:id ?pid . ?participant austalk:recording_site ?site . ?site rdfs:label ?sitename . ?rs austalk:prototype ?session . ?session austalk:name ?name . ?session austalk:id ?number . } ORDER BY ?name""" % participant.identifier) results = [] for result in sparql_results["results"]["bindings"]: results.append (Session ( client = self.client, identifier = result["rs"]["value"], prototype = result["session"]["value"], name = result["name"]["value"], number = result["number"]["value"], site = result["sitename"]["value"], participantId = result["pid"]["value"])) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())", "def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]", "def teammates_player_names(self):\n return [p.name for p in self.teammates]", "def name_list(qbo_session):\n\n return qbo_session.name_list()", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names", "def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results", "def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match", "def get_speaker_sessions(self, request):\n return self.session_service.get_speaker_sessions(\n request.websafeSpeakerKey)", "def get_users_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .filter(User.is_admin==False)\n .all()\n )\n return users", "def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str", "def _getSessionsBySpeaker(self, request):\n # Ensure that the speaker key is valid and that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Return all of the speaker's sessions\n return ndb.get_multi(speaker.sessions)", "def speaker_list(self):\n return \", \".join(str(speaker.person) for speaker in self.speakers.all())", "def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body", "def sessions(self):\n return utils.listItems(self, '/status/sessions')", "async def list(self):\n all = (await self.get(self.profiles_list))['results']\n log(\"retrieved participant metadata.\")\n return all or []", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 
'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions", "def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions", "def get_exercise_recording_full_names(self):\n full_names = set()\n for er in self.exercise_recordings:\n full_names.add(er.full_name)\n return full_names", "def filter_by_session (self, site_id, participant_id, session_id):\n\n query = \"\"\"\n select distinct * where {\n\n BIND (\"%s\" AS ?pid)\n BIND (\"%s\" as ?sessionid)\n \n ?participant austalk:id ?pid .\n ?rc rdf:type austalk:RecordedComponent .\n ?rc olac:speaker ?participant .\n ?rc austalk:session ?sessionid .\n\n ?rc austalk:prototype ?prototype .\n ?prototype austalk:name ?name .\n ?prototype austalk:shortname ?shortname .\n \n optional { ?rc austalk:audiorating ?audiorating .}\n optional { ?rc austalk:videorating ?videorating .}\n optional { ?rc austalk:comment ?comment .}\n \n }\n \"\"\" % (participant_id, session_id)\n\n\n sparql_results = self.query (query)\n results = []\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n for field in ['audiorating', 'videorating', 'comment']:\n if field not in result:\n result[field] = {'value': ''}\n\n comp = Component (\n client = self.client,\n identifier = result[\"rc\"][\"value\"],\n participantId = result[\"pid\"][\"value\"],\n sessionId = result[\"sessionid\"][\"value\"],\n prototype = result[\"prototype\"][\"value\"],\n audiorating = result[\"audiorating\"][\"value\"],\n videorating = result[\"videorating\"][\"value\"],\n comment = result[\"comment\"][\"value\"],\n name = result[\"name\"][\"value\"],\n componentId = result[\"shortname\"][\"value\"],\n site = site_id,\n )\n comp.details()\n results.append(comp)\n return results", "def sessions(self):\n return list(Session.get_sessions(self))", "def search_session_providers(name: str) -> List[str]:\n from renku.core.plugin.session import get_supported_session_providers\n\n name = name.lower()\n return [p.name for p in get_supported_session_providers() if p.name.lower().startswith(name)]", "def get_conference_sessions(self, request):\n return self.session_service.get_conference_sessions(\n request.websafeConferenceKey)", "def all_participants_data(study_name: str):\n # get all participants' name-ids\n participants = CC_driver.get_all_participants(study_name)\n\n if len(participants) > 0:\n participants_rdd = CC_driver.sc.parallelize(participants)\n results = participants_rdd.map(\n lambda participant: diagnose_pipeline(participant[\"identifier\"], CC_worker, config))\n results.count()\n else:\n print(study_name, \"- Study contains no participant.\")", "def findSessions(self, channel):\n found = []\n for ss in self.sessions:\n try:\n _channel = channel.decode(ss.encoding)\n if _channel == ss.name:\n found.append(ss)\n if ss.matchNick(_channel):\n found.append(ss)\n except UnicodeDecodeError:\n continue\n if found == []:\n found = [self.defaultSession]\n return found", "def get_sessions(self):\n\n return self.all_sessions", "def getNames(self) -> List[unicode]:\n ...", "def participants_group_name(self):\n return self.short_name+\"_participants\"", "def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200", "def get_sessions(sessions, time_feat_dict):\n filt = Session.filter_time_func(time_feat_dict)\n return [s for s in sessions if 
filt(shortstr2time(s['start']))]", "def get_activity_names(self) -> np.ndarray:\n return self.activity_names", "def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()", "def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names", "def player_names(players):\r\n string = ''\r\n for p in players:\r\n string = string + p.name + ', '\r\n return string", "def sessions(self):\n\n return File.session_choices", "def getMembersName(self):\n listMemberName = []\n for member in self.playersAndRoles:\n listMemberName.append(member.user.name)\n random.shuffle(listMemberName)\n return listMemberName", "def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]", "def getParticpants(self):\n return participants", "def _getConferenceSessions(self, request):\n # Ensure that websafeConferenceKey is a valid conference key\n confKey = _raiseIfWebsafeKeyNotValid(request.websafeConferenceKey,\n 'Conference')\n # Retrieve all sessions that have a matching conference key\n sessions = Session.query(Session.conference == confKey).fetch()\n return sessions", "def get_user_session_ids_for_task(user_id: str, task_name: str) -> List[str]:\n listOfSessions = os.listdir('Plots/Research/'+user_id+'/'+task_name)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions", "def filter_by_site (self, label):\n \n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid\n WHERE {\n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label \"%s\" .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n \n }\n ORDER BY ?name\"\"\" % label)\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n # site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results", "def get_sessions(self, network_tuple: NetworkTuple) -> list:\n tcpsession = self.sessions[network_tuple]\n session_list = tcpsession.get_sessions_list()\n return session_list", "def get_team_names(driver):\n name_elements = driver.find_elements_by_class_name(\"name\")\n team_names = [name.text for name in name_elements]\n return team_names", "def _list(room_name):\n members = redis.smembers(room_name)\n \n if str(members) == 'set()':\n text = '```Users in list: none```'\n return text\n\n text = 'Users in list: %s ' % ','.join(members)\n \n return text", "def get_sessions_by_type(self, request):\n return self.session_service.get_conference_sessions_by_type(\n request.websafeConferenceKey, request.sessionType)", "def list(self, datasource_name=None, event_name=None, requested_after=None, requested_before=None, session_type=None):\n query = {\n 'dataSourceName': datasource_name,\n 'eventName': event_name,\n 'requestedBefore': requested_before,\n 'requestedAfter': requested_after,\n 
'sessionType': session_type\n }\n response, _, headers = self._client.request_with_headers('GET', 'sessions', params=query)\n\n return [SessionResponse(item, headers) for item in response.get('items', [])]", "def get_names(self):\n return self.names", "def list():\n cmd_output = None\n\n try:\n cmd_output = tmux_exec('ls')\n except CalledProcessError:\n return []\n\n sessions = cmd_output.strip().split('\\n')\n sessions = map(lambda session: session.split(':')[0], sessions)\n\n return sessions", "def get_finished_experiments(self, session):\n from expfactory.database.models import Participant, Result\n\n finished = []\n subid = session.get(\"subid\")\n\n if subid is not None:\n p = Participant.query.filter(\n Participant.id == subid\n ).first() # better query here\n\n # Get results for the participant\n for result in Result.query.filter(participant_id=p.id):\n finished.append(result.exp_id)\n return finished", "def get_participants(self):\n return self.participants_group.user_set.all()", "def names(self) -> list[str]:", "def get_sessions(self):\n return self.current_sessions", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def GetSessions(firebase: firebase) -> None:\n\n global sessions\n obj_key_list = []\n \n result = firebase.get('/session', None)\n \n if result is None:\n print(\"no sessions found\")\n return\n \n for i in result.keys():\n obj_key_list.append(i)\n \n for i in obj_key_list:\n session = Session()\n session.setId(i)\n session.setCourseId(result[i]['courseid'])\n session.setDOW(result[i]['DOW'])\n session.setSessionNumber(result[i]['session_number'])\n session.setSessionDate(result[i]['session_date'])\n session.setSessionTimeStart(result[i]['session_time_start'])\n session.setSessionTimeEnd(result[i]['session_time_end'])\n sessions.append(session)", "def participants(self):\n for participant in self.get_data(\"participants\"):\n yield Participant(participant, **self._new_session_args)\n\n return", "def get_player_list(tournament):\n database = TinyDB('db.json')\n players_table = database.table('players')\n # retrieving the list of identifiers of players following a tournament\n id_list = tournament['Liste indice Joueurs']\n player_list = []\n for player_id in id_list:\n # getting the players\n player = players_table.get(doc_id=player_id)\n player_list.append(player)\n return player_list", "def get_unclaimed_users_str(session):\n result = ''\n for p in session.players:\n if p.uid == -1:\n result += f'{p.name}, '\n result = result.rstrip(', ')\n return result", "def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids", "def get_students(self):\n return u', '.join([c.student.username for c in self.candidates.all()])", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def itersessions(self):\n for x in np.unique(self.sessions):\n yield x, self.loc[self.sessions == x, :]", "def sessions(self):\n return self._sessions", "def get_nice_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[1])\n return result", "def get_names(self):\n\n return self.mod_suites.keys()", "def participants(self):\r\n return Participants(self)", "def get_members(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n 
cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)", "def get_patient_names(self):\n\t# use pre-defined patient names\n\tif (self.data_names is not None):\n\t\tassert (os.path.isfile(self.data_names))\n\t\twith open(self.data_names) as f:\n\t\t\tcontent = f.readlines()\n\t\tpatient_names = [x.strip() for x in content]\n\t# use all the patient names in data_root\n\telse:\n\t\tpatient_names = os.listdir(self.data_root[0])\n\t\tpatient_names = [name for name in patient_names if 'brats' in name.lower()]\n\treturn patient_names", "def get_users_admins_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .all()\n )\n return users", "def user_names(self):\n results = []\n for user_detail in self.users:\n results.append(user_detail.user_name)\n results.sort()\n return results", "def getStudyNames(self):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_study_names', [results])\n study_name_list = []\n for row in results:\n if row[0] is None:\n continue\n else:\n study_name_list.append(row)\n return study_name_list\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def get_names(r):\n names = []\n for name in r[\"results\"]:\n x = name[\"name\"]\n name.append(x)\n return name", "def filtered_sessions(self):\n return self.stage.filtered_sessions", "def get_participants_data(self):\n participants = []\n for (email, uid) in self.tokens.items():\n participant = {} \n participant['uid'] = uid\n participant['email'] = email\n response = 0\n questions = 0\n sections = [x for x in self.values() if ISurveySection.providedBy(x)]\n for section in sections:\n response += len(section.responses.get(uid, {}))\n questions += len(section.question_ids)\n if response != 0:\n participant['finished'] = Decimal(response) / Decimal(questions) * 100\n else:\n participant['finished'] = 0 \n participants.append(participant)\n return participants", "def getnames(self) -> List[Dict[str, Any]]:\n # NOTE: warning this does not yet support pagination\n return self.rpc_call(\"getnames\")", "def participants(self):\n return Participants(self)", "def get_profile_names():\n import botocore.session\n return botocore.session.get_session().full_config.get('profiles', {}).keys()", "def getSessionId(self) -> List[int]:\n return self.pool.getSessionId()", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def keys(self):\n tuples = self._execute(\"SELECT name FROM users\")\n ret = [tup[0] for tup in tuples]\n return ret", "def sessions(self):\n return self.rpc.compatiblesessions(self.modulename)", "def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names", "def get_all_sessions(self) -> list:\n sessions = list()\n for stream_id in self.streams.keys():\n tcpsession, session_position, network_tuple = self.streams[stream_id]\n sessions.append(tcpsession.get_session(session_position - 1))\n return sessions", "def GetUserNamesList():\n\n # Create a list\n time.sleep(5)\n usernameslist = []\n\n html = Global.driver.page_source\n\n page = 
soup(html, \"lxml\")\n\n # Get all usernames\n table = page.find('div', class_=\"user-management-table-view\")\n\n tablebody = table.find('tbody')\n\n elements = tablebody.find_all('tr')\n\n for tr_tag in elements:\n usernameelement = tr_tag.find('span')\n\n username = usernameelement.text.strip('\\n')\n\n usernameslist.append(username)\n\n return usernameslist", "def getLinIterVarNames( self ):\n\n self.updateAdb( )\n\n return self.iterNames.keys()", "def get_seq_names(self) -> List[str]:\n return [seq.Name.lower() for seq in self.Sequencers]", "def return_names(self):\n return self.__name_list", "def gen_static_session(self, set_choice, session_len, num_sessions):\n return_seq = []\n chosen_set = self.split_data[set_choice]\n\n for user_index, group in chosen_set.groupby('user_index'):\n group = group.reset_index(drop=True)\n start_i = group[(group.iloc[0]['datetime'] + pd.Timedelta(hours=session_len * num_sessions)) < group['datetime']]\n if start_i.shape[0] > 0:\n start_i = start_i.index[0]\n for i in range(start_i, group.shape[0]):\n session_diff = np.floor((group.iloc[i-1]['datetime'] - group['datetime']).apply(lambda x: x.total_seconds() / 60 / 60 / session_len))\n sessions = [group[session_diff == h] for h in range(num_sessions - 1, -1, -1)] # (num_sessions)\n return_seq.append([user_index,\n [session['poi_index'].to_list() for session in sessions],\n [session['timestamp'].to_list() for session in sessions],\n group.iloc[i]['poi_index'],\n len(sessions)])\n return return_seq", "def collect_members_names(survey_response_list):\n\tmembers_names = []\n\tfor response in survey_response_list:\n\t\tmembers_names.append(response['questions'][0]['answers'][0]['text'])\n\treturn members_names", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def gen_static_session(self, set_choice, session_len, num_sessions):\r\n return_seq = []\r\n chosen_set = self.split_data[set_choice]\r\n\r\n for user_index, group in chosen_set.groupby('user_index'):\r\n group = group.reset_index(drop=True)\r\n start_i = group[(group.iloc[0]['datetime'] + pd.Timedelta(hours=session_len * num_sessions)) < group['datetime']]\r\n if start_i.shape[0] > 0:\r\n start_i = start_i.index[0]\r\n for i in range(start_i, group.shape[0]):\r\n session_diff = np.floor((group.iloc[i-1]['datetime'] - group['datetime']).apply(lambda x: x.total_seconds() / 60 / 60 / session_len))\r\n sessions = [group[session_diff == h] for h in range(num_sessions - 1, -1, -1)] # (num_sessions)\r\n return_seq.append([user_index,\r\n [session['poi_index'].to_list() for session in sessions],\r\n [session['timestamp'].to_list() for session in sessions],\r\n group.iloc[i]['poi_index'],\r\n len(sessions)])\r\n return return_seq", "def only_desired_sessions(prefs, caps):\n return [\n [(stud+'_'+p, True) for p in pref]\n for stud, pref in prefs.items()]", "def get_participants(reactome_id):\n react_url = 'http://www.reactome.org/ContentService/data/event/' \\\n + reactome_id + '/participatingReferenceEntities'\n headers = {'Accept': 'application/json'}\n res = requests.get(react_url, headers=headers)\n if not res.status_code == 200:\n return []\n json = res.json()\n up_ids = []\n for res in json:\n if not res.get('databaseName') == 'UniProt':\n continue\n up_id = res.get('identifier')\n if up_id is not None:\n up_ids.append(up_id)\n return up_ids", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def selectnamesE(data):\n col = []\n 
for name in list(data):\n if name.startswith('peer_'):\n col.append(name)\n else:\n col = col\n \n return col", "def tmux_sessions_infos():\n\tpipe = os.popen(\"tmux ls\")\n\toutput = pipe.read()\n\tfor line in output.splitlines():\n\t\tid, remaining = line.split(\": \", 1)\n windows, remaining = remaining.split(\" (created \", 1)\n time, remaining = remaining.split(\") \")\n status = 'detached'\n if remaining.find(')') >= 0:\n status = remaining.split(\"(\")[-1].strip(\")\")\n yield (id, windows, time, status)" ]
[ "0.6723869", "0.63031816", "0.62869734", "0.62178284", "0.6213438", "0.6011497", "0.60053223", "0.59663093", "0.5954626", "0.5935582", "0.5870372", "0.5859776", "0.5857662", "0.5844726", "0.57861197", "0.57147163", "0.5673998", "0.56739765", "0.56506336", "0.5621848", "0.5617745", "0.5616195", "0.55978554", "0.55789727", "0.5577217", "0.557043", "0.5566993", "0.5564511", "0.5552853", "0.5548814", "0.55331343", "0.55162096", "0.5478338", "0.5466444", "0.5457198", "0.5448718", "0.54130757", "0.5386564", "0.53838724", "0.5371807", "0.535391", "0.53455746", "0.53426975", "0.53397894", "0.53386927", "0.53348213", "0.5330544", "0.5313559", "0.5312659", "0.5301115", "0.5299142", "0.52785504", "0.5257964", "0.52442425", "0.52155536", "0.5206412", "0.5206078", "0.5205943", "0.5196039", "0.5195696", "0.51895535", "0.51895535", "0.51819193", "0.51769114", "0.5172159", "0.51720715", "0.51691383", "0.51653093", "0.51601845", "0.5152307", "0.51475626", "0.51459867", "0.5142072", "0.51372814", "0.513488", "0.51266646", "0.51253647", "0.5121671", "0.5121612", "0.51176095", "0.5116969", "0.5109951", "0.510542", "0.51042324", "0.5103044", "0.5102241", "0.51021385", "0.5092942", "0.50911456", "0.5090366", "0.50893724", "0.50882274", "0.50882274", "0.50836045", "0.5083403", "0.5070576", "0.5065263", "0.5065263", "0.5059121", "0.50570387" ]
0.67719805
0
Returns all the session names for a site identified by site label
def filter_by_site (self, label): sparql_results = self.query (""" select distinct ?rs ?session ?name ?number ?pid WHERE { ?rs rdf:type austalk:RecordedSession . ?rs olac:speaker ?participant . ?participant austalk:id ?pid . ?participant austalk:recording_site ?site . ?site rdfs:label "%s" . ?rs austalk:prototype ?session . ?session austalk:name ?name . ?session austalk:id ?number . } ORDER BY ?name""" % label) results = [] for result in sparql_results["results"]["bindings"]: results.append (Session ( client = self.client, identifier = result["rs"]["value"], prototype = result["session"]["value"], name = result["name"]["value"], number = result["number"]["value"], # site = result["sitename"]["value"], participantId = result["pid"]["value"])) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())", "def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]", "def sessions(self):\n return utils.listItems(self, '/status/sessions')", "def name_list(qbo_session):\n\n return qbo_session.name_list()", "def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions", "def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match", "def list_sites():\n result = []\n querystring = 'select sitename from {};'.format(TABLES[0]))\n res = execute_query(querystring)\n if res:\n result = [x[0] for x in res]\n return result", "def describe_sessions(StackName=None, FleetName=None, UserId=None, NextToken=None, Limit=None, AuthenticationType=None):\n pass", "def get_site_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_site_names')", "def get_all_site_names(_current_parser=None):\n parser = _get_parser(_current_parser)\n return [site for site in parser if site != \"DEFAULT\"]", "def getSEsForSite( siteName ):\n result = getSiteSEMapping()\n if not result['OK']:\n return result\n\n mapping = result['Value']\n if siteName in mapping:\n return S_OK( mapping[siteName] )\n\n return S_OK( [] )", "def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str", "async def fetch_site_devices(ipf: IPFabricClient, site: str) -> List:\n request = {\n TableFields.snapshot: ipf.active_snapshot,\n TableFields.columns: [\"hostname\"],\n TableFields.filters: ipf.parse_filter(f\"siteName = {site}\"),\n }\n res = await ipf.api.post(url=URIs.devices, json=request)\n res.raise_for_status()\n return [rec[\"hostname\"] for rec in res.json()[\"data\"]]", "def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))", "def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()", "def search_session_providers(name: str) -> List[str]:\n from renku.core.plugin.session import get_supported_session_providers\n\n name = name.lower()\n return [p.name for p in get_supported_session_providers() if p.name.lower().startswith(name)]", "def sessions(self):\n\n return File.session_choices", "def sessions(self):\n return list(Session.get_sessions(self))", "def get_sites():\n sites = [ x.get('siteid') for x in Schedconfig.objects.values('siteid').distinct() ]\n locale.setlocale(locale.LC_ALL, '')\n sites = sorted(sites, key=locale.strxfrm)\n return sites", "def sessions(self, *args, **kwargs):\r\n return self._get('Sessions', *args, **kwargs)", "def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 
'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions", "def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names", "def list_secgroups(self, name=None):", "def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)", "def get_ls_session_dates(soup):\n ls_session_dates = soup.find(\n \"select\", attrs={\n \"id\": \"ContentPlaceHolder1_ddlSession\"}).find_all(\"option\")\n return [ls.text for ls in ls_session_dates]", "def getTrackingPluginNames(context):\n\n gsm = getGlobalSiteManager()\n global_plugins = set([p.name for p in gsm.registeredAdapters()\n if p.provided == IAnalyticsTrackingPlugin])\n\n lsm = getSite().getSiteManager()\n local_plugins = set([p.name for p in lsm.registeredAdapters()\n if p.provided == IAnalyticsTrackingPlugin])\n\n values = sorted(list(global_plugins | local_plugins))\n return SimpleVocabulary.fromValues(values)", "def get_sessions(self):\n\n return self.all_sessions", "def findSessions(self, channel):\n found = []\n for ss in self.sessions:\n try:\n _channel = channel.decode(ss.encoding)\n if _channel == ss.name:\n found.append(ss)\n if ss.matchNick(_channel):\n found.append(ss)\n except UnicodeDecodeError:\n continue\n if found == []:\n found = [self.defaultSession]\n return found", "def get_list_hub(showOnly=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=Hub')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # server=0 net=2\n slist = []\n for trr in bs.tbody.find_all('tr'):\n try:\n tsrv = trr.find_all('td')[0].text.strip()\n except:\n continue\n slist.append(tsrv)\n if not showOnly:\n print(tsrv)\n\n return slist", "def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]", "def list():\n cmd_output = None\n\n try:\n cmd_output = tmux_exec('ls')\n except CalledProcessError:\n return []\n\n sessions = cmd_output.strip().split('\\n')\n sessions = map(lambda session: session.split(':')[0], sessions)\n\n return sessions", "def list_remote_access_sessions(arn=None, nextToken=None):\n pass", "def sessions(self):\n return self.rpc.compatiblesessions(self.modulename)", "def readUserSession(datafile):\n for line in datafile:\n pages = line.split()\n total = len(pages)\n # Select user sessions with 2 or more pages\n if total < 2:\n continue\n\n # Exclude outliers by removing extreme long sessions\n if total > 500:\n continue\n\n return [PAGE_CATEGORIES[int(i) - 1] for i in pages]\n return []", "def GetUserNamesList():\n\n # Create a list\n time.sleep(5)\n usernameslist = []\n\n html = Global.driver.page_source\n\n page = soup(html, \"lxml\")\n\n # Get all usernames\n table = page.find('div', class_=\"user-management-table-view\")\n\n tablebody = table.find('tbody')\n\n elements = tablebody.find_all('tr')\n\n for tr_tag in elements:\n usernameelement = tr_tag.find('span')\n\n username = usernameelement.text.strip('\\n')\n\n usernameslist.append(username)\n\n return usernameslist", "def get_sessions(self):\n return self.current_sessions", "def getSiteName():\n return os.environ['SITENAME']", "def get_node_names(self, label_selector=None):\n return [node.metadata.name for node in 
self.get_nodes(label_selector).items]", "def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200", "def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body", "def site_name(self, obj):\n site = obj.site\n return (\"%s\" % (site.name))", "def names():\n code = \"\"\"repeat with ss in screen savers\n log (name of ss as text)\nend repeat\"\"\"\n return applescript.tell.app(\"System Events\", code).err.splitlines()", "def list_templates(site_name):\n siteid = _get_site_id(site_name)\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = 'select id, name from {} where site_id = %s;'\n result = execute_query(querystring.format(TABLES[5]), (siteid,))\n tplist = [row['name'] for row in cur]\n return tplist", "def iter_sessions():\n return iter(_session_stack)", "def sitename(self) :\n\t\ttry :\n\t\t\treturn self._sitename\n\t\texcept Exception as e:\n\t\t\traise e", "def find_all(client):\n return list(map(lambda s: Site(s), client.get_api_resource(\"self/sites\")))", "def get_user_session_ids_for_task(user_id: str, task_name: str) -> List[str]:\n listOfSessions = os.listdir('Plots/Research/'+user_id+'/'+task_name)\n try:\n listOfSessions.remove('.DS_Store')\n except:\n pass\n return listOfSessions", "def get_sessions(sessions, time_feat_dict):\n filt = Session.filter_time_func(time_feat_dict)\n return [s for s in sessions if filt(shortstr2time(s['start']))]", "def list_sites(keys):\n key_store = KeyStore(get_config_file())\n for site, key in key_store.list_sites().iteritems():\n if keys:\n click.echo(\"{} = {}\".format(site, key))\n else:\n click.echo(site)", "def getLabels(self) -> List[str]:\n\n results = self.service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n return labels", "def getSlaveNames():", "def get_session_label(dcm):\n session_label = ''\n\n if dcm.get('Manufacturer').find('GE') != -1 and dcm.has_key('StudyID'):\n session_label = dcm.get('StudyID')\n else:\n session_label = dcm.get('StudyInstanceUID')\n\n return session_label", "def itersessions(self):\n for x in np.unique(self.sessions):\n yield x, self.loc[self.sessions == x, :]", "def getSessionId(self) -> List[int]:\n return self.pool.getSessionId()", "def get_servers_list(opts):\n servers_list = {}\n\n options = opts.get(PACKAGE_NAME, {})\n\n if options: # If no label given [fn_splunk_integration]\n server_list = {PACKAGE_NAME}\n else: # If label given [fn_splunk_integration:label]\n servers = SplunkServers(opts)\n server_list = servers.get_server_name_list()\n\n # Creates a dictionary that is filled with the splunk servers\n # and there configurations \n for server_name in server_list:\n servers_list[server_name] = opts.get(server_name, {})\n validate_fields([\"host\", \"port\"], servers_list[server_name])\n user = servers_list[server_name].get(\"username\", None)\n splunk_pass = servers_list[server_name].get(\"splunkpassword\", None)\n token = servers_list[server_name].get(\"token\", None)\n if not ((user and splunk_pass) or token):\n raise ValueError(\"Either username/splunkpassword or token need to be given\")\n elif token:\n servers_list[server_name][\"username\"] = None\n servers_list[server_name][\"splunkpassword\"] = None\n\n return servers_list", "def 
get_sessions(url: str, token: str) -> List[Session]:\n sessions_url = f'{url}api/sessions'\n response = requests.get(sessions_url, params={'token': token})\n assert(response.status_code == 200)\n sessions_raw = json.loads(response.text)\n sessions = []\n for session_raw in sessions_raw:\n session = Session(\n path = session_raw['path'],\n last_activity = dateutil.parser.isoparse(session_raw['kernel']['last_activity']),\n execution_state = session_raw['kernel']['execution_state']\n )\n assert(session['execution_state'] in valid_execution_states)\n sessions.append(session)\n\n sessions.sort(key=lambda session: session['last_activity'], reverse=True)\n return sessions", "def _get_ids_from_label(self, label):\r\n keys = self.list_keys()\r\n results = []\r\n for key in keys:\r\n if key['label'] == label:\r\n results.append(key['id'])\r\n return results", "def get_list_shared(showOnly=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=Shared')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # server=0 net=2\n slist = []\n for trr in bs.tbody.find_all('tr'):\n try:\n tsrv = re.match(r'(.+) \\(', trr.find_all('td')[0].text, re.I).group(1).strip()\n except:\n continue\n slist.append(tsrv)\n if not showOnly:\n print(tsrv)\n\n return slist", "def fusion_api_get_active_sessions(self):\n return self.loginsession.get_active_sessions()", "def get_session_info_from_loris(self):\n # TODO refactor bids_import pipeline to use same functions as dcm2bids below. To be done in different PR though\n loris_session_info = self.db.pselect(\n \"SELECT * FROM session WHERE CandID = %s AND Visit_label = %s\",\n (self.cand_id, self.visit_label)\n )\n\n return loris_session_info[0] if loris_session_info else None", "def search_spider_names(project, apikey, name=''):\n payload = {'project': project, 'apikey': apikey, 'spider': name}\n req = requests.get(DASH_API_URL + 'spiders/list.json',\n params=payload)\n if req.status_code == 200:\n return [s.get('id') for s in req.json().get('spiders', [])]\n return []", "def get_station_names(self):\n station_names = []\n for wrapper in self.soup.find_all(\"div\", {\"class\": \"stop-wrapper\"}):\n station_name = ' '.join(wrapper.find(\"h3\").text.split(' ')[:-1])\n station_names.append(station_name)\n return np.array(station_names).T", "def list_domain_names():\n pass", "def _getSessionsBySpeaker(self, request):\n # Ensure that the speaker key is valid and that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Return all of the speaker's sessions\n return ndb.get_multi(speaker.sessions)", "def session_strings(self) -> AbstractSet[tuple[str, str]]:\n try:\n return strings if (strings := self._parser[constants.SESSION_STRINGS_SECTION].items()) else None\n except configparser.NoSectionError:\n raise configparser.Error(f\"Конфигурационный файл сессии не содержит секции \"\n f\"{constants.SESSION_STRINGS_SECTION}, пожалуйста, добавьте её\")", "def servicenames(self):\n\t\tnames = []\n\t\tfor k,v in self.services.items():\n\t\t\tnames += [v.servicename]\n\t\treturn names", "def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id 
?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results", "def getUsersBySSID():\n\tstats = {}\n\tms = MobileStation.objects.filter(ssid__isnull=False)\n\tfor ssid in set(MobileStation.objects.values_list('ssid', flat=True)):\n\t\tstats[ssid] = MobileStation.objects.areAssociated().filter(ssid=ssid).count()\n\treturn stats", "def _sessions(self):\n return self.__sessions", "def get_sessions_in_wishlist(self, request):\n user = endpoints.get_current_user()\n return self.wishlist_service.get_sessions_in_wishlist(user)", "def get_session_name(self, vid):\n return \"ssn-{0}\".format(vid)", "def sessions(self):\n return self._sessions", "def server_site_name(self):\n return dsdb._samdb_server_site_name(self)", "def get_current_labels(self):\n partitions = self.get_partitions(self.persistence)\n return partitions.keys()", "def get_labels():\n\n logging.info(\"Getting metadata about labels\")\n\n labels = []\n\n if len(args.labels) == 0:\n logging.warning(\"No labels specified, assuming all labels. If you have a lot of labels in your inbox you could hit API limits quickly.\")\n results = GMAIL_CLIENT.users().labels().list(userId='me').execute()\n\n labels = results.get('labels', [])\n else:\n logging.info('Using labels: %s ', args.labels)\n\n for label in args.labels:\n labels.append({'id': label})\n\n if not labels:\n logging.info('No labels found.')\n sys.exit()\n\n return labels", "def getNSites(self):\n return self.nsites", "def token_labels(self) -> dict[str, str]:\n # This label is used to identify the site in the `skupper link status` command\n # self.name ({skupper_network.identifier}-{ns.cluster.name}-{ns.name}) can be longer than 63 characters\n # so use cluster.name-namespaced.name instead and trim it to 63 characters\n # a namespace can't be in more than one skupper network, so it's safe to omit the skupper network identifier\n return {\"token-receiver\": f\"{self.cluster.name}-{self.namespace.name}\"[0:63]}", "def server_names(self):\n return self._server_names", "def get_global_variable_names(self):\n return [b\"\".join(_i).strip().decode()\n for _i in self._f.variables[\"name_glo_var\"][:]]", "def get_profile_names():\n import botocore.session\n return botocore.session.get_session().full_config.get('profiles', {}).keys()", "def GetRegisterList():\n return ida_idp.ph_get_regnames()", "def sessionsStar(self, nodeList) :\n\t\t# i client , len(nodeList)-1 server \n\t\tsessIDlist = []\n\t\tstarSize = len(nodeList)\n\t\t#Create sync sessions and store session IDs in a list\n\t\tfor i in range(starSize -1) :\n \t\tsessIDlist.append((i, starSize-1, nodeList[i].createSyncSession(nodeList[starSize-1], \\\n\t\t\t\tnodeList[starSize-1].instanceID)))\n\t\treturn sessIDlist", "def get_server_info_list(self):\n # TODO: 不要では?特に理由がなければ削除する\n result = []\n if self._server_sock is not None:\n result.append(\"Sever address: %s\" %\n str(self._server_sock.getsockname()))\n else:\n result.append(\"Sever address: Not initialized yet.\")\n 
result.append(\"Handler: %s\" %\n str(self._data_handler.__class__))\n result.append(\"Sessions: %d\" % len(self._sessions))\n for idx, session_thread in enumerate(self._sessions):\n result.append(\"Session[%d]: %s\" % (\n idx, str(session_thread.client_address)))\n return result", "def get_sitemodulename(self):\n return self.sitemodulename", "def wishlist_sessions(self, user):\n wishlist_key = self.get_wishlist_key(user)\n session_keys = [ndb.Key(urlsafe=wsck) for wsck in\n wishlist_key.get().sessionKeys]\n sessions = ndb.get_multi(session_keys)\n return sessions", "def filtered_sessions(self):\n return self.stage.filtered_sessions", "def get_sites(subj):\n sites = []\n for sub in subj:\n sites.append(sub.split('_')[1])\n sites = list(set(sites))\n sites.sort()\n\n return sites", "def get_names(self):\n\n return self.mod_suites.keys()", "def getSessionsByHighlightSearch(self, request):\n sessions = self._getSessionsByHighlightSearch(request)\n # Return individual SessionForm object per Session\n return SessionForms(\n items=[self._copySessionToForm(session) for session in sessions]\n )", "def get_users_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .filter(User.is_admin==False)\n .all()\n )\n return users", "def get_unique_label_list(self) -> List[str]:\n return self.tasks.get_label_list()", "def showlinuxsession():\n print('\\nAPI-KEY: {0}'.format(middleware.ixn.apiKey))\n print('Session ID: {0}'.format(middleware.ixn.sessionId.split('/')[-1]))\n print()", "def get_sessions_by_type(self, request):\n return self.session_service.get_conference_sessions_by_type(\n request.websafeConferenceKey, request.sessionType)", "def site_name(self):\n # TODO: add a check lookup dictionary for other telescopes\n # to ensure astropy compatibility\n return self.meta[\"header\"][\"TELESCOP\"]", "def se_list(self, dest):\n hostList=self.findSites_(dest)\n req=''\n reqtmp=[]\n concString = '||'\n\n for arg in hostList:\n reqtmp.append(' Member(\"'+arg+'\" , other.GlueCESEBindGroupSEUniqueID) ')\n\n if len(reqtmp): req += \" && (\" + concString.join(reqtmp) + \") \"\n\n return req", "def read_sessions(self):\n path = self.get_session_path()\n # catch?\n return yaml.load(open(path, encoding=\"utf8\"))", "def get_team_names(driver):\n name_elements = driver.find_elements_by_class_name(\"name\")\n team_names = [name.text for name in name_elements]\n return team_names", "def getPuttyConnections():\n psessions = []\n os.system(r'regedit /a /e \"%userprofile%\\desktop\\putty-registry.reg\" HKEY_CURRENT_USER\\Software\\Simontatham')\n pdef = os.path.join(winshell.desktop(), \"putty-registry.reg\")\n r = open(pdef, 'r').read().splitlines()\n prefix = \"[HKEY_CURRENT_USER\\Software\\Simontatham\\PuTTY\\Sessions\"\n for l in r:\n if l.startswith(prefix):\n psessions.append(l[len(prefix) + 1:-1])\n return psessions", "def list_silos(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.SILOS_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )", "def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names" ]
[ "0.6083844", "0.5573907", "0.5564926", "0.54466736", "0.5325585", "0.53207505", "0.5316571", "0.527415", "0.5223453", "0.5201878", "0.5165152", "0.514091", "0.5095732", "0.50864303", "0.5076436", "0.50540745", "0.50449497", "0.5036865", "0.50282997", "0.50101763", "0.49757937", "0.49587727", "0.4914938", "0.49044168", "0.4901336", "0.48946995", "0.4886746", "0.4883055", "0.48687926", "0.4866625", "0.48520875", "0.48519826", "0.4851686", "0.4849107", "0.48387355", "0.48249805", "0.4822674", "0.48120055", "0.4805462", "0.4798911", "0.47910282", "0.4787917", "0.4787687", "0.4771645", "0.47612876", "0.4749471", "0.4712568", "0.47112483", "0.47087854", "0.47048938", "0.47027677", "0.46965933", "0.4696299", "0.4685312", "0.4682149", "0.4681275", "0.46685192", "0.4652605", "0.46408653", "0.46264195", "0.4623435", "0.46187055", "0.46088418", "0.46086854", "0.46050236", "0.4603865", "0.46034434", "0.46031818", "0.45980883", "0.45785242", "0.45767215", "0.45759875", "0.45608693", "0.45515862", "0.45512167", "0.4540918", "0.4528524", "0.45196742", "0.4518834", "0.45118183", "0.45101875", "0.4500229", "0.4495941", "0.4485737", "0.4484794", "0.4482438", "0.44781592", "0.447326", "0.44709513", "0.44692805", "0.4469023", "0.44614545", "0.44570684", "0.44570133", "0.44565257", "0.44554475", "0.44524798", "0.444734", "0.44432083", "0.44393066" ]
0.71040463
0
Simple name representation for a session
def __unicode__(self):
    return self.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_name(self):\n return utils.to_unicode(lib.sp_session_user_name(self._sp_session))", "def name(self) -> str:\n return f\"{self._inst} {self._sid_data['sid']} {self._data[self._sid_data['sid_name']]}\"", "def get_session_name(self, vid):\n return \"ssn-{0}\".format(vid)", "def name(self):\n return str(self.name)", "def session_scope_prefix():\n return ''.join(choice(ascii_uppercase) for _ in range(6)) + '-'", "def name ( self ) :\n return self.__name if self.__name else ''", "def name(self):\n return str()", "def name(self): \n\t\treturn self._name", "def name(self):\n return self[\"name\"]", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def get_name():", "def name():\n pass", "def name():\n pass", "def name(self):\n\t\treturn self.name_", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def 
name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")", "def name(self) -> str:\n return pulumi.get(self, \"name\")" ]
[ "0.69617414", "0.6902516", "0.68717957", "0.6436865", "0.642208", "0.63811517", "0.637589", "0.6362622", "0.63417363", "0.6340011", "0.6340011", "0.6340011", "0.6340011", "0.63188624", "0.6313412", "0.6313412", "0.6306013", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694", "0.63002694" ]
0.0
-1
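As an editorial aside on the record above: the positive document is a bare `__unicode__` accessor, which only makes sense on a class that already stores a `name`. A minimal, self-contained sketch of how it would be used (the `Session` class and its constructor are assumptions for illustration, not part of the dataset):

class Session(object):
    """Hypothetical container; only `name` matters for the documented method."""

    def __init__(self, name):
        self.name = name

    def __unicode__(self):
        # Simple name representation for a session (the record's query).
        return self.name

    # On Python 3, __str__ plays the role __unicode__ had on Python 2.
    __str__ = __unicode__


if __name__ == "__main__":
    print(Session("demo-session"))  # -> demo-session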
Delete one or more keys specified by ``keys``
async def delete(self, *keys, **kwargs):
    def gen_keys(keys):
        all_keys = []
        for key in keys:
            if isinstance(key, list):
                all_keys += gen_keys(keys=key)
            else:
                all_keys.append(key)
        return all_keys

    all_keys = gen_keys(keys)
    for key in all_keys:
        await self._client_conn.hdel(key=self.name, field=key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_many(self, keys):\n raise NotImplementedError()", "def delete_many(self, keys):\n try:\n if keys:\n self._cache.delete(*map(self.prepare_key, keys))\n except Exception as err:\n return self.warn_or_error(err)", "def delete(cls, *keys):\n todelete = []\n namespace, kind, member = Schema.Get(cls)\n for key in keys:\n assert isinstance(key, str)\n todelete.append(Key(namespace, kind, key)) \n Lisa.delete(*todelete)", "def delete_many(self, keys):\n return self.delete_many_values(keys)", "def delete_many(self, *keys):\n self.collection.remove({'_id': {'$in': keys}})\n return True", "def Delete(keys):\n keys, multiple = NormalizeAndTypeCheckKeys(keys)\n\n if multiple and not keys:\n return\n\n req = datastore_pb.DeleteRequest()\n req.key_list().extend([key._Key__reference for key in keys])\n\n tx = _MaybeSetupTransaction(req, keys)\n\n resp = datastore_pb.DeleteResponse()\n try:\n apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Delete', req, resp)\n except apiproxy_errors.ApplicationError, err:\n raise _ToDatastoreError(err)", "def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)", "def delete(cls, keys, pipe=None):\n with cls._pipe(pipe) as pipe:\n core = cls.core(pipe)\n core.delete(*keys)", "def delete_many(self, keys, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n if not keys:\r\n return\r\n\r\n keys = [self.make_key(k, version=version) for k in keys]\r\n try:\r\n return client.delete(*keys)\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)", "def remove_keys(_dict, _keys):\n if isinstance(_keys, str):\n if _keys in _dict:\n del _dict[_keys]\n else:\n for _key in _keys:\n _dict = remove_keys(_dict, _key)\n return _dict", "def delete(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"DEL\", *keys)", "def delete(self, keys: List[K]) -> List[bool]:\n raise NotImplementedError('delete must be reimplemented in concrete implementation')", "def delete(\n self, keys: Optional[Iterable[Text]] = None\n ) -> Dict[Text, StateDictInterface]:\n records = self.execute()\n assert isinstance(records, dict)\n\n self.store.delete_many(records.values(), keys=keys)\n return records", "def del_quiet(dic, *keys):\n for key in keys:\n try:\n del dic[key]\n except KeyError:\n pass", "def deleteTable(*keys):\r\n\treturn getGameData().delTable(*keys)", "def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp", "def del_dict_keys(dict_in, keys):\n for key in keys:\n if key in dict_in:\n del dict_in[key]\n return dict_in", "def delete_keys_from_dict(d, keys):\n if isinstance(d, dict):\n for field in d.keys():\n if field in keys:\n del d[field]\n elif isinstance(d[field], dict) or isinstance(d[field], list) or isinstance(d[field], set):\n delete_keys_from_dict(d[field], keys)\n elif isinstance(d, dict) or isinstance(d, list) or isinstance(d, set):\n for i in d:\n delete_keys_from_dict(i, keys)", "def multi_del(self, keys, no_update_log=False):\n # TODO: write better documentation: why would user need the no_update_log param?\n opts = (no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n if not isinstance(keys, (list, tuple)):\n keys = list(keys)\n\n wait(self.proto.misc(\"outlist\", keys, opts))", "def delete_keys_from_dict(dictionary, list_keys):\n for k in list_keys:\n try:\n del dictionary[k]\n except KeyError:\n pass\n for v in dictionary.values():\n 
if isinstance(v, dict):\n delete_keys_from_dict(v, list_keys)\n\n return dictionary", "def del_seqs(self, keys):\n for j in range(len(keys)):\n del self._d_seqs[keys[j]]\n self._num_seqs = int(len(self._d_seqs))\n self._d_seqs = self._d_seqs\n self._seqs = list(self._d_seqs)", "def delete_many(self, keys, version=None):\r\n res = 0\r\n for key in [self.make_key(k, version=version) for k in keys]:\r\n client = self.get_server(key)\r\n res += self.delete(key, client=client)\r\n return res", "def except_keys(dic, *keys):\n ret = dic.copy()\n for key in keys:\n try:\n del ret[key]\n except KeyError:\n pass\n return ret", "def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))", "def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))", "def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))", "def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new", "def try_del(d, keys):\n for key in keys:\n try:\n del d[key]\n except KeyError:\n pass", "def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated", "def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)", "def delete_keys_from_dict(dict_del, the_keys):\n # make sure the_keys is a set to get O(1) lookups\n if type(the_keys) is not set:\n the_keys = set(the_keys)\n for k, v in dict_del.items():\n if k in the_keys:\n del dict_del[k]\n if isinstance(v, dict):\n delete_keys_from_dict(v, the_keys)\n if isinstance(v, list):\n for item in v:\n if isinstance(item, dict):\n delete_keys_from_dict(item, the_keys)\n return dict_del", "def delete_keys(prefix, *args):\n rc = redis.StrictRedis(host=REDIS_SINGLE_HOST, port=REDIS_PORT, db=0)\n # 如果有多个参数,将多个参数拼接成一个key\n if args:\n for i in args:\n prefix = str(prefix) + str(i)\n keys = rc.keys(\"*\" + prefix + \"*\")\n for key in keys:\n rc.delete(key)", "def delete_metadata(d, keys):\n for data in keys:\n d = del_dict_attrs(d, '.'.join(data.split('.')[1:]))\n return d", "def delete(self, *names):\n if len(names) != 1:\n raise RedisClusterException(\"deleting multiple keys is not implemented in pipeline command\")\n\n return self.execute_command('DEL', names[0])", "def delete_metadata(self, keys):\n return self.manager.delete_metadata(self, keys)", "def delete_by_key(self, key_fields):\n tmp = dict(zip(self._key_columns, key_fields))\n return self.delete_by_template(tmp)", "def remove_from_dictionary(self,dictionary,*keys):\r\n for key in keys:\r\n if key in dictionary:\r\n value = dictionary.pop(key)\r\n logger.info(\"removed item with key '%s' and value '%s'\" %(key,value))\r\n else:\r\n logger.info(\"Key '%s' not found\" %(key))", "def _del_item(dic: dict, keys: list):\n\tdic = _get_item(dic, keys[:-1])\n\tdel dic[keys[-1]]", "def remove_by_keys(self, keys):\n return list(filter(lambda item: item.keyword not in set(keys), self._metadata))", "def delete_keys_tags(self,\r\n index,\r\n deletedkeys):\r\n\r\n\r\n for k_temp in deletedkeys:\r\n k_temp = k_temp.strip()\r\n if k_temp in set(self.get_keys()):\r\n self.discard_index_from_key(k_temp, index)\r\n if 
self.get_indexes_for_key(k_temp) == set():\r\n self.eliminate_key(k_temp)\r\n for t_temp in self.get_tags():\r\n if k_temp in self.get_keys_for_tag(t_temp):\r\n self.discard_key_from_tag(t_temp,k_temp)\r\n if not self.get_keys_for_tag(t_temp):\r\n self.delete_tag(t_temp)", "def delete(self, resource, keys, url_prefix, auth, session, send_opts):\n success = True\n exc = HTTPErrorList('At least one key-value update failed.')\n\n for key in keys:\n req = self.get_metadata_request(\n resource, 'DELETE', 'application/json', url_prefix, auth, key)\n prep = session.prepare_request(req)\n resp = session.send(prep, **send_opts)\n if resp.status_code == 204:\n continue\n err = (\n 'Delete failed for {}: {}, got HTTP response: ({}) - {}'\n .format(resource.name, key, resp.status_code, resp.text))\n exc.http_errors.append(HTTPError(err, request=req, response=resp))\n success = False\n\n if not success:\n raise exc", "def invalidate_keys(self, keys):\r\n if not keys:\r\n return\r\n flush, flush_keys = self.find_flush_lists(keys)\r\n\r\n if flush:\r\n cache.delete_many(flush)\r\n if flush_keys:\r\n self.clear_flush_lists(flush_keys)", "def del_all(self, items):\n for item in items:\n item.key.delete()\n logger.debug(\"Deleted all the items\")", "def _delete_keys_script(key_pattern):\n return \"\"\"\n local curkey = redis.call('keys', '%(key_pattern)s')\n if next(curkey) then\n redis.call('del', unpack(curkey))\n end\n \"\"\" % dict(key_pattern = key_pattern)", "def delete_multi(self, keys, dead_time=0, key_prefix=''):\n\n\t\tserver_keys, _deprefix = yield self._map_keys_to_servers(keys, key_prefix)\n\n\t\t# send out all requests on each server before reading anything\n\t\tdead_servers = []\n\n\t\tif dead_time is None:\n\t\t\tdead_time = 0\n\n\t\tret = True\n\n\t\tfor server in server_keys.iterkeys():\n\t\t\tcommands = []\n\t\t\tfor prefixed_key, _original_key in server_keys[server]:\n\t\t\t\tcommands.append(\"delete %s %d\\r\\n\" % (prefixed_key, dead_time))\n\n\t\t\ttry:\n\t\t\t\tserver.send_cmds(''.join(commands))\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\t\t\t\tdead_servers.append(server)\n\n\t\t# if any servers died on the way, don't expect them to respond.\n\t\tfor server in dead_servers:\n\t\t\tdel server_keys[server]\n\n\t\tfor server, keys in server_keys.iteritems():\n\t\t\ttry:\n\t\t\t\tfor _key in keys:\n\t\t\t\t\tres = yield server.read_line()\n\t\t\t\t\tif res != \"DELETED\":\n\t\t\t\t\t\tself._debuglog(\"expected 'DELETED', got %r\" % (res, ))\n\t\t\texcept tcp.ConnectionClosedException:\n\t\t\t\tserver.mark_dead()\n\t\t\t\tret = False\n\n\t\traise StopIteration(ret)", "def Exclude(*keys):\n\n def exclude(row):\n res = dict(row)\n for k in keys:\n if k in res:\n del res[k]\n return res\n\n return \"Exclude\" >> beam.Map(exclude)", "def without_keys(keys):\n keys = frozenset(keys) # frozenset has efficient membership lookup\n return filter_keys_c(fnot(partial(operator.contains, keys)))", "def delete_metadata(self, keys=None):\n return self.manager.delete_metadata(self, keys=keys)", "def delete(self, *names):\n\n return [shard.delete(*keys) for shard, keys\n in self.gather_keys_by_shard(names)]", "def unset(keys):\n exit_code = 0\n if len(keys) == 1:\n return exit_code\n for x in keys[1:]:\n try:\n del environ[x]\n except KeyError:\n exit_code = 1\n pass\n return exit_code", "def filter_keys_in_set(ds, keys):\n logger.info(\"For each element in the dataset, keeping only values with keys: %s.\", ', '.join(keys))\n\n def filter_keys(x):\n return {k: v for k, v 
in x.items() if k in keys}\n\n return ds.map(filter_keys, num_parallel_calls=TF_AUTOTUNE)", "def deleteAttributes(self, keys):\n self.graph.deleteExtendedAttributes(self.entityId, keys)", "def del_license(fitsfile, keys):\n try:\n for key in keys:\n pyfits.delval(fitsfile, key)\n except KeyError:\n print(\"License information not found.\", file=sys.stderr)", "async def delete_files(client, bucket, files: Set):\n result = await client.delete_objects(Bucket=bucket, Delete={\n 'Objects': [{'Key': file} for file in files]\n })\n print(result)", "def unset_qos_key(self, qos_id, keys):\n put_body = json.dumps({'keys': keys})\n resp, body = self.put('qos-specs/%s/delete_keys' % qos_id, put_body)\n self.validate_response(schema.unset_qos_key, resp, body)\n return rest_client.ResponseBody(resp, body)", "def omit(self, *keys):\n return _({k: self[k] for k in self._ if k not in keys})", "def delete_objects(self, bucket_name, key_list):\n result = []\n for key in key_list:\n self.delete_object(bucket_name, key)\n return result", "def _delete_keys(bucket, keys):\n result = {}\n status = -1\n output = None\n try:\n deleted_result = bucket.delete_keys(keys)\n for deleted in deleted_result.deleted:\n result[deleted.key] = {'status': 0, 'output': None}\n for error in deleted_result.errors:\n result[error.key] = {'status': -1, 'output': error.message}\n except:\n status = -1\n output = \"Failed to delete keys, error: %s\" % (traceback.format_exc())\n\n for key in keys:\n if key not in result:\n result[key] = {'status': status, 'output': output}\n return result", "def prune_option_list(opts, keys):\n opt_d = opt_to_dict(opts)\n for k in keys:\n if k in opt_d:\n del opt_d[k]\n return [k for item in opt_d.iteritems() for k in item]", "def delete_key_HELPER(data_dict, key_list, key_to_delete):\n data_dict = get_key_from_dict_HELPER(data_dict, key_list[:-1])\n data_dict.pop(key_to_delete)\n return data_dict", "def test_remove_multiple_key(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).remove_key([\"funilrys\", \"Py\"])\n\n self.assertEqual(expected, actual)", "def clear_flush_lists(self, keys):\r\n cache.delete_many(keys)", "def UnLoadDictKeys(dct, keys_lst):\n if not keys_lst:\n return dct\n SanityCheck.ValidateTypes(((dct, dict), (keys_lst, list)))\n\n new_dct = {}\n for key in dct:\n if key in keys_lst:\n continue\n new_dct[key] = dct[key]\n\n return new_dct", "def delete_metadata(self, snapshot, keys):\n response_list = []\n snapshot_id = base.getid(snapshot)\n for k in keys:\n resp, body = self._delete(\"/snapshots/%s/metadata/%s\" %\n (snapshot_id, k))\n response_list.append(resp)\n\n return common_base.ListWithMeta([], response_list)", "def removeAllKeys(self) -> None:\n ...", "def unset(self, fields, keys, table_name=None, match_any=False):\n return self._get_storage().unset(fields, keys, table_name=table_name, match_any=match_any)", "def remove_data(self, data_keys):\r\n self._verify_key_types(name='data', keys=data_keys)\r\n if isinstance(data_keys, (str, unicode)):\r\n data_keys = [data_keys]\r\n for data_key in data_keys:\r\n del self[data_key]", "def attribute_del(self, serial, domain, keys=()):\n\n if keys:\n q = (\"delete from attributes \"\n \"where serial = ? and domain = ? and key = ?\")\n self.executemany(q, ((serial, domain, key) for key in keys))\n else:\n q = \"delete from attributes where serial = ? 
and domain = ?\"\n self.execute(q, (serial, domain))", "def delete(self, key):", "def delete_metadata(self, volume, keys, deletes=10, delete_size=3):\n if len(keys) < deletes * delete_size:\n raise exceptions.InvalidArgumentsException(\n \"Not enough metadata keys to delete: \"\n \"%(num_keys)s keys, but asked to delete %(num_deletes)s\" %\n {\"num_keys\": len(keys),\n \"num_deletes\": deletes * delete_size})\n # make a shallow copy of the list of keys so that, when we pop\n # from it later, we don't modify the original list.\n keys = list(keys)\n random.shuffle(keys)\n action_name = (\"cinder_v%s.delete_%s_metadatas_%s_times\"\n % (self.version, delete_size, deletes))\n with atomic.ActionTimer(self, action_name):\n for i in range(deletes):\n to_del = keys[i * delete_size:(i + 1) * delete_size]\n self._get_client().volumes.delete_metadata(volume, to_del)", "def delete_metadata(self, keys=None):\n return self.parent.delete_metadata_for_node(self, keys=keys)", "def without_keys(d, keys):\n return {x: d[x] for x in d if x not in keys}", "def del_psana_options(self, keys):\n try:\n for key in keys:\n self._data.psana_cfg_dict.pop(key, None)\n except:\n print 'Invalid keys to remove from psana options:', keys", "async def clear_entry(self, *keys):\n\n keys = tuple(keys)\n\n async with self._lock:\n if keys in self._non_existent:\n log.info(\n \"Clearing non-existent entry %s for table %s\", keys, self.table\n )\n self._non_existent.remove(keys)\n\n removed = bool(self.entries.pop(keys, False))\n\n if removed:\n log.info(\"Clearing entry %s for table %s\", keys, self.table)\n\n return removed", "def clear(self, keys=None, records=None, **kwargs):\n keys = keys or kwargs.get('key')\n records = records or kwargs.get('record')\n if isinstance(keys, list) and isinstance(records, list):\n return self.client.clearKeysRecords(keys, records, self.creds, self.transaction, self.environment)\n elif isinstance(records, list) and not keys:\n return self.client.clearRecords(records, self.creds, self.transaction, self.environment)\n elif isinstance(keys, list) and records:\n return self.client.clearKeysRecord(keys, records, self.creds, self.transaction, self.environment)\n elif isinstance(records, list) and keys:\n return self.client.clearKeyRecords(keys, records, self.creds, self.transaction, self.environment)\n elif keys and records:\n return self.client.clearKeyRecord(keys, records, self.creds, self.transaction, self.environment)\n elif records:\n return self.client.clearRecord(records, self.creds, self.transaction, self.environment)\n else:\n require_kwarg('record or records')", "def remove(self, table_name, keys=None, any=False, eids=None):\n table = self.db.table(table_name)\n if eids is not None:\n LOGGER.debug(\"%r: remove(eids=%r)\" % (table_name, eids))\n if isinstance(eids, list):\n return table.remove(eids=eids)\n else:\n return table.remove(eids=[eids])\n else:\n LOGGER.debug(\"%r: remove(keys=%r)\" % (table_name, keys))\n return table.remove(self._getQuery(keys, any))", "def filter_keys_out(items, keys):\n for key, value in items.items():\n if key in keys:\n continue\n yield key, value", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def unlink(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"UNLINK\", *keys)", "def delete_keypairs(self,\n keypairs,\n check=True):\n for keypair in keypairs:\n self._client.delete(keypair.id)\n\n if check:\n self.check_keypairs_presence(keypairs, must_present=False)", "async def 
delete_metadata(dbcon: DBConnection, object_type: str, object_id: int,\n keys: Optional[Iterable[str]] = None):\n\n async def _run(cur: Cursor) -> None:\n if keys:\n # noinspection PyTypeChecker\n for key in keys:\n q = \"\"\"delete from object_metadata where object_type=%s and object_id=%s and `key`=%s\"\"\"\n q_args = (object_type, object_id, key) # type: Tuple\n await cur.execute(q, q_args)\n else:\n q = \"\"\"delete from object_metadata where object_type=%s and object_id=%s\"\"\"\n q_args = (object_type, object_id)\n await cur.execute(q, q_args)\n\n await dbcon.transact(_run)", "def delete_keys_with_prefix(prefix):\n rc = redis.StrictRedis(host=REDIS_SINGLE_HOST, port=REDIS_PORT, db=0)\n keys = rc.keys(\"*\" + prefix + \"*\")\n for key in keys:\n rc.delete(key)", "def delete_dict_entries(dictionary, entries):\n\n for key in entries:\n if key in dictionary:\n del dictionary[key]\n\n return dictionary\n # parameters = {key: parameters[key] for key in parameters if key not in del_parameter}", "def remove_states(self, keys: list):\n if self.spec.graph:\n self.spec.graph.clear_children(keys)", "def filterKeys(document, keys):\n return {key: document[key] for key in keys}", "def delete(self, *args: str):\n toDelete = {}\n for a in args:\n toDelete[a] = None\n return self._object.update(meta=toDelete)", "def filter_keys(data, keys=[]):\n # filter key\n for filter_key in keys:\n if filter_key in data:\n del data[filter_key]\n\n # filter sub dictionaries\n for _, value in data.items():\n if type(value) == dict:\n filter_keys(value, keys)", "def delete(self, key):\n pass", "def delete(self, key):\n pass", "def only_some_keys(dic, *keys):\n ret = {}\n for key in keys:\n ret[key] = dic[key] # Raises KeyError.\n return ret", "def unset(self, keys=None):\n if not keys:\n keys = self._trans_dict.keys()\n for key in keys:\n key = key.upper()\n self._trans_dict[key] = key", "def test_sftp_delete_all_keys(self):\n config = Config()\n metadata_bucket = config.config.get(\"metadata\", \"bucket\")\n data_bucket = config.config.get(\"data\", \"bucket\")\n metadata_provider = sftp.Sftp(config, metadata_bucket).connect()\n provider = sftp.Sftp(config, data_bucket).connect()\n for key, metadata in metadata_provider.list().items():\n metadata_provider.delete(key)\n for key, data in provider.list().items():\n provider.delete(key)\n metadata_provider.disconnect()\n provider.disconnect()", "def delete_attributes(self, attrs):\r\n assert(isinstance(attrs, list)), \"Argument must be a list of names of keys to delete.\"\r\n self._manager.domain.delete_attributes(self.id, attrs)\r\n self.reload()\r\n return self", "def remove(self, fields, pipe=None):\n if not fields:\n return\n\n if self.key_name in fields:\n raise InvalidOperation('cannot remove the redis key')\n\n with self._pipe(pipe) as pipe:\n core = self.core(pipe=pipe)\n core.hdel(self.key, *fields)\n\n def cb():\n for k in fields:\n try:\n del self._data[k]\n except KeyError:\n pass\n\n pipe.on_execute(cb)", "def delete_bykey(cls, keydict):\n cls.dbm().modelclass_deletebykey(cls, keydict)", "def delete_key(self,\r\n dkey):\r\n\r\n\r\n if (input(queries.DELETE_CONF_BEG\r\n +dkey+queries.DELETE_CONF_END) in YESTERMS):\r\n\r\n if dkey in self.keys():\r\n\r\n for i_temp in self.get_all_indexes():\r\n if dkey in self.get_keys_from_note(i_temp):\r\n tempnote = self.get_note(i_temp).delete_keys({dkey})\r\n self.add_note(i_temp,note=tempnote)\r\n if self.get_keys_from_note(i_temp) == set():\r\n temp = self.get_keys_from_note(i_temp)\r\n temp.add(VOIDTERM)\r\n 
self.add_note(i_temp,\r\n keyset_only=temp)\r\n self.add_keys_tags(i_temp,\r\n {VOIDTERM})\r\n\r\n self.delete_keys_tags(i_temp, {dkey})", "def many(keys: List[str]):\n for key in keys:\n actions.key(key)", "def batch_request_payload(delete_keys=(), **kwargs):\n payload = {\n \"operation\": \"download\",\n \"transfers\": [\"basic\"],\n \"ref\": {\"name\": \"refs/heads/master\"},\n \"objects\": [\n {\n \"oid\": \"12345678\",\n \"size\": 8\n }\n ]\n }\n\n for key in delete_keys:\n del payload[key]\n\n payload.update(kwargs)\n return payload", "def _generate_delete_sql(self, delete_keys):\n for key in delete_keys:\n app_label, sql_name = key\n old_node = self.from_sql_graph.nodes[key]\n operation = DeleteSQL(sql_name, old_node.reverse_sql, reverse_sql=old_node.sql)\n sql_deps = [n.key for n in self.from_sql_graph.node_map[key].children]\n sql_deps.append(key)\n self.add_sql_operation(app_label, sql_name, operation, sql_deps)", "def deleteDistortionKeywords(hdr):\n # We need to use '.pop' to guard against the possibility, however remote,\n # that the keyword has already been removed before calling this function.\n for kw in DIST_KWS:\n hdr.pop(kw, None)\n\n # This can use 'del' since it will work even if the keywords\n # are missing altogether since the multi_kw uses wild-cards\n for multi_kw in DIST_MULTI_KWS:\n del hdr[multi_kw]" ]
[ "0.8140219", "0.8091643", "0.80413496", "0.8027012", "0.7771675", "0.7751918", "0.7727257", "0.7682047", "0.7680383", "0.7599218", "0.7451378", "0.7443246", "0.73895985", "0.7378481", "0.7349482", "0.7295384", "0.72044706", "0.7172078", "0.71646804", "0.711923", "0.7093819", "0.70718133", "0.7067456", "0.70660895", "0.70660895", "0.70660895", "0.7063682", "0.7059187", "0.69828", "0.6960323", "0.6881653", "0.6845987", "0.6789528", "0.6772873", "0.6767385", "0.67309165", "0.67264366", "0.6693104", "0.6688797", "0.6671918", "0.6627721", "0.6564646", "0.65286654", "0.64946425", "0.6453439", "0.6420939", "0.6413092", "0.63777745", "0.6332951", "0.6325445", "0.62842965", "0.6235126", "0.6177052", "0.6155111", "0.6135354", "0.61214334", "0.6113621", "0.6108001", "0.61057323", "0.6088258", "0.6068391", "0.60674506", "0.6049522", "0.60174954", "0.599601", "0.599471", "0.5981613", "0.59581846", "0.5953094", "0.5921946", "0.5895247", "0.5888775", "0.5882334", "0.58728266", "0.58572996", "0.5839878", "0.58208466", "0.58154863", "0.5795838", "0.57688487", "0.5767303", "0.57375544", "0.57368976", "0.57300484", "0.57255584", "0.5690344", "0.5681465", "0.56757915", "0.56757915", "0.5675198", "0.56712204", "0.56600684", "0.5630334", "0.56267244", "0.5626701", "0.5614739", "0.5608434", "0.5606344", "0.5605907", "0.56047064" ]
0.8353874
0
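For context on the `delete` document in the record above: it flattens arbitrarily nested lists of keys and then issues one `hdel` per resulting field. A runnable sketch with an in-memory stand-in for the client connection (the `FakeConn` and `Hash` names are illustrative assumptions, not the dataset's actual classes):

import asyncio


class FakeConn:
    """In-memory stand-in for the async client used by the documented method."""

    def __init__(self):
        self.store = {}

    async def hdel(self, key, field):
        self.store.get(key, {}).pop(field, None)


class Hash:
    def __init__(self, name, conn):
        self.name = name
        self._client_conn = conn

    async def delete(self, *keys, **kwargs):
        # Flatten nested lists of keys, then delete each hash field.
        def gen_keys(keys):
            all_keys = []
            for key in keys:
                if isinstance(key, list):
                    all_keys += gen_keys(keys=key)
                else:
                    all_keys.append(key)
            return all_keys

        for key in gen_keys(keys):
            await self._client_conn.hdel(key=self.name, field=key)


async def demo():
    conn = FakeConn()
    conn.store["h"] = {"a": 1, "b": 2, "c": 3}
    await Hash("h", conn).delete("a", ["b", ["c"]])
    print(conn.store["h"])  # -> {}


asyncio.run(demo())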
Return a boolean indicating whether key field exists
async def exists(self, field, **kwargs):
    return await self._client_conn.hexists(key=self.name, field=field)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_key(self, key):\n return key in self.db", "def has(self, key):", "def has(self, key):\n return False", "def has_key(self, key):\n return self.__dict__.has_key(key)", "def has_key(self, name):\n return self[name] <> None", "def key_in_field(self, key, fields):\n for field in fields:\n if key in field:\n return True\n return False", "def has_key(self, key):\n return key in self", "def has_key(self, key):\n return self.contains(key)", "def has(self, key):\n return self.data.get(key, None) is not None", "def exists(self, key_name: str) -> bool:\n pass", "def has(self, key):\n return self.collection.find_one({'_id': key}) is not None", "def exists(self):\n try:\n key = self.key\n except DoesNotExist:\n \"\"\"\n If the object doesn't exists anymore, its PK is deleted, so the\n \"self.key\" call will raise a DoesnotExist exception. We catch it\n to return False, as the field doesn't exists too.\n \"\"\"\n return False\n else:\n return self.connection.exists(key)", "def has_attribute(self, key):\n return key in self.__dict", "def has_key(self,index):\n\t\ttry:\n\t\t\tself.__get(index)\n\t\t\treturn True\n\t\texcept:\n\t\t\treturn False", "def has_key(self, name):\n return name in list(self.keys())", "def has(self, key):\r\n # handle any special cases\r\n if key.scope == Scope.content:\r\n self._load_definition()\r\n elif key.scope == Scope.parent:\r\n return True\r\n\r\n # it's not clear whether inherited values should return True. Right now they don't\r\n # if someone changes it so that they do, then change any tests of field.name in xx._field_data\r\n return key.field_name in self._fields", "def has(key, nodename=None):\n value = _get_property(key, nodename, False)\n return bool(value)", "def has(self, key) -> bool:\r\n if self.get(key) is not None:\r\n return True\r\n return False", "def containsKey(self, key):\n return get(key) != None", "def hasValue(self, key):\n return self.has_key('__' + key)", "def hasKey(self,\n key):\n return self.__keyCount.has_key(key)", "def haskey(featureVals, fkey):\n try:\n featureVals[fkey]\n except KeyError:\n return False\n\n #warn(HASKEYMSG % (fkey))\n return True", "def has(self, key):\n return key in self._store", "def __contains__(self, key):\n try:\n if self[key]:\n return True\n except KeyError:\n return False", "def has_field(self, field_name: str) -> bool:\n return bool(self.try_get_field(field_name))", "def hasField(self) -> bool:\n return bool(self.__field)", "def has_key(cls, id):\n return super().has_key(id)", "def _has(self, key):\n path = self._get_key_path(key)\n return exists(path)", "def __contains__(self, key):\n try:\n self[key]\n return True\n except:\n return False", "def has_key(self, key):\n return key in self.code_table", "def contains(self, key: int) -> bool:\n if key in self.d:\n return True\n else:\n return False", "def field_exists(table, field):\n fieldList = [f.name for f in arcpy.ListFields(table)]\n return True if field in fieldList else False", "def has_key(self, key):\n return key.lower() in self._data", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def tag_key_exists(self, key):\n return key in self.map", "def key_exists(key, dictionary):\n return key in dictionary and dictionary[key] is not None", "def __contains__(self, key):\n return hasattr(self, key)", "def has_keys(self) -> bool:\n \n for key, value in self.key_satified.items():\n if value is not True:\n return False\n return True", "def __contains__(self, key):\n return self._lookup(key).value is 
not None", "def __contains__(self, key):\n try:\n self._get(key)\n return True\n except Exception:\n return False", "async def _exists(self, key):\n return key in SimpleMemoryBackend._cache", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def __contains__(self, key):\n return key in self.keys", "def has_key(self, name, *args, **kwargs):\n if not name in self._list(*args, **kwargs):\n return False\n return True", "def has(self, key):\n return os.path.isfile(self._filename(key))", "def __contains__(self, key):\n\t\treturn key in self.__dStore", "def __contains__(self, key: K) -> bool:\n return key in self._table", "def key_exists(key, value):\n\n response = table.query(\n KeyConditionExpression = Key(key).eq(value)\n )\n\n if response['Items']:\n return True\n\n return False", "def fieldExists(layerDefinition, fieldName):\r\n field_names = [layerDefinition.GetFieldDefn(i).GetName() for i in range(layerDefinition.GetFieldCount()) ]\r\n return ( len(filter(lambda x: fieldName in x, field_names)) <> 0)", "def has_field(cls, field) -> bool:\n try:\n cls._meta.get_field(field)\n return True\n except models.FieldDoesNotExist:\n return False", "def contains_key_at(self, key, index):\r\n return index < self.num_keys() and self.keys[index] == key", "def __contains__(self, key):\n return self.keys[self._linear_probe(key, \"contains\")] is not None", "def has_field(self, field):\n return field in self.extra_fields", "def has(self, key: str) -> Any:\n return key in self.variables", "def dexists(self, name, key):\n return key in self.db[name]", "def key_exists(dictionary, key):\n\n exists = dictionary.get(key, None)\n return exists is not None", "def exists(self):\n return os.path.exists(self.key_file)", "def contains(self, key):\n if key in self.key_list:\n return True\n return False", "def exists(self, key):\n try:\n return (self.salt + str(key)) in self.DB\n except KeyError:\n return False", "def exists(self, conn, key):\n return conn.exists(key)", "def has_attr(self, key):\n return key in self.attrs", "def array_key_exists(name, item):\n return item.has_key(name);", "def contains(self, key):\n try:\n self.keyvaluepair_set.get(key=key)\n return True\n except KeyValuePair.DoesNotExist:\n return False", "def contains(self, key):\n\n return key in self.keys()", "def _is_unique_key(self, key):\n return self._in_keys(key, self._unique_keys)", "def __contains__(self, logical_key):\n try:\n self[logical_key]\n return True\n except KeyError:\n return False", "def contains(self, key):\n return self.__db.contains(key)", "def checkIfExists(self, key):\n\t\t\t\n\t\tif self.db.get(key):\n\t\t\treturn self.db.get(key)\n\t\telse:\n\t\t\treturn False", "def has_key(self, key):\n if '.' 
in key:\n first, second = key.split('.', 1)\n return self[first].has_key(second)\n else:\n return key in self.keys()", "def __contains__(self, key):\n\n return key in self.keys_set", "def has(self, key, from_global=None):\n return self.get(key, from_global=from_global) is not None", "def has_file_key(self, key):\n return self.fileList.has_key( key )", "def exist(self, key, value):\n query = \"SELECT * FROM {} WHERE {} = '{}'\".format(self.table, key, value)\n self.cur.execute(query)\n result = self.cur.fetchall()\n return len(result) > 0", "def has_field(self, str field_name):\n cdef std_string fn = <std_string> field_name.encode('UTF-8')\n return self.mdb.get().has_field(fn)", "def __contains__(self, key):\n with SessionContext(self.SessionClass) as session:\n q = session.query(PAW2_DBObject)\n return q.filter(PAW2_DBObject.key == key).count() == 1", "def keypair_exists(self, name):\n return name in [key.name for key in self.get_all_key_pairs()]", "def has_attribute_key(graph_element, attribute_key):\n return attribute_key in graph_element.get_attributes().keys() # return whether key is present", "def exist(self, key):\n record = self._storage.get(key, None)\n if record:\n return record.ttl >= time.time()\n return False", "def check_if_field_is_empty(dict, key):\n try:\n if dict[key] == \"\":\n print(\"The field \", key, \" is empty! Revision number is \", \\\n current_revision, \" in the file \", file_holder)\n return True\n except KeyError:\n pass", "def has(\n obj: Union[JSONArray, JSONObject], # pylint: disable=unsubscriptable-object\n key: Union[int, str, FlatKey], # pylint: disable=unsubscriptable-object\n):\n try:\n get(obj, key)\n return True\n except JSONKeyError:\n return False", "async def contains(self, key: str) -> bool:", "def hasCustomData( self, key ):\n return str(key) in self._customData", "async def _exists(self, key):\n return await self.client.append(key, b'')", "def has_key(self, key):\n return key in self.responses", "def has_user_data(self, key):\n return isinstance(self._user_data, dict) and key in self._user_data", "def contains(self, key: int) -> bool:\n return self._find_key(key, find_empty=False) >= 0", "def __contains__(self, key):\n return key in self._get_storage()", "def is_known_field(self, name):\n return (name in self.fields) or (name in self.collections) or (name == self.id_field_name) or (name == 'cid')", "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def _verify_key_exists(self, key, lookup_dict):\n exists = False\n if get_occurrence_of_key(lookup_dict, key) > 0:\n exists = True\n return exists", "def isValidKey(key):\n return True", "def has_family_key(self, name):\n return name in self.family_keys()", "def __contains__(self, key):\n return key in self._index", "def __contains__(self, key):\n return (key in self.index)", "def __contains__(self, key: str) -> bool:\n return key in self.tables", "def ifExist(file_name, key):\n\tif exists(file_name) and exists(key):\n\t\treturn True\n\telse:\n\t\treturn False", "def __contains__(self, key):\n position = self.hash(key)\n\n for _ in range(self.table_capacity):\n if self.array[position] is None:\n return False\n elif self.array[position][0] == key:\n return True\n else:\n position = (position + 1) % self.table_capacity\n return False", "def __contains__(self, key: K) -> bool:\n raise NotImplementedError", "def IsKeysOnly(self):\n return self.__keys_only", "def exists(self):\n return bool(self.get())" ]
[ "0.77291733", "0.75882906", "0.7558047", "0.75283784", "0.7455858", "0.7444074", "0.7414749", "0.7410323", "0.7327293", "0.7311727", "0.72822326", "0.7263533", "0.72487915", "0.7246976", "0.72341603", "0.71927756", "0.7162223", "0.71532154", "0.7148976", "0.7122543", "0.70960265", "0.70738095", "0.70529145", "0.7029546", "0.7011331", "0.70008403", "0.69814414", "0.6951395", "0.69396484", "0.692456", "0.6871402", "0.6856088", "0.6840506", "0.6828243", "0.68216085", "0.68151027", "0.67923903", "0.67726684", "0.6755913", "0.6754437", "0.6732046", "0.6730745", "0.6726507", "0.6703209", "0.67017406", "0.66986537", "0.6696312", "0.6695081", "0.66895646", "0.66789335", "0.66708845", "0.66632485", "0.66497546", "0.6640302", "0.6628974", "0.661343", "0.6609876", "0.66075456", "0.6605862", "0.65999866", "0.658937", "0.6582048", "0.6579586", "0.6575871", "0.65586805", "0.65558016", "0.65495795", "0.6535643", "0.652335", "0.6512093", "0.64972174", "0.6482063", "0.64649355", "0.6457455", "0.64571565", "0.64459676", "0.64390504", "0.64300495", "0.6413851", "0.64100415", "0.6407942", "0.6407069", "0.63788533", "0.6377607", "0.6376086", "0.6373664", "0.6370059", "0.636984", "0.63557833", "0.63363034", "0.631623", "0.631547", "0.63150674", "0.6302909", "0.629758", "0.6289542", "0.6287012", "0.6286602", "0.627957", "0.62696195" ]
0.66703105
51
Return the value at key ``name``, or None if the key doesn't exist
async def get(self, field, default=None, **kwargs): return await self._client_conn.hget(self.name, field=field)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_value(name):\n\n named_value = get_named_value_raw(name)\n if named_value is not None:\n return named_value.value", "def __getitem__(self, name):\n value = self.get(name)\n if value is not None:\n return value\n raise KeyError(name)", "def find_by_name(self, name):\n return self.get(name)", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def get(self, name):\n # see if its in the store or return none\n if name in self.store:\n return self.store[name]\n else:\n return None", "def get(self, name, default=None):\n\t\treturn self[name] if self[name] is not None else default", "def getValue(self, name):\n values = self.__get('values')\n return values[name]", "def getValue(name, default=None):", "def get(self, name):\n for func in (self.getarg, self.getflag, self.getcmd):\n try:\n return func(name)\n except KeyError:\n pass\n return None", "def __getitem__(self, name):\n return self.get(name)", "def get(self, keyname: str, default: Optional[Any] = None) -> Any:\n try:\n return self[keyname]\n except KeyError:\n return default", "def GetValueByName(self, name):", "def __getitem__(self, name):\r\n return self.get(name)", "def get(self, name: str) -> Value:\n if name in self.read_hooks:\n func = self.read_hooks[name]\n log.debug(\"Will use function {} to read input\".format(func))\n val = func(name)\n return val\n if name in self._map:\n return self._map[name]\n log.debug(\"Did not find a mapping for variable '{}' in {}\".format(name, self._map))\n return self.default_value", "def __getitem__(self, name):\n \n # Can you have a variable and a structure with the same name?\n if name in self.vars:\n return self.vars[name]\n \n name = name.upper()\n if name in self.structs:\n return self.struct[name]\n\n raise KeyError('%s not found as a variable or structure' % (name))", "def get(name, default=None):", "def get_named_value_raw(name):\n\n try:\n return _global_keys[name].get()\n except KeyError:\n named_value = NamedValue.query(NamedValue.name == name).get()\n if named_value is not None:\n _global_keys[name] = named_value.key\n return named_value", "def __getitem__(self, name):\n if name in self.data: return self.data[name]", "def lookup(self, name):\n return self.fieldDict[name]", "def get_thing(name):\n things = get_things()\n return things[name]", "def value_for(cls, name: str) -> t.Any:\n for key, value in list(cls.__labels__.items()):\n if isinstance(value, NameTitle) and value.name == name:\n return key\n return None", "def get_parameter(self, name: str) -> any:\r\n if name in self.kwargs:\r\n return self.kwargs[name]\r\n for x in self.args:\r\n if isinstance(x, dict) and name in x:\r\n return x[name]\r\n else:\r\n return None", "def name(self, name):\n return self[self.name_cache[name]]", "def get_input_by_name(self, name):\n for var in self.inputs:\n if var.get_object().name == name:\n return var\n return None", "async def read_variable_by_name(self, name: str) -> Optional[Variable]:\n try:\n response = await self._client.get(f\"/variables/name/{name}\")\n return pydantic.parse_obj_as(Variable, response.json())\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n return None\n else:\n raise", "def getvalue(self, name, *default):\n try:\n return self.getattr(name).value\n except KeyError:\n if default:\n return default[0]\n raise", "def get_node_by_name(self, name):\n\n for node in self.nodes:\n if node.name == name:\n return node\n\n return None", "def get_object(self, name):\n try:\n return 
self.data['objects'][normalize_object_name(name)]\n except KeyError:\n return None", "def __getitem__(self, name):\n return self.entry[name]", "def get(self, name, default):\n try:\n return self[name]\n except KeyError:\n self.set(name, default)\n return default", "def get(self, name):\n\n if not name in self.store.keys():\n raise NotFoundInDataStore()\n\n return self.store[name][1]", "def lookup(scopes, name):\n # type: (Scopes[T], str) -> Optional[T]\n\n for scope in scopes:\n for key, val in scope:\n if key == name:\n return val\n return None", "def get_value(obj, name):\n if isinstance(obj, dict):\n return obj.get(name)\n\n return getattr(obj, name, obj)", "def get(self, name, default=None):\n return self._storage.get(name, default)", "def __getitem__(self, name):\n if name in self:\n try:\n return getattr(self, name)\n except AttributeError:\n pass\n\n raise KeyError(name)", "def getValue(self, name):\n\n return getattr(self, name)", "def __getitem__(self, name):\n return self.f_get(name)", "def get_el_by_name(items: List[Dict[str, Any]], name: str) -> Dict[str, Any]:\n for item in items:\n if item[\"name\"] == name:\n return item\n print(\"error, key name not found by value\", name, \"in list: \", items)\n sys.exit(1)", "def __getitem__(self, name):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n try:\n r = processes[name]\n except KeyError:\n r = None # no such process called 'name'\n\n return r", "def __getitem__(self, name):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n try:\n r = processes[name]\n except KeyError:\n r = None # no such process called 'name'\n\n return r", "def get_result_by_name(self, name):\n found = None\n for result in self.results:\n if result.heading == name:\n found = result\n break\n return found", "def get_value(name):\n\n metrics = get_metrics()[0]\n\n name = name[len(NAME_PREFIX):] # remove prefix from name\n try:\n result = metrics['data'][name]\n except StandardError:\n result = 0\n\n return result", "def _get(obj, name):\n try:\n # try to get value using dict's __getitem__ descriptor first\n return dict.__getitem__(obj, name)\n except TypeError:\n # if it's a dict, then preserve the TypeError\n if isinstance(obj, dict):\n raise\n # otherwise try one last time, relying on __getitem__ if any\n return obj[name]", "def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None", "def getAttributeByName(self, name):\n\n for eachAttribute in self._attributes:\n if eachAttribute.getName() == name:\n return eachAttribute\n\n return None", "def __get_value(name: str, strict: bool = True, upwards: bool = True, context: th.Optional[ContextType] = None):\n\n var, name = greedy_import_context(name, upwards=upwards) if context is None else (context, name)\n for split in name.split(\".\") if name else []:\n if isinstance(var, dict):\n if split not in var:\n if strict:\n raise KeyError('Invalid key \"%s\"' % name)\n else:\n return None\n var = var[split]\n else:\n if not hasattr(var, split):\n if strict:\n raise AttributeError(\"Invalid attribute %s\" % name)\n else:\n return None\n var = getattr(var, split)\n return var", "def __getitem__(self, name):\n try:\n field = self.fields[name]\n except KeyError:\n raise KeyError(\n \"Key '%s' not found in '%s'. 
Choices are: %s.\" % (\n name,\n self.__class__.__name__,\n ', '.join(sorted(f for f in self.fields)),\n )\n )\n\n return self._fields[name]", "def getParam(self, params, name):\n return params.get(name)", "def get_node(self, name):\n if name in self._nodes:\n return self._nodes[name]\n return None", "def _get(self, key):\n try:\n val = getattr(self, f\"_{key}\")\n if val is not None:\n return val\n else:\n self._load()\n return getattr(self, f\"_{key}\")\n except AttributeError:\n return None", "def _get(self, name):\n raise NotImplementedError", "def get_item(self, name: str) -> Optional[Item]:\n item = self.filter_items(name, limit=1)\n return item[0] if item else None", "def __getitem__(self, name):\n return self._items[name.lower()][1]", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def nodeFromName(self, name):\n for item in self.items():\n if isinstance(item, NodeItem):\n if item.name() == name:\n return item\n return None", "def getValue(self, name: unicode) -> object:\n ...", "def get_argument(self, name):\n val = self.arguments.get(name)\n if val:\n return val[0]\n return None", "def get(self, name):\n # `name` is most likely a capname, so we try that first\n for i in (self._by_capname, self._by_var, self._by_tcap_code):\n if i.get(name):\n return i.get(name)\n else:\n raise TerminfoError(\"'%s' is not a valid terminfo entry\" % name)", "def __getitem__(self, name):\n\n self._lock.acquire()\n try:\n key = self.key(name)\n entry = self._dict.get(key)\n if not entry:\n entry = Entry(key)\n self._dict[key]=entry\n if self._maxsize:\n entry._next = entry._previous = None\n self._access(entry)\n self._checklru()\n elif self._maxsize:\n self._access(entry)\n finally:\n self._lock.release()\n\n entry._lock.acquire()\n try:\n value = self._unpack(entry)\n if value is NOT_INITIALIZED:\n opened = self.check(name,entry)\n value = self.build(name,opened,entry)\n self._pack(entry,value)\n self.commit()\n else:\n opened = self.check(name,entry)\n if opened is not None:\n value = self.build(name,opened,entry)\n self._pack(entry,value)\n self.commit()\n return value\n finally:\n entry._lock.release()", "def value_from_datadict(self, data, files, name):\n return data.get(name, None)", "def get_variable(self, name, visual=None):\n # get the variables list\n if visual is None:\n variables = self.variables.values()\n else:\n variables = self.get_visual(visual)['variables']\n variables = [v for v in variables if v.get('name', '') == name]\n if not variables:\n return None\n return variables[0]", "def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]", "def get_value(self, key: str) -> Any:\r\n if key is None:\r\n return self.data\r\n try:\r\n return self.data[key]\r\n except KeyError:\r\n return None", "def get_symbol(self, name): # pylint: disable=no-self-use,unused-argument\n if name in self._symbol_cache:\n return self._symbol_cache[name]\n return None", "def get_data(self, name, tags=None):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n potential_matches = self._name_index[name]\n if tags is None:\n matches = potential_matches\n else:\n matches = []\n for potential_match in potential_matches:\n is_match = all(tag in potential_match.tags for tag in tags)\n if is_match:\n matches.append(potential_match)\n n_matches = len(matches)\n if n_matches == 0:\n return None\n elif n_matches == 1:\n return matches[0]\n else:\n message = \"Ambiguous 
criteria: found {} matches for\" \\\n \" name={}, tags={}\".format(n_matches, name, tags)\n raise ValueError(message)", "def get_attr_value_with_name(self, name, default=no_default):\n try:\n attr = self.get_attr_with_name(name)\n return attr.value\n except ValueError:\n if default is not no_default:\n return default\n\n raise", "def config_value(name):\n def get():\n try:\n return config.get('yourls', name)\n except (NoOptionError, NoSectionError):\n return None\n return get", "def get_key_pair(self, keyname):\r\n try:\r\n return self.get_all_key_pairs(keynames=[keyname])[0]\r\n except IndexError: # None of those key pairs available\r\n return None", "def get(self, var_name):\n if var_name in self._var_names:\n iv = self._var_names.index(var_name)\n return self._vals[iv]\n elif var_name in self._params:\n return self._params[var_name]\n else:\n raise KeyError(\"Nothing found for %s in vars (%s) or params (%s)\" % (str(var_name),\n ', '.join(self._var_names),\n ', '.join(self._params.keys())))", "def get(self, name):\n\n # Fast path: check for a non-conditional param or for a conditional param\n # that was defined in the current scope.\n full_cond_name = self._get_name(name)\n if full_cond_name in self.values:\n if self._conditions_are_active():\n return self.values[full_cond_name]\n else:\n raise ValueError(\n 'Conditional parameter {} is not currently active'.format(\n full_cond_name))\n\n # Check for any active conditional param.\n found_inactive = False\n full_name = self._get_name(name, include_cond=False)\n for name, val in self.values.items():\n hp_parts = self._get_name_parts(name)\n hp_scopes = hp_parts[:-1]\n hp_name = hp_parts[-1]\n hp_full_name = self._get_name(\n hp_name,\n scopes=hp_scopes,\n include_cond=False)\n if full_name == hp_full_name:\n if self._conditions_are_active(hp_scopes):\n return val\n else:\n found_inactive = True\n\n if found_inactive:\n raise ValueError(\n 'Conditional parameter {} is not currently active'.format(\n full_cond_name))\n else:\n raise ValueError(\n 'Unknown parameter: {}'.format(full_name))", "def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None", "def get(self, name, default=UNDEFINED):\n try:\n return self.__getattr__(name)\n except AttributeError:\n return default", "def get(self, name, section=__section_default):\n \n if self.parser.has_option(section, name):\n return self.parser[section][name]\n else:\n return None", "def __getitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.text\n raise KeyError(name)", "def get_value(val_name, default=None):\r\n configuration = get_configuration()\r\n return configuration.get(val_name, default)", "def get(self, key):\n if key in self.fields:\n return self.fields.get(key).get()\n return None", "def find(name, *dicts):\n for d in dicts:\n if type(d) == str:\n return d\n elif name in d and d[name] is not None:\n return d[name]\n\n return None", "def field_by_name(self, name):\r\n return self._by_name[name]", "def get(self, key):\n # TODO: Check if the given key exists and return its associated value\n hash_key = self._bucket_index(key) # Gets the index of the key\n\n if self.buckets[hash_key].is_empty() is False: # If the hask_key exists\n for key_value_pair in self.buckets[hash_key]: # Iteratre through the value pair\n if key_value_pair[0] is key: # If the key matches\n return key_value_pair[1] # Return the value\n raise KeyError(\"Key doesn't exist\") # If key doesn't exist, return None", "def get_output_by_name(self, name):\n for var in 
self.outputs:\n if var.get_object().name == name:\n return var\n logger.exception(\"Output variable with name {0} not found\".format(name))\n return None", "def get_config_value(self, name):\r\n if name in self.config_values:\r\n return self.config_values[name]", "def __getattr__(self, name):\n value = self.__dict__.get(name)\n if not value:\n raise AttributeError('No such attribute {0}'.format(name))\n return value", "def get(name: str):\n if name not in Replacements._rep:\n return None\n return Replacements._rep[name]", "def get(self, name=None):\n raise NotImplementedError", "def get_value(key):\n\n request_dict = RequestFileCom.file_to_dict()\n\n try:\n\n return request_dict[key]\n\n except:\n\n return None", "def helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(self, key_name):\n try:\n return self.last_request_get_dict[key_name][0]\n except:\n return None", "def lookup_name(self, name):\n if name not in self.rule_dict:\n raise PegvmException(\"Failed to find rule named '{}'\".format(name))\n return self.rule_dict[name]", "def __getattr__ (self, name):\n\t\ttry:\n\t\t\treturn self.__dict__[name]\n\t\texcept KeyError:\n\t\t\treturn self.__dict__[\"value\"][name]", "def checkLookup(self, name):\n if not self.symbols.has_key(name):\n # we don't care\n return None\n # is it one we really care about\n t = self.symbols[name].getType()\n if t == \"typedef\":\n t = self.symbols[name].getAliasType()\n if t == \"general\" or t == \"struct\" or t == \"union\":\n return self.symbols[name]", "def __getitem__(self, name):\n idx = self.lookup[name]\n return self.stack[idx][1]", "def get(self, name):\n parts = name.split('.', 1)\n return getattr(self, parts[0]).get(parts[1], self.input_params_default[parts[1]])", "def get_sensor(name):\n for sen in SENSORS:\n if sen.name == name or sen.key == name:\n return sen\n return None", "def get(self, name: str) -> typing.Any:\n raise NotImplementedError()", "def __getitem__(self, name):\n ikEl = self.infoKinds.get(name, None)\n if ikEl:\n return ikEl.toDict(self)\n return None", "def lookup(name):", "def lookup(name):", "def get(self, key: K) -> Optional[V]:\n return self.mget([key])[0]", "def get(self, name):\n pass", "def get_system_value(name: str):\n return Config.objects.first().__dict__[name]", "def get(cls, name):\n cls.initialize()\n if isinstance(name, cls):\n return name\n else:\n return cls.mapping[name]", "def get_data(self, data_name: str):\n\n if data_name in self.__data.keys():\n return self.__data[data_name]\n else:\n print('There is not any keys named {0} in database'.format(data_name))\n return None" ]
[ "0.76976645", "0.76092434", "0.76021016", "0.7348735", "0.7155338", "0.70988375", "0.70493174", "0.69803816", "0.6933706", "0.6867564", "0.68649185", "0.68042666", "0.67748713", "0.672533", "0.67226636", "0.67117316", "0.6637197", "0.6624307", "0.66132957", "0.65803933", "0.6578484", "0.65675867", "0.6558693", "0.6543721", "0.6511197", "0.6493667", "0.64849216", "0.6472526", "0.6456433", "0.6456307", "0.6444573", "0.64292854", "0.6421646", "0.6419552", "0.6419198", "0.6417528", "0.64133644", "0.6407715", "0.6406344", "0.6406344", "0.63445586", "0.6342158", "0.6334862", "0.6320701", "0.6308353", "0.6300377", "0.629844", "0.62943095", "0.62764543", "0.62521034", "0.6230049", "0.62163997", "0.6205648", "0.62020034", "0.61996084", "0.61756676", "0.61708033", "0.61704755", "0.6166151", "0.6149059", "0.61472243", "0.61343366", "0.6130061", "0.61195225", "0.6114734", "0.61145043", "0.610411", "0.6104098", "0.61007845", "0.6098996", "0.6093684", "0.6092315", "0.60829115", "0.6082215", "0.6079767", "0.60785913", "0.6075752", "0.60661757", "0.6061598", "0.60410285", "0.60405564", "0.6039306", "0.60386795", "0.6038367", "0.60381585", "0.60327935", "0.6016286", "0.6012984", "0.6007527", "0.6005475", "0.6002015", "0.59950066", "0.5993047", "0.5990674", "0.5986611", "0.5986611", "0.5980219", "0.5973502", "0.596563", "0.5965122", "0.59613717" ]
0.0
-1
Return a random value
async def get_random(self, default=None, **kwargs): all_dict = await self.get_all() if all_dict: key = random.choice(list(all_dict.keys())) return { key: all_dict[key] } else: return default
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_random_value(self):\r\n return random.randint(1, 10)", "def random():\r\n return R.NextDouble()", "def random():\n return constant(1)", "def get_random_value():\n return randint(0, 255) / 256.0", "def get_random_value():\n return randint(0, 255) / 256.0", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def random() -> float:\n ...", "def random(self):\r\n return random.randint(1, 4)", "def randomize_value(self) -> None:", "def getRandom(self) -> int:\n # 此处可以优化\n datas = list(self.data.keys())\n pos = self.rand.randint(0, len(datas) - 1)\n val = datas[pos]\n return val", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def rand(self):\n return self.State.rand()", "def getRandom(self):\n random_index = randint(0, len(self.list_val)-1)\n return self.list_val[random_index]", "def get_random(self):\n return self._get_random()", "def random_float():\n return (random() - 0.5) * 2", "def maybe(self):\n return random.getrandbits(1)", "def _random_function(self, random_state):\n return random_state.rand", "def random_test(self):\r\n return 1", "def random_test(self):\r\n return 1", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def getRandom(self):\n return random.choice(self.data)", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def getRandom(self) -> int:\n return random.choice(self.array)", "def getRandom(self) -> int:\n return random.choice(list(self.d.keys()))", "def random(self):\n return self._randomize()", "def getRandom(self) -> int:\n size = len(self.value_set)\n if size > 0:\n from random import randint\n x = randint(1, size)\n return self.values[x - 1]", "def _get_random_returns(self): \n return self.asset_process.distrib.random()", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def getRandom(self) -> int:\n import random\n return random.choice(self._array)", "def computer_random():\r\n ci = random.sample(range(1,43),5)\r\n return ci", "def rand(self):\n raise NotImplementedError", "def random(self):\n return self._random", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def getRandom(self) -> int:\n return choice(self.array)", "def getRandom(self) -> int:\n count = len(self.arr)\n return self.arr[randint(0, count-1)]", "def getRandom(self) -> int:\n return random.choice(self.l)", "def randj():#could be combined with randik\n return(int(round(random.random()*1)))", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def getRandom(self) -> int:\n return random.choice(self.arr)", "def get_random_value(self):\n\n # trapezoidal distribution parameters\n loc = self.value[0]\n scale = self.value[3] - self.value[0]\n c = (self.value[1] - self.value[0]) / scale\n d = (self.value[2] - self.value[0]) / scale\n\n # random value from trapezoidal distribution\n r = trapz.rvs(c, d, loc=loc, scale=scale)\n return r", "def random_number() -> int:\r\n return random.randint(1, 3)", "def getRandom(self) -> int:\n return random.choice(list(self.set))", "def random (self, checkfn=None):\n if len(self) == 0:\n return None\n return self.random_pick(checkfn=checkfn)[1]", "def random_number():\n random_num = random.choice(empty)\n return random_num", "def getRandom(self) -> int:\n return random.choice(self.keys)", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self) -> int:\n return choice(self.arr)", "def getRandom(self):\n result, node, index = self.node, self.node.next, 
1\n\n while node:\n if random.random() < (1.0 / (index+1)):\n result = node\n node = node.next\n index += 1\n return result.val\n\n\n\n # Your Solution object will be instantiated and called as such:\n # obj = Solution(head)\n # param_1 = obj.getRandom()", "def getRandom(self):\n # pick a random number from the list\n return random.choice(self.nums)", "def getRandom(self) -> int:\n return random.choice(self.items)", "def getRandom(self) -> int:\n return self.nums[random.randint(0, len(self.nums) - 1)]", "def getRandom(self):\n \n return self.data[random.randint(0, len(self.data) - 1)]", "def getValue(self):\n return random.choices(self.indices, weights=self.weights, k=1)[0]", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def getRandom(self) -> int:\n return random.choice(self.list)", "def _randint(*args, **kwargs):\n return random.randint(*args, **kwargs)", "def randomRGBValue(self):\n return random.randrange(0, 256)", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def getRandom(self) -> int:\n if self.data:\n return self.data[random.randrange(len(self.data))]\n else:\n return None", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def getRandom(self) -> int:\n from random import choice\n return choice(self.list)", "def valg(self):\n num = random.randint(0, 2)\n return self.mulige_trekk[num]", "def getRandom(self) -> int:\n return random.choice(self.elements)", "def get_random(self):\n self.random_range = list(np.array(self.friendly_range) * self.conversion)\n return np.random.uniform(self.random_range[0], self.random_range[1], 1)[0]", "def get_temperature(self):\n rand_number = randint(18, 30)\n return rand_number", "def getRandom(self):\n return random.choice(self.vec)", "def my_random(a):\r\n import random\r\n r = random.randint(0, 100)\r\n return a + r", "def random_attk_int(self):\n attk_random = random.randint(1, 2)\n return attk_random", "def getRandom(self) -> int:\n import random \n \n count = 0\n node = self.head\n while node:\n if random.randint(0, count) == 0:\n ans = node.val\n node = node.next\n count += 1\n return ans", "def pickSecretNumber(): \n return random.randrange(1, 11)", "def getRandom(self) -> int:\n return random.choice(self._list)", "def randomMethod(self):\n return random.random()", "def base_pick():\n\n rnd = generate_random(2, 15)\n return rnd", "def getRandom(self) -> int:\n return random.choice(self.store_list)", "def getRandom(self) -> int:\n # Note randint range is inclusive at both end\n random_idx = random.randint(0, len(self.slot) - 1)\n return self.slot[random_idx]", "def _random(self, key):\n\n if hasattr(key, \"encode\"):\n key = key.encode('ascii')\n\n value = (zlib.crc32(key, self.seed) & MAX_VALUE)\n\n return value * INV_MAX_VALUE", "def random_number():\n return random.randint(0, 9999)", "def i_rand_a():\n return i_random() % 95 + 32", "def random(self):\n\n return self._random", "def generate_value(loc, data):\n return np.random.randint(100, size=1)", "def getRandom(self) -> int:\n if self.counts:\n # key = list(self.counts.keys())\n return random.choice(self.counts.keys(), weights=list(self.counts.values()))\n return None", "def getRandomValue(value1, value2):\r\n \r\n value1 = int(value1)\r\n value2 = int(value2)\r\n if value1 > value2:\r\n return random.randint(value2, value1)\r\n 
else:\r\n return random.randint(value1, value2)", "def get_number(maxValue):\r\n return random.randint(1, maxValue)", "def getRandom(self):\n res = self.head.val\n cur = self.head.next\n count = 2\n\n while cur != None:\n if random() <= 1.0 / count:\n res = cur.val\n\n count += 1\n cur = cur.next\n return res", "def getRandom(self):\n return random.choice(self.ls)", "def safe_rand(self):\n rand_n = np.random.rand()\n if rand_n == float(1):\n rand_n -= 1e-10\n return rand_n", "def getRandom(self):\n randomIndex = random.randrange(0, self.size)\n return self.nums[randomIndex]", "def getRandom(self):\n if not self.l:\n return -1\n return random.choice(self.l)", "def getRandom(self) -> int:\n if not self.array: return -1\n rand = randrange(0, len(self.array))\n return self.array[rand]", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def getRandom(self):\n import random\n res = -1\n len = 0\n head = self.head\n while head:\n if random.randint(0,len) == 0:\n res = head.val\n head = head.next\n len += 1\n return res", "def demo_a_number(random_number):", "def getRandom(self):\r\n return self.data[rnd.randrange(self.len)]", "def computer_generate(self):\n return choice[random.randrange(3)]", "def choose(self):\n\n i = bisect.bisect(self._p, random.random())\n return self._values[i]", "def generate_random_value(self, type):\n generators = {\n str: lambda: self.generate_random_string(20, uppercase=True, punctuations=True),\n int: lambda: random.randrange(100000),\n float: lambda: random.random() * 100000.0,\n bool: lambda: bool(random.getrandbits(1)),\n list: lambda: self.generate_random_list_or_string(),\n dict: lambda: self.generate_random_dict_or_string()\n }\n generator = generators[type]\n return generator()" ]
[ "0.866445", "0.8307578", "0.826182", "0.8153366", "0.81142724", "0.8095736", "0.8095736", "0.8003371", "0.79239017", "0.783512", "0.7658656", "0.7626082", "0.75570095", "0.7474726", "0.7451215", "0.7407844", "0.7406439", "0.740506", "0.73764557", "0.73764557", "0.73675555", "0.73354113", "0.7318657", "0.73172486", "0.7314898", "0.7309283", "0.73034954", "0.7301367", "0.730088", "0.72965527", "0.7260444", "0.72604185", "0.7222816", "0.72003645", "0.7195245", "0.71786463", "0.71779853", "0.7166698", "0.7163869", "0.71595687", "0.71558595", "0.7151425", "0.7143425", "0.7138987", "0.71307015", "0.7097357", "0.70926875", "0.70926875", "0.70926875", "0.70921034", "0.7085504", "0.7068462", "0.7053324", "0.7052881", "0.7048234", "0.7048112", "0.7048112", "0.7048112", "0.7048112", "0.704429", "0.704301", "0.70385265", "0.7031479", "0.70142466", "0.70142466", "0.7008293", "0.7005622", "0.70032597", "0.6995081", "0.69936955", "0.69888437", "0.6976108", "0.6952505", "0.6951392", "0.6930053", "0.6926739", "0.6925142", "0.69235665", "0.692239", "0.69218516", "0.69187814", "0.68998927", "0.68706715", "0.6857873", "0.6856078", "0.68527603", "0.6849803", "0.68485916", "0.68467295", "0.6845502", "0.6843345", "0.68374985", "0.6824634", "0.681574", "0.680977", "0.68095607", "0.67860913", "0.6785018", "0.6783185", "0.6783", "0.6781105" ]
0.0
-1
Lee la base de datos de la carpeta y la carga como un data frame
def lee_base_covid(fecha): zipname = "data/"+fecha+"COVID19MEXICO.zip" filename = fecha+"COVID19MEXICO.csv" try: start_time = time() zipfile = ZipFile(zipname) df = pd.read_csv(zipfile.open(filename), parse_dates=True) end_time = time() total_time = end_time - start_time except FileNotFoundError: print("La base de datos no se encuentra en la carpeta.") print(f"La base de datos tardó en cargarse {total_time} segundos") return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_dataframe(self, serie, metadata=None):\n header = self.get_data_header(serie, dataset='cnv')\n df = self.get_data_in_frame(serie, header, dataset='cnv')\n df = self.df_handler.map_column_names_of_dataframe(df)\n\n return df", "def __create_data_frame(self, soup):\n self.__data_frame = pd.read_html(str(soup))[0]\n timestamp = self.__navigate_rows(soup)\n # rename dataframe columns by columns name in sqlite\n self.__data_frame = self.__data_frame.rename(\n columns=self.__columns_name)\n self.__data_frame['time'] = pd.Series(timestamp)\n self.__data_frame['chg_perc'] = self.__data_frame['chg_perc'].\\\n str.replace('%', '')\n self.__data_frame['created_date'] = datetime.now()\n # save_file(self.__name_file, self.__data_frame.to_string())", "def get_data(self)->pd.DataFrame:\n pass", "def get_data(self):\n\n return pd.read_sql_query(\"Select * from {table}\".format(table=self.table_name), con=self.con)", "def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def open_data(table):\n engine = create_engine(myDB, encoding='latin1') \n conn = engine.connect()\n select = conn.execute('select * from ' + table)\n\n df = pd.DataFrame(select.fetchall()) \n df.columns = select.keys()\n\n conn.close()\n return df", "def load_dataset():\n\n df_ = pd.read_excel(\"D:\\VERİBİLİMİOKULU\\VERİSETLERİ\\post_bilgileri.xlsx\")\n df = df_.copy()\n return df", "def get_updated_dataframe():\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.io import fetch_paginated_data\n records = fetch_paginated_data(constants.API_RECORDS_ENDPOINT.value)\n df = parse_records_to_dataframe(records) # pylint: disable=invalid-name\n return df", "def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! {self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' 
in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n ret_df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return ret_df", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def dataframe(self):\n if not self.all_records:\n print('No rows cached.')\n return\n dict_list = [row.as_dict() for row in self.all_records]\n columns = self.all_records[0].keys\n dataframe = pd.DataFrame(dict_list, columns=columns)\n return dataframe", "def _get_data(*, from_web: bool) -> pd.DataFrame:\n\n df = read_in_data.SaveFormats.CSV.read(from_web=from_web)\n return df", "def get_df_from_db(self, query):\n cursor = self.conn.cursor()\n cursor.execute(\"set hive.execution.engine = tez\")\n cursor.execute(\"set tez.queue.name = sephora_internal\")\n cursor.execute(query)\n data = cursor.fetchall()\n col_des = cursor.description\n col_des = [tuple([x[0].split('.')[1] if '.' in x[0] else x[0]] + list(x[1:])) for x in col_des]\n col_name = [col_des[i][0] for i in range(len(col_des))]\n df = pd.DataFrame([list(i) for i in data], columns=col_name)\n return df", "def uploader_actividad(df,to_model):\n\tengine = create_engine(\"mssql+pyodbc://sa:BaseSQL123@10.160.8.96:1433/vpcanales?driver=SQL+Server+Native+Client+11.0\")\n\n\tfecha = df.loc[0,'Fecha']\n\tprint(fecha.month)\n\tprint(fecha.year)\n\n\tif to_model.__name__==\"Activacion\":\n\n\t\tActivacion.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_activacion\n\t\t\t@fecha_actividad='{0}',\n\t\t\t@plataforma='{1}',\n\t\t\t@tecnologia='{2}',\n\t\t\t@terminal='{3}',\n\t\t\t@cantidad='{4}',\n\t\t\t@codigo_plan='{5}',\n\t\t\t@mes={6},\n\t\t\t@ano={7},\n\t\t\t@codigo_agente='{8}'\n\t\t\t \"\"\".format(row[2],\n\t\t\trow[5],\n\t\t\trow[6],\n\t\t\trow[7],\n\t\t\trow[-2],\n\t\t\trow[4],\n\t\t\trow[2].month,\n\t\t\trow[2].year,\n\t\t\trow[3])\n\t\t\tcursor.execute(string).commit()\n\n\t\tresults = Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\t\treturn results\n\n\n\n\telse:\n\n\t\tAlta.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_alta\n\t @fecha_actividad='{0}',\n\t @plataforma='{1}',\n\t @tecnologia='{2}',\n\t @terminal='{3}',\n\t @cantidad='{4}',\n\t @codigo_plan='{5}',\n\t @mes={6},\n\t @ano={7},\n\t @codigo_agente='{8}' \"\"\".format(row[2],\n\t row[5],\n\t row[6],\n\t row[7],\n\t row[-2],\n\t row[4],\n\t row[2].month,\n\t row[2].year,\n\t row[3])\n\t\t\tcursor.execute(string).commit()\n\n\n\t\tresults = Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = 
Sum('cantidad'))\n\n\n\t\tresults.update(Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\n\n\t\treturn results", "def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df", "def get_db_data(self, sql_string):\n connection_string = f\"\"\"\n host='{self.host}' \n dbname='{self.db_name}' \n user='{self.user}' \n password='{self.password}' \n port='{self.port}'\n \"\"\"\n\n with psycopg2.connect(connection_string) as connection:\n cursor = connection.cursor()\n cursor.execute(sql_string)\n\n dataframe = pd.DataFrame(cursor.fetchall())\n dataframe.columns = [desc[0] for desc in cursor.description]\n\n return dataframe", "def Dbase_to_DF(self):\n for item in sorted(self.dbase.keys()):\n self.dataFRAME[item]=self.dbase[item]\n return self.dataFRAME", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def download_data(base_url,\n lista_anni,\n lista_inquinanti):\n \n # Inizializziamo la lista dei df ognuno dei quali corrisponde ad un inquinante\n df_template = pd.DataFrame(columns=['jd','h','1','2','3','4','5','6','7','8','9','10','11','13','14','15','16','38','39','40',\n '41','45','47','48','49','55','56','57','60','83','84','85','86','87','Anno','Inquinante'])\n lista_df = [df_template]\n\t\n\t# Per ogni inquinante\n for chimico in lista_inquinanti:\n \t# Per ogni anno\n for anno in lista_anni:\n print('Retrieving {} for year {} from {}'.format(chimico, anno, compose_url(base_url, anno, chimico)))\n \n # Esegui la richiesta\n r = requests.get(compose_url(base_url, anno, chimico))\n\n # Crea il rispettivo dataframe\n df = write_response(r)\n print('{} rows'.format(len(df)))\n\t\t\t\n\t\t\t# Prendi la linea che corrisponde all'header del df\n columns_ = df.iloc[0].index[0]\n \n \"\"\" Individua i nomi delle colonne splittando la stringa che li contiene tutti\n ed escludendo lestringhe vuote ottenute tramite lo split\"\"\"\n clean_columns = [item.strip()\\\n for item in columns_.split(' ')\\\n if len(item)!=0]\n \n # aggiungo le colonne Anno e Inquinante\n columns = clean_columns + ['Anno', 'Inquinante']\n\t\t\t\n list_rows = []\n # Per ogni linea del df\n for line_idx in range(1, len(df)):\n \t\n # Come nel caso precedente splitto la linea per ottenere le diverse celle\n line = df.iloc[line_idx].values[0].strip().split(' ')\n \n # Quindi ottengo la lista delle celle della riga i-th\n raw_line = [item for item in line if len(item)!=0] \n \n # Aggiungiamo le colonne anno e inquinante\n list_rows += [raw_line + [anno, chimico]]\n\t\t\t\n\t\t\t# Definiamo il nuovo dataset \n df_idx = pd.DataFrame(list_rows, columns=columns)\n \n # Creiamo aggiungiamo alla lista di df da concatenare quello appena creato \n lista_df += [df_idx]\n\n\t# Facciamo la union dei df tenendo conto che le colonne possono essere diverse (concat con pandas)\n df_final = pd.concat(lista_df, ignore_index=False)\n\n # sostituisco i NaN e -999.0 con un valore vuoto\n df_final = df_final.fillna('')\n df_final = df_final.replace(to_replace='-999.0', value='')\n \n return df_final", "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n 
z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! 
')", "def getBaseMonetaria(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-factores-explicacion-base-monetaria\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def get_df(self) -> pd.DataFrame:\n return pd.DataFrame(self.fetchall(), columns=self.headers())", "def dataframe(self):\n\n if self._dataframe is None:\n try:\n import pandas as pd\n except ImportError:\n raise RuntimeError('To enable dataframe support, '\n 'run \\'pip install datadotworld[pandas]\\'')\n\n self._dataframe = pd.DataFrame.from_records(self._iter_rows(),\n coerce_float=True)\n\n return self._dataframe", "def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df", "def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)", "def fetch(url, header_path, id, ip, dbase, targets_table):\n # url = 'http://esimbad/testGSAV7/reslabo?FENID=resLaboPatDitep&NIP={}' \\\n # '&STARTDATE={}&ENDDATE={}'\n\n # header_path = '~/workspace/data/biology/header.csv'\n # constant names specific to our database\n KEY1 = 'id'\n KEY2 = 'NIP'\n C1J1 = 'C1J1'\n\n header = pd.read_csv(header_path, sep=';', encoding='latin1').columns\n\n\n engine = get_engine(id, ip, dbase)\n\n df_ids = sql2df(engine, targets_table)[[KEY1, 'nip', C1J1]]\n df_ids.rename({'nip': KEY2}, inplace=True, axis=1)\n df_ids['patient_id'] = df_ids[KEY1]\n\n cols = [KEY2, 'Analyse', 'Resultat', 'Date prelvt']\n df_res = pd.DataFrame(data=None, columns=cols)\n\n for index, row in df_ids.iterrows():\n nip = row[KEY2].replace(' ', '')\n patient_id = row['patient_id']\n c1j1_date = row[C1J1].date()\n start_date = c1j1_date - timedelta(weeks=8)\n\n c1j1 = str(c1j1_date).replace('-', '')\n start = str(start_date).replace('-', '')\n\n req = requests.get(url.format(nip, start, c1j1))\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n new_df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n new_df = new_df.loc[:, cols + ['LC']] # remove LC\n\n # normalize nip\n new_df[KEY2] = row[KEY2]\n # new_df[KEY2] = new_df[KEY2].map(str)\n # new_df[KEY2] = [nip[:4] + '-' + nip[4:] for nip in new_df[KEY2]]\n\n new_df.drop('LC', axis=1, inplace=True)\n\n df_res = pd.concat([df_res, new_df], axis=0,\n sort=False, ignore_index=True)\n\n return df_res", "def _data_frame(content):\n response = loads(content)\n key = [x for x in response.keys() if x in c.response_data][0]\n frame = DataFrame(response[key])\n final_frame = _convert(frame)\n return final_frame", "def 
fetch_db_dataframe(self, table_name):\n try:\n df = pd.read_sql(\"SELECT * from \" + table_name, self.engine)\n print(\"-I- Completed read of DataFrame from \" + table_name)\n return df\n except Exception as e:\n print(\"-W- \" + str(e))", "def sourceToDataframe(self):\n df = pd.read_excel(self.filename)\n df.columns = df.iloc[10]\n df = df.drop(df.index[:11])\n self.df = df #makes this df accessible to the whole class now\n self.insertODN()\n display(df.head())", "def pp_carga(carga):\n df = pd.DataFrame(carga) \n df.columns = df.iloc[0]\n df = df.drop(df.index[0])\n df = df.reset_index(drop=True);\n return df", "def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:\n return data", "def df():\n fs.df()", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def _get_data(self):\n project_name, experiment_id = self.parent._get_parent_identifiers()\n\n self._data = self.repository.get_dataframe_data(\n project_name, self.id, experiment_id=experiment_id\n )", "def set_data(self, year, month):\n data = _download_to_df(self.url, self.table_name, year, month)\n data = data.loc[:, self.table_columns]\n with self.con:\n data.to_sql(self.table_name, con=self.con, if_exists='replace', index=False)\n self.con.commit()", "def get_training_data(db_conn):\n return pd.read_sql('''select * from churn_model.churn_data;''', db_conn)", "def fao1():\n df = pd.read_csv(\"/Users/Elsa/Desktop/Covid_Agosto/Proyecto_Elsa/Proyecto_individual_Elsa/src/main/FAO.csv\",encoding=\"ISO-8859-1\")\n \n return df", "def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)", "def getDataframe(self):\n self._loadCSVFile()\n self._cleanProcessDf()\n return self._df", "def read(name, db):\n \n # Make connection with the database\n\tconn = sqlite3.connect(db)\n\tdf = pd.read_sql_query(\"select * from \" + name + ';', conn)\n \n # Print loaded data table name and return DataFrame\n\tprint(name + ': loaded')\n\treturn df", "def __init__(self):\n BDLQuery.__init__(self)\n self.pandas_df = []", "def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)", "def read_data(transferencias):\n # read in triples of user/artist/playcount from the input dataset\n transfer_count = transferencias.groupby([\"REFERENCIA_1\", \"REFERENCIA_ORIGEN\"]).IMPORTE.count().reset_index()\n\n # for index, row in transfer_count.iterrows():\n # cliente = Empresa.objects.get(fiscal_id=str(row['REFERENCIA_ORIGEN']))\n # proveedor = Empresa.objects.get(fiscal_id=str(row['REFERENCIA_1']))\n # proveedor.clients.add(cliente)\n # cliente.providers.add(proveedor)\n\n print(\"clientes y proveedores cargados\")\n\n data = pandas.DataFrame()\n data['user'] = transfer_count['REFERENCIA_ORIGEN'].astype(\"category\")\n data['artist'] = transfer_count['REFERENCIA_1'].astype(\"category\")\n data['transfers'] = transfer_count['IMPORTE'].astype(\"category\")\n\n transfers = coo_matrix((data['transfers'].astype(float),\n 
(data['artist'].cat.codes.copy(),\n data['user'].cat.codes.copy())))\n\n return data, transfers", "def load_pdo():\n df = pd.read_csv('./archive/pdo_monthly.csv')\n pdo = pd.DataFrame(index = pd.to_datetime(df['yyyymm'], format='%Y%m'), columns=['pdo'])\n pdo['pdo'] = df.pdo.values\n return pdo", "def withOutPandas()-> None:\n logging.info(f\"Making sure the DB is set up {getTime()}\" )\n\n with getCon() as conn:\n with getCursor(conn,True) as cur:\n cur.execute(\"CREATE TABLE IF NOT EXISTS data (iso_code TEXT,continent TEXT,location TEXT,date DATE,total_cases FLOAT,new_cases FLOAT,new_cases_smoothed FLOAT,total_deaths FLOAT,new_deaths FLOAT,new_deaths_smoothed FLOAT,total_cases_per_million FLOAT,new_cases_per_million FLOAT,new_cases_smoothed_per_million FLOAT,total_deaths_per_million FLOAT,new_deaths_per_million FLOAT,new_deaths_smoothed_per_million FLOAT,reproduction_rate FLOAT,icu_patients FLOAT,icu_patients_per_million FLOAT,hosp_patients FLOAT,hosp_patients_per_million FLOAT,weekly_icu_admissions FLOAT,weekly_icu_admissions_per_million FLOAT,weekly_hosp_admissions FLOAT,weekly_hosp_admissions_per_million FLOAT,new_tests FLOAT,total_tests FLOAT,total_tests_per_thousand FLOAT,new_tests_per_thousand FLOAT,new_tests_smoothed FLOAT,new_tests_smoothed_per_thousand FLOAT,positive_rate FLOAT,tests_per_case FLOAT,tests_units TEXT,total_vaccinations FLOAT,people_vaccinated FLOAT,people_fully_vaccinated FLOAT,total_boosters FLOAT,new_vaccinations FLOAT,new_vaccinations_smoothed FLOAT,total_vaccinations_per_hundred FLOAT,people_vaccinated_per_hundred FLOAT,people_fully_vaccinated_per_hundred FLOAT,total_boosters_per_hundred FLOAT,new_vaccinations_smoothed_per_million FLOAT,stringency_index FLOAT,population FLOAT,population_density FLOAT,median_age FLOAT,aged_65_older FLOAT,aged_70_older FLOAT,gdp_per_capita FLOAT,extreme_poverty FLOAT,cardiovasc_death_rate FLOAT,diabetes_prevalence FLOAT,female_smokers FLOAT,male_smokers FLOAT,handwashing_facilities FLOAT,hospital_beds_per_thousand FLOAT,life_expectancy FLOAT,human_development_index FLOAT,excess_mortality_cumulative_absolute FLOAT,excess_mortality_cumulative FLOAT,excess_mortality FLOAT,excess_mortality_cumulative_per_million FLOAT)\")\n cur.execute(\"TRUNCATE data\")\n \n with open(DATA_FILE) as f:\n data = list(csv.reader(f))\n logging.info(f\"Slicing {getTime()}\")\n\n SLICE_SIZE = len(data) // 100\n rows = [data[i:i + SLICE_SIZE] for i in range(1, len(data), SLICE_SIZE)]\n logging.info(f\"Finished slicing {getTime()}\")\n logging.info(f\"Inserting {getTime()}\")\n\n with Pool(2) as p:\n p.map(insert,rows)\n logging.info(f\"Finished Inserting {getTime()}\")\n \n logging.info(f\"Gettign Uniqe Contries {getTime()}\")\n with getCon() as conn:\n with getCursor(conn) as cur:\n cur.execute(\"SELECT DISTINCT location FROM data\")\n result =cur.fetchall()\n with open(RESULT_FILE,\"w\", newline='') as r:\n writer = csv.DictWriter(r,fieldnames=[\"Uniqe Countries\"])\n writer.writeheader()\n writer.writerow({\"Uniqe Countries\":len(result)})", "def __init__(self, df):\n self.data = df", "def __init__(self, header):\n self.database = pd.DataFrame(columns=header)\n self.database[\"date\"] = self.database[\"date\"].astype('datetime64[ns]')", "def set_data(self, df):\n self.df = df", "def getDataFrame(self):\n return self.df", "def cursor_to_dataframe(cur):\n description = cur.description\n column_names = [item.name for item in description]\n data = cur.fetchall()\n df = pandas.DataFrame(data, columns=column_names)\n cur.close()\n return df", "def 
pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def __get_all_data(self,tickr):\n self.__csvurl=f\"https://query1.finance.yahoo.com/v7/finance/download/{tickr}?period1=1092873600&period2={int(datetime.now().timestamp())}&interval=1d&events=history&includeAdjustedClose=true\"\n s=get_historic_data(self.__csvurl)\n\n \"\"\"you should not be able to access dataframe from outside the class\"\"\"\n df=pd.read_csv(io.StringIO(s.decode('utf-8')))\n df=df.dropna()\n df_columns=['Date','High','Low','Close','Adj Close']\n\n if not set(df_columns).issubset(df.columns):\n raise ValueError(f\"One or more columns are missing {df_columns}\")\n\n if len(df.index)<5:\n raise ValueError(f\"Cannot calculate EMA 5\")\n\n if len(df.index)<20:\n raise ValueError(f\"Cannot calculate SMA 20\")\n\n \"\"\"set date as index (required for filtering,sorting,grouping etc etc\"\"\"\n df['Date'] = pd.to_datetime(df['Date'], format = '%Y-%m-%d')\n\n df.set_index(['Date'], inplace=True)\n\n\n return df", "def dataframe(self):\n\t\treturn self._dataframe", "def _get_df_from_db(self, tab_name: str, cols: list or str = \"*\",\n condition: str or None = None, limit: int or None = None):\n cols = ', '.join(cols) if cols != '*' else cols\n sql_query = \"\"\"SELECT {cols} FROM {tab} \"\"\".format(cols=cols, tab=tab_name)\n if condition:\n sql_query += \"\"\"WHERE {cond} \"\"\".format(cond=condition)\n if limit:\n sql_query += \"\"\"LIMIT {l}\"\"\".format(l=limit)\n df = pd.read_sql(sql_query, self.engine)\n return df", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except 
KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def get_main_dataset(self) -> pd.DataFrame:\n pass", "def construct_data_frame(self) -> pd.DataFrame:\n data_frame = self.base_data_frame[\n [self.name_col, self.description_col]\n ].reset_index()\n data_frame.columns = [\"label_encoder\", \"name\", \"description\"]\n\n return data_frame.set_index(\"label_encoder\")", "def getIndicePreciosInternosAlPorMayorBase2015(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-indice-precios-internos-basicos-al-por-mayor-dic-2015-100\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def load_dataframe(self) -> None:\n with open(self.__data_path.split('.')[0] + '_dtypes.json', 'r') as f:\n dtypes = json.load(f)\n self.__DataFrame = pd.read_csv(self.__data_path, dtype=dtypes)\n self.map_items()", "def get_data_from_database(query, db_connection):\n\n dataframe = pandas.read_sql(query, con=db_connection)\n print(\"Data from database: \", dataframe.head(5))\n print(\"Size of dataframe from database: \", dataframe.shape)\n\n return dataframe", "def get_dataframe(self):\n self.logger.info('Fetching movie records...')\n session = connect()\n\n cols = [\n Movie.movie_id,\n Movie.title,\n Movie.start_year,\n Movie.genres,\n Movie.description,\n Movie.kind,\n ]\n\n filters = [\n Movie.description.isnot(None),\n Movie.genres.isnot(None),\n ]\n\n query = session.query(*cols).filter(*filters).order_by(Movie.start_year.desc())\n\n try:\n return pd.read_sql(query.statement, session.bind)\n finally:\n session.close()", "def loadValueTableFromSqlite(): \n conn = sqlite3.connect(prefix + args.db)\n df = io.read_frame(\"SELECT * FROM value\", conn) \n return df", "def convert_to_df(data):\r\n ans = pd.DataFrame(data)\r\n return ans", "def query_save_data_frame(self, query):\n self.recordset_df = pd.read_sql_query(query, self.con)\n return self", "def refresh(self) -> pd.DataFrame:\n log.info(f\"Refreshing {self.fqtable}\")\n df = db.db_to_df(fqtable=self.fqtable, ids=self.ids)\n io.df_to_parquet(df=df, path=self.path)\n return df", "def dataframe(self):\n return self.generator.dataframe", "def read_and_prepare_dataframe(start_date='1980-01-01'):\n \n # Read the dataset and rename 'dt' to 'Date'\n df = pd.read_csv('Data/GlobalLandTemperaturesByCountry.csv', parse_dates=['dt'])\n df.rename(columns={'dt':'Date'}, inplace=True)\n \n # Filter for Canada\n df = df[df['Country']=='Canada']\n \n # Filter out data prior to start date\n df = 
df[df['Date'] >= start_date]\n \n # To ensure data is sorted\n df = df.sort_values('Date')\n \n # Set index to Date and return the final dataframe\n return df.set_index('Date')", "def Mydata():\n\n stmt = db.session.query(Appsdata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n \n return jsonify(df.to_dict())", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def fetchall_df(result_proxy):\n# result = result_proxy.fetchall(keep_col_names=T) ???\n result = [row for row in tqdm(result_proxy)]\n return pd.DataFrame(result, columns=result[0].keys())", "def get_dataframe(start_date=INITIAL_DATE, end_date=None) -> pd.DataFrame:\n\n end_date = (\n end_date\n if end_date is not None\n else dt.datetime.utcnow() - dt.timedelta(days=1)\n ).date()\n\n dates = pd.date_range(start_date, end_date)\n\n with futures.ThreadPoolExecutor() as ex:\n\n df = pd.concat(ex.map(get_dataframe_for_date, dates))\n\n df.rename(columns=str.lower, inplace=True)\n\n df.drop(columns=[c for c in df.columns if \"/\" in c], inplace=True)\n\n df[\"datetime\"] = pd.to_datetime(df[\"last_update\"])\n\n df[\"date\"] = df.datetime.map(lambda d: d.date())\n\n # df[\"county\"] = df.admin2\n renames = {\n \"country_region\": \"country\",\n \"province_state\": \"state\",\n \"admin2\": \"county\",\n }\n\n df.rename(columns=renames, inplace=True)\n\n df.drop(\n columns=[\"last update\", \"last_update\", \"lat\", \"long_\", \"combined_key\"],\n inplace=True,\n )\n\n return df", "def table_save_data_frame(self, table_name):\n self.recordset_df = pd.read_sql_table(table_name, self.con)\n return self", "def getdata(self, columns, rtkencoding):\r\n self.restab = pd.read_csv(self.pathname, encoding=rtkencoding,\r\n usecols=columns)[columns]", "def _load_df(self):\n oauth_json = self.plugin_config[\"service_account_credentials\"]\n with tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".json\") as ntf:\n json.dump(oauth_json, ntf)\n ntf.seek(0)\n\n gc = gspread.service_account(filename=ntf.name)\n \n sheet_url = self.plugin_config[\"sheet_url\"]\n sheet = gc.open_by_url(sheet_url)\n self.worksheet = sheet.get_worksheet(0)\n data = self.worksheet.get_all_values()\n colnames = data.pop(0)\n\n self._df = pd.DataFrame(data, columns=colnames)", "def pandaData():\r\n tweets = pd.DataFrame()\r\n return tweets", "def data_frame_creator(self):\n\n return pd.DataFrame()", "def __gen_datatable__(self):\n # | - __generate_data_table\n rows_list = []\n for Job_i in self.Job_list:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop, value in Job_i.job_params.items():\n entry_param_dict[prop] = value\n\n 
entry_param_dict[\"Job\"] = Job_i\n entry_param_dict[\"path\"] = Job_i.full_path\n entry_param_dict[\"max_revision\"] = Job_i.max_revision\n entry_param_dict[\"revision_number\"] = Job_i.revision_number\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def create_dataframe(connection: sqlite3.Connection) -> pd.DataFrame:\n dataframe = pd.read_sql_query(f\"\"\"\n SELECT\n combined_jobs.id, combined_jobs.company, combined_jobs.link, combined_jobs.location,\n combined_jobs.date, combined_jobs.content, combined_jobs.title, location_cache.location,\n location_cache.latitude, location_cache.longitude\n FROM\n combined_jobs\n LEFT OUTER JOIN\n location_cache on (combined_jobs.location = location_cache.location)\"\"\",\n connection)\n print(dataframe)\n return dataframe", "def get_data():\r\n data = pd.read_csv(FILE_PATH)\r\n # Replace 'Zero KM' by year 2022 assuming it's a new car\r\n data['Ano'] = data['Ano'].str.replace('Zero KM', '2021').replace('2022', '2021')\r\n data['Ano'] = data['Ano'].astype(int)\r\n data['Automático'] = data['Automático'].astype(int)\r\n return data", "def targets_to_dataframe(conn):\n return connect_database.get_table_into_pandas('target_info',conn)", "def importar_visualizar_tabela(self):\r\n\r\n self.tabela_clientes = pd.read_csv('telecom_users.csv') # armazenando arquivo csv em uma variavel\r\n self.tabela_clientes = self.tabela_clientes.drop([\"Unnamed: 0\"], axis=1) # apagando a coluna Unnamed: 0, axist=1 -> para excluir a coluna, axist=0 -> excluir a linha (exist = eixo)\r\n print(self.tabela_clientes)\r\n # print(self.tabela_clientes.columns) # para mostrar todas as colunas da tabela \r\n self.tabela_clientes['NovaColuna'] = 1 # criar uma nova coluna se não existir, se caso ja exista, irá substituir todos os valores na coluna para 1\r", "def getDbase(self):\n for item in self.sqlData: # for every colummn name in the data\n self.sqdbase[item]=np.array(self.sqlData[item]) # add to the dictionary the clomunm name and the corresponding data\n \n self.sqlData['index'] = list(range(len(self.sqlData['time']))) # since we sometimes have even a column for index(pandas put it automatically) and sometimes not which will not be used for Stats we dropp it out\n self.sqdbase.pop('index') # we make sure that all dataFRames we are working with has inde column and the drop it\n return self.sqdbase", "def get_df(self):\n data = self.load_data()\n userID, itemID = self.get_user_and_item_ids(data)\n rating = data[:, 1]\n data_np = np.stack((userID, itemID, rating), axis=-1)\n df = pd.DataFrame(data_np)\n df.columns = [\"userID\", \"itemID\", \"rating\"]\n return df", "def dataframe(self):\n frames = []\n for game in self.__iter__():\n df = game.dataframe\n if df is not None:\n frames.append(df)\n if frames == []:\n return None\n return pd.concat(frames)", "def compile_dataframe_default(self):\n\t\tdata = [\n\t\t\t['ford','mustang','coupe','A'],\n\t\t\t['chevy','camaro','coupe','B'],\n\t\t\t['ford','fiesta','sedan','C'],\n\t\t\t['ford','focus','sedan','A'],\n\t\t\t['ford','taurus','sedan','B'],\n\t\t\t['toyota','camry','sedan','B']\n\t\t]\n\n\t\tself.data = pd.DataFrame(data, columns = self.data_cols)", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append 
= []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def __generate_data_table__(self):\n # | - __generate_data_table__\n rows_list = []\n for job in self.job_var_lst:\n revisions = self.job_revision_number(job)\n for revision in range(revisions + 1)[1:]:\n # | - FOR LOOP BODY\n entry_param_dict = {}\n for prop in job:\n entry_param_dict[prop[\"property\"]] = prop[\"value\"]\n\n entry_param_dict[\"variable_list\"] = job\n entry_param_dict[\"path\"] = self.var_lst_to_path(job)\n\n entry_param_dict[\"max_revision\"] = revisions\n entry_param_dict[\"revision_number\"] = revision\n\n rows_list.append(entry_param_dict)\n # __|\n\n data_frame = pd.DataFrame(rows_list)\n\n return(data_frame)\n # __|", "def get_execute_table(self, limit=None):\n query = self.select_all()\n self.cur.execute(query)\n if limit is None:\n result = self.cur.fetchall()\n else:\n result = self.cur.fetchmany(limit)\n return to_data_frame(result)", "def get_execute_table(self, limit=None):\n query = self.select_all()\n self.cur.execute(query)\n if limit is None:\n result = self.cur.fetchall()\n else:\n result = self.cur.fetchmany(limit)\n return to_data_frame(result)", "def DataLoader():\n #importing data\n House_Prices_Uncleaned = pd.read_csv(\"zillow_data/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv\")\n #Cleaning house prices data\n\n House_Prices=pd.DataFrame(House_Prices_Uncleaned[\"RegionName\"][House_Prices_Uncleaned[\"CountyName\"]==\"New York County\"])\n\n House_Prices[\"Price\"]=pd.DataFrame(House_Prices_Uncleaned[\"2020-09-30\"])\n\n House_Rent_Uncleaned= pd.read_csv(\"zillow_data/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv\")\n\n #Cleaning house rent data\n House_Rent=pd.DataFrame(House_Rent_Uncleaned[\"RegionName\"])\n House_Rent[\"Rent\"]=pd.DataFrame(House_Rent_Uncleaned[\"2020-09\"])\n\n return House_Prices, House_Rent", "def data(self):\n dfdata = pd.concat([self.weights, self.returns, self.category], axis=1)\n dfdata.columns = ['weights', 'returns', self.category_name]\n if self.period is not None:\n dfdata['date'] = self.period\n return dfdata", "def run(self) -> DataFrame:\n with self.create_census_api_session():\n logger.info('Retrieving variables...')\n variables: Variables = self.get_variables()\n logger.info('Retrieving ACS tables...')\n tables = self.get_tables()\n\n # Add geometry\n gazetteer_files: List[GazetteerFile] = []\n shapefiles: List[Shapefile] = []\n if self.geometry == 'points':\n logger.info('Retrieving Gazetteer files...')\n gazetteer_files.extend(self.get_gazetteer_files())\n elif self.geometry == 'polygons':\n logger.info('Retrieving shapefiles...')\n shapefiles.extend(self.get_shapefiles())\n dataframe = self.assemble_dataframe(variables, tables, gazetteer_files, shapefiles)\n return dataframe", "def load_data_frame(name):\n\n return DataFrame(name).load()", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def creat_df(urlist):\n country = []\n head = []\n for i in range(len(urlist)):\n country.append(urlist[i][2])\n head.append(urlist[i][4])\n df = 
pd.DataFrame.from_dict({\"Country\": country, \"Head\": head})\n return df" ]
[ "0.67799217", "0.6739709", "0.672318", "0.6723062", "0.6696471", "0.6675288", "0.6670434", "0.6617273", "0.65177196", "0.64534414", "0.64362025", "0.6365479", "0.63300186", "0.63290566", "0.6327225", "0.63239545", "0.63196117", "0.63137674", "0.630125", "0.6300652", "0.6264492", "0.6256432", "0.62324035", "0.6180351", "0.61794555", "0.61788", "0.6176623", "0.61587125", "0.61497086", "0.61380196", "0.61105865", "0.60665435", "0.6066435", "0.6064469", "0.6061606", "0.6059838", "0.6058731", "0.60499907", "0.6035701", "0.6030222", "0.60019326", "0.6001785", "0.6000684", "0.5988705", "0.5983374", "0.59724075", "0.59722877", "0.59694064", "0.5944242", "0.5938449", "0.59354305", "0.5930005", "0.59258634", "0.59256595", "0.5920742", "0.59140444", "0.5903718", "0.5903025", "0.590169", "0.5896573", "0.58822256", "0.58805203", "0.5877828", "0.58750325", "0.58721393", "0.5866933", "0.58623683", "0.58615005", "0.5854871", "0.5854492", "0.5853305", "0.5852947", "0.58431923", "0.5841819", "0.58315396", "0.58301586", "0.58126813", "0.58001053", "0.5799189", "0.5795491", "0.5795273", "0.5793335", "0.5791455", "0.5780287", "0.57791793", "0.5774069", "0.57715786", "0.57715064", "0.5768966", "0.575776", "0.57565165", "0.57556015", "0.57467055", "0.57467055", "0.5744563", "0.57372755", "0.57366616", "0.5734418", "0.57322097", "0.57267773" ]
0.6087042
31
Processing of the database so it can be used in the algorithm
def prepare_melody(melody_dframe):
    S = []  # S is the list where the melody is stored in the correct format
    for i in range(len(melody_dframe)):
        # this if handles silences as a note with pitch 0
        if i > 0 and abs(S[-1][1]-melody_dframe.iloc[i, 0]) > 0.001:
            s = []
            # so the end of the previous note is used as the start of the new note
            s.append(S[-1][1])
            # we want it to last until singing starts again
            s.append(melody_dframe.iloc[i, 0])
            s.append(0)  # and the pitch is, of course, 0
            S.append(s)
        s = []
        s.append(melody_dframe.iloc[i, 0])
        s.append(melody_dframe.iloc[i, 0]+melody_dframe.iloc[i, 1])
        if i > 0 and (i+1) < len(melody_dframe) and S[-1][1] > melody_dframe.iloc[i+1, 0]:
            S[-1][1] = melody_dframe.iloc[i+1, 0]
        s.append(melody_dframe.iloc[i, 2])
        S.append(s)
    return {melody_dframe.columns.values[3]: S}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_data(self):\n # Make a connexion with a mock database\n self.generate_data_collection()", "def _database(self):\n ...", "def _importInDjango(self):\n\n with open(settings.DATA_PATH, 'r', encoding='latin-1') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for raw in reader:\n\n # Créer ou mettre à jour la division\n division, created = Division.objects.get_or_create(\n nom=raw['Division']\n )\n if created:\n self.stdout.write(\n 'Divion {} ajoutée'.format(division.nom)\n )\n\n # Créer ou mettre à jour les équipes\n equipeDom, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 1'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeDom.nom)\n )\n\n equipeExt, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 2'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeExt.nom)\n )\n\n # Créer ou mettre à jour la rencontre\n scoreDom = 0 if raw['Score 1'] == '' else int(raw['Score 1'])\n scoreExt = 0 if raw['Score 2'] == '' else int(raw['Score 2'])\n forfaitDom = True if raw['Forfait 1'] == 'true' else False\n forfaitExt = True if raw['Forfait 2'] == 'true' else False\n date = datetime.datetime.strptime(raw['Date de rencontre'], '%d/%m/%Y')\n heure = datetime.datetime.strptime(raw['Heure'], '%H:%M')\n rencontre, created = Rencontre.objects.update_or_create(\n numero=int(raw['N° de match']),\n equipeDom=equipeDom,\n equipeExt=equipeExt,\n defaults={\n 'date': date,\n 'heure': heure,\n 'scoreDom': scoreDom,\n 'scoreExt': scoreExt,\n 'forfaitDom': forfaitDom,\n 'forfaitExt': forfaitExt,\n }\n )\n if created:\n self.stdout.write(\n 'Rencontre {} / {} ajoutée'.format(\n rencontre.equipeDom,\n rencontre.equipeExt\n )\n )", "def retrieve_from_db(self):\n pass", "def consultar_todos_DB(self):\n registros = db.session.query(ModelConcurso).all()\n for registro in registros:\n print(registro)", "def db_values(self, db):", "def get_data(self):\n products_list = []\n for category in CATEGORIES:\n json_data = self.url_to_json(category)\n pages_nb = self.retrieve_cat_pages_nb(json_data)\n for page in range(pages_nb):\n page_json_data = self.page_to_json(category, page+1)\n products = page_json_data[\"products\"]\n for p in products:\n params = {\n 'brands': \"\",\n 'product_name_fr': \"\",\n 'nutrition_grades': \"\",\n 'stores': \"\",\n 'url': \"\",\n 'categories': \"\"\n }\n for key in params:\n try:\n params[key] = p[key]\n except KeyError:\n continue\n if params['product_name_fr'] != \"\" and params['nutrition_grades'] != \"\" and params['url'] != \"\" and params['categories'] != \"\":\n product = Product(brand=params['brands'],\n name=params['product_name_fr'],\n nutrition_grade=params['nutrition_grades'],\n stores=params['stores'], url=params['url'],\n category=params['categories'])\n products_list.append(product)\n try:\n self.manager.save_all(self.clean_data(products_list))\n print(f\"\\n La base de données |{DB_NAME}| a été peuplée \\n\")\n except:\n print(\"\\n Une erreur s'est produite lors \"\n \"du peuplement de la base de données \\n\")", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": 
\"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n \"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"test@gmail.com\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def db_table(self):", "def TranspasoMongoDB_Mysql(request):\n assert isinstance(request, HttpRequest)\n \n # conexión con mongo\n #client = pymongo.MongoClient(\"mongodb+srv://Prueba:prueba@cluster0.xv7cj.mongodb.net/Prueba?retryWrites=true&w=majority\")\n # PARA PRUEBA LOCAL\n client = pymongo.MongoClient(\"mongodb://localhost:27017\")\n # conexión con la base de datos\n dbMongo = client['Prueba']\n \n # dentro de Prueba elegimos la colecciónn\n collection = dbMongo['entries']\n \n #x = collection.insert_one(dictsensor)\n #Impresion de datos\n lista = []\n for valFreestyle in collection.find():\n valFreestyle.pop('_id')\n valFreestyle['date'] = int(valFreestyle['date'])\n listaAux = valFreestyle.values()\n #listaAux.pop('_id')\n listas = tuple(listaAux)\n lista.append(listas)\n\n for item in lista:\n print(item)\n\n\n # Conexion con la base de datos MySql PC Universidad\n db = mysql.connector.connect(user='root',\n db='pruebatesis',\n passwd='admin',\n host='localhost' )\n \n mycursor = db.cursor()\n #str(Contenido a convertir)\n # QUERY\n #querry = \"\"\"INSERT INTO sensorFreeStyle (date,dateString,rssi,device,direction,rawbg,sgv,type,utcOffset,sysTime) VALUES (%d,%s,%d,%s,%s,%d,%d,%s,%d,%s)\"\"\"\n querry = \"INSERT INTO sensorFreeStyle (date,dateString,rssi,device,direction,rawbg,sgv,type,utcOffset,sysTime) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n querry2 = \"INSERT INTO sensorFreeStyle (date,dateString,rssi,device,direction,rawbg,sgv,type,utcOffset,sysTime) VALUES (%d,'%s',%d,'%s','%s',%d,%d,'%s',%d,'%s')\"\n print(\"-------\")\n print(type(lista[0][0]))#'int'\n print(type(lista[0][1]))#'str'\n print(type(lista[0][2]))#'int'\n print(type(lista[0][3]))#'str'\n print(type(lista[0][4]))#'str'\n print(type(lista[0][5]))#'int'\n print(type(lista[0][6]))#'int'\n print(type(lista[0][7]))#'str'\n print(type(lista[0][8]))#'int'\n print(type(lista[0][9]))#'str'\n print(\"-------\")\n #print(querry % (lista[0]))\n listaQuerry = []\n i= 0\n for item in lista:\n listaQuerry.append(querry2 % item)\n #print(listaQuerry[i])\n i+=1\n #for indice in listaQuerry:\n #print(indice)\n #respuesta = mycursor.execute(indice)\n\n #querry3 = \"INSERT INTO customers (name, address) VALUES (%s,%s)\"\n #mycursor.execute(querry3,('rr','rr'))\n #querry3 = \"INSERT INTO customers (name, address) VALUES ('rr','rr')\"\n #mycursor.execute(querry3)\n \n try:\n mycursor.executemany(querry,lista)\n db.commit()\n except:\n print(\"Eror: \"+ error)\n \n print(\"Number record inserted, ID:\", mycursor.lastrowid)\n db.close() \n return render(\n request,\n 'app/about.html',\n {\n 'title':'About',\n 'message':'Your application description page.',\n 'year':datetime.now().year,\n 'lista':lista,\n }\n )", "def run(self):\n self.db.table('materia').insert([\n {\n 'nombre': 'Cálculo Diferencial e Integral',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'calculo-diferencial-e-integral',\n 'numeroUrl': 1891,\n 'esRecursable': False,\n },\n {\n 
'nombre': 'Estructuras de Datos',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'estructuras-de-datos',\n 'numeroUrl': 241,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Inglés Técnico 1',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'ingles-tecnico-1',\n 'numeroUrl': 665,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Teoría de la Computación 1',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'teoria-de-la-computacion-1',\n 'numeroUrl': 1865,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Programación Orientada a Objetos',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'programacion-orientada-a-objetos',\n 'numeroUrl': 246,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Ingeniería de Requerimientos',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'ingenieria-de-requerimientos',\n 'numeroUrl': 2006,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Métodos Computacionales para el Cálculo',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'métodos-computacionales-para-el-cálculo',\n 'numeroUrl': 2036,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Teoría de la Computación 2',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'teoría-de-la-computación-2',\n 'numeroUrl': 2013,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Arquitecturas y Organización de Computadoras 1',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'arquitecturas-y-organización-de-computadoras-1',\n 'numeroUrl': 2052,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Programación Concurrente',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'programación-concurrente',\n 'numeroUrl': 2059,\n 'esRecursable': False,\n }\n ])", "def carregarData(self, ide):\r\n try:\r\n self.__id = str(ide)\r\n self.cursor.execute(\"SELECT * FROM DATAS WHERE ID = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None\r\n except:\r\n return None", "def get_data(traffic, weather, db):\r\n traffic_data = traffic.get_traffic()\r\n weather_data = weather.get_weather()\r\n db.traffic.insert_many(traffic_data)\r\n db.weather.insert_many(weather_data)\r\n print('于 {} 完成数据爬取及存储'.format(datetime.now()))\r\n print('当前交通数据条数 {}'.format(db.traffic.count_documents({})))\r\n print('当前天气数据条数 {}'.format(db.weather.count_documents({})))", "def fetch_data(self):", "def cargarProductosSinObra(self):\n\n self.limpiarTabla(self.tableProductos)\n\n ##Cnsulta para obtener todos los productos del sistema, con su correspondiente\n ##codigo de barra, monodroga, descuento, importe\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n ##Se cargan los datos obtenidos en la tabla de Producto\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))\n self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))\n self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))\n self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))\n self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))\n self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))\n\n ##Se carga la cantidad de cada producto en la tabla\n for row,producto in 
enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))", "def _convert_rosetta_db_to_basic_db(self):\n pass", "def connect_db_and_load_data(cls):\n db.connect()\n db.create_tables([Product], safe=True)\n load_data(transform_data('./inventory.csv'))", "def get_data(self, user, password, table):\n self.my_connect = SetData.GetData(self.host, self.database, self.charset)\n self.my_connect.connect(user, password)\n self.my_connect.select(\"SELECT * FROM {}\".format(table))\n self.result = self.my_connect.result", "def lista_ventas(self,tipo,lista,filtro):\n self.lista=self.builder.get_object(lista)\n self.lista.clear()#Limpia la lista\n busqueda = \"\"\n\n if tipo==\"\":\n print(\"Llego a buscar ventas en BD\")\n #result=self.db.execute('SELECT * FROM Venta')\n busqueda = self.db.execute('SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID')\n elif tipo == \"Cliente\":\n print(\"Busco venta por nombre del cliente\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND C.nombre LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Viaje\":\n print(\"Busco venta por nombre del paquete\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND P.nombre LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Fecha de inicio\":\n print(\"Busco venta por fecha de inicio\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND fechaInicio LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Fecha de fin\":\n print(\"Busco venta por fecha de fin\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND fechaFin LIKE '%\"+filtro+\"%'\")\n \n for row in busqueda: \n #Empieza por la [1] porque el ID es la [0]\n # self.lista.append([row[4],row[5],row[1],row[2],row[3]])\n self.lista.append([row[1],row[2],row[3],row[4],row[5],row[0]])\n print(\"Listo ventas en tabla\")", "def load_data(self):", "def get_all(self):\n id_proyecto = unicode(request.url.split(\"/\")[-3])\n fase_table_filler.init(\"\", id_proyecto)\n tmpl_context.widget = self.table\n value = self.fase_filler.get_value()\n d = dict(value_list = value, model = \"Fases\", accion = \"./buscar\")\n d[\"direccion_anterior\"] = \"../..\"\n return d", "def init_data():\n ps = Planete_Solidaire(name=\"Planète Solidaire\").save()\n\n maya = Human4j(\n firstname='Maya',\n lastname='Hannachi',\n number='0620902819',\n email='maya.hannachi@epita.fr',\n school='EPITA'\n ).save()\n mailinh = Human4j(\n firstname='Mai-Linh',\n lastname='Lannes',\n number='0612632032',\n email='mai-linh.lannes@epita.fr',\n school='EPITA'\n ).save()\n michel = Human4j(\n firstname='Michel',\n lastname='Sasson',\n number='0662739612',\n email='michel.sasson@epita.fr',\n school='EPITA'\n ).save()\n cedric = Human4j(\n firstname='Cédric',\n lastname='Joly',\n number='',\n email='cedric.joly@epita.fr',\n school='EPITA'\n ).save()\n caroline = 
Human4j(\n firstname='Caroline',\n lastname='De Paoli',\n number='',\n email='caroline.depaoli@isg.fr',\n school='ISG'\n ).save()\n\n binome = Binome4j().save()\n binome.human1.connect(mailinh)\n binome.human2.connect(maya)\n\n ps.binome.connect(binome)\n ps.cedric.connect(cedric)\n ps.michel.connect(michel)\n ps.caroline.connect(caroline)", "def make_query(self):", "def Tratamentos_dos_dados(self):\r\n self.tabela_clientes[\"TotalGasto\"] = pd.to_numeric(self.tabela_clientes[\"TotalGasto\"], errors=\"coerce\") # transformar coluna que deveria ser número e está como texto em número, errors=\"coerce\" -> se der erro em algo deixa vazio\r\n \r\n self.tabela_clientes = self.tabela_clientes.dropna(how='all', axis=1) # remover as colunas que estam 100% vazia, how='all' -> todas\r\n \r\n self.tabela_clientes = self.tabela_clientes.dropna() # remover a linha que tem algum valor vazio\r\n\r\n print(self.tabela_clientes.info()) # informações sobre a tabela \r", "def uploader_actividad(df,to_model):\n\tengine = create_engine(\"mssql+pyodbc://sa:BaseSQL123@10.160.8.96:1433/vpcanales?driver=SQL+Server+Native+Client+11.0\")\n\n\tfecha = df.loc[0,'Fecha']\n\tprint(fecha.month)\n\tprint(fecha.year)\n\n\tif to_model.__name__==\"Activacion\":\n\n\t\tActivacion.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_activacion\n\t\t\t@fecha_actividad='{0}',\n\t\t\t@plataforma='{1}',\n\t\t\t@tecnologia='{2}',\n\t\t\t@terminal='{3}',\n\t\t\t@cantidad='{4}',\n\t\t\t@codigo_plan='{5}',\n\t\t\t@mes={6},\n\t\t\t@ano={7},\n\t\t\t@codigo_agente='{8}'\n\t\t\t \"\"\".format(row[2],\n\t\t\trow[5],\n\t\t\trow[6],\n\t\t\trow[7],\n\t\t\trow[-2],\n\t\t\trow[4],\n\t\t\trow[2].month,\n\t\t\trow[2].year,\n\t\t\trow[3])\n\t\t\tcursor.execute(string).commit()\n\n\t\tresults = Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Activacion.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\t\treturn results\n\n\n\n\telse:\n\n\t\tAlta.objects.filter(fecha_actividad__month=fecha.month ,\n\t\t\tfecha_actividad__year=fecha.year).delete()\n\n\t\tfor row in df.itertuples():\n\t\t\tconnection = engine.raw_connection()\n\t\t\tcursor=connection.cursor()\n\t\t\t#Se ejecuta el SP por cada registro del dataframe\n\t\t\tstring=\"\"\"exec sp_insert_into_alta\n\t @fecha_actividad='{0}',\n\t @plataforma='{1}',\n\t @tecnologia='{2}',\n\t @terminal='{3}',\n\t @cantidad='{4}',\n\t @codigo_plan='{5}',\n\t @mes={6},\n\t @ano={7},\n\t @codigo_agente='{8}' \"\"\".format(row[2],\n\t row[5],\n\t row[6],\n\t row[7],\n\t row[-2],\n\t row[4],\n\t row[2].month,\n\t row[2].year,\n\t row[3])\n\t\t\tcursor.execute(string).commit()\n\n\n\t\tresults = Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(suma = Sum('cantidad'))\n\n\n\t\tresults.update(Alta.objects.filter(fecha_actividad__month=fecha.month,\\\n\t\t\tfecha_actividad__year=fecha.year)\\\n\t\t\t.aggregate(count = Count('cantidad')))\n\n\t\tprint(\"*************************\",results)\n\n\n\n\t\treturn results", "def 
prepare_data(self):", "def create_db(self):", "def insert_data():\n\tBase.metadata.drop_all(engine)\n\tBase.metadata.create_all(engine)\n\tu1 = insert_user(\"233@B.com\", \"/static/image/avatar.JPG\")\n\tu2 = insert_user(\"fy@B.com\", \"/static/image/avatar.JPG\")\n\tc = insert_catalog(u1.id, \"Sichuan Dish\")\n\tinsert_catalog(u1.id, \"Fujian Dish\")\n\tinsert_catalog(u1.id, \"Guangdong Dish\")\n\tinsert_catalog(u2.id, \"Zhejiang Dish\")\n\tinsert_catalog(u2.id, \"Beijing Dish\")\n\tinsert_item(u1.id, \"Iphone 6 plus\", c, 'Is a phone', None)\n\tinsert_item(u1.id, \"Hot pot\", c, \"Hot hot hot\", None)\n\tinsert_item(u2.id, \"Kong Bao Chicken\", c, \"Classic\", None)", "def get_data(self):\n\n return pd.read_sql_query(\"Select * from {table}\".format(table=self.table_name), con=self.con)", "def query(self):", "def put_it_in_tables(self):\n my_connection = mysql.connector.connect(user=self.user, password=self.password, database='openfoodfacts')\n cursor = my_connection.cursor(buffered=True)\n for i in self.my_data:\n prod_name = i['product_name']\n try:\n add_aliment = (\"INSERT INTO aliment \"\n \"(product_name, product_description, barcode, nutritional_score, stores, product_category) \"\n \"VALUES (%s, %s, %s, %s, %s, %s)\")\n data_aliment = (i['product_name'].replace(\"'\", \"''\"), i['product_description'].replace(\"'\", \"''\"), i['barcode'].replace(\"'\", \"''\"), i['nutritional_score'].replace(\"'\", \"''\"), i['stores'].replace(\"'\", \"''\"), i['product_category'].replace(\"'\", \"''\"))\n cursor.execute(add_aliment, data_aliment)\n except mysql.connector.IntegrityError:\n pass \n my_connection.commit()\n cursor.close()\n my_connection.close()\n print(\"ok c'est fait\")", "def getAll():\n if request.method == 'POST':\n # Recuperation des infos\n data = OrdersModel().getAll()\n # Formatage des Delais\n data['delai'] = data['delai'].apply(Utils.formatSeconds)\n # df[\"id\"] = df.apply(lambda x: Crypt.encode(cfg._APP_SECRET_KEY, x['id']), axis=1)\n # Retour du message\n return Render.jsonTemplate(_OPERATION, 'Ordres', categorie=\"SUCCESS\", data=data.to_dict(\"records\"))\n else:\n abort(400)", "def crearBD(self):\n if self.base.isConnected():\n mensaje = \"Usted ya se encuentra conectado a la base \" + self.base.getDbName() + \", ¿Desea Crear una nueva?\"\n if askyesno(\"Atención\", mensaje):\n nombre = tkinter.simpledialog.askstring(\"Elija el Nombre de la Base\", prompt=\"Nombre\")\n self.base.setDbName(nombre)\n resultado = self.base.createDB()\n showinfo('Resultado', resultado)\n self.crearTabla()\n self.mostrarString.set('Mostrando Registros Existentes en ' + self.base.getDbName())\n self.tituloTree.configure(text=self.mostrarString.get())\n self.updateTree()\n else:\n try:\n mibase = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"\"\n )\n baseNombre['nombre'] = tkinter.simpledialog.askstring(\"Elija el Nombre de la Base\", prompt=\"Nombre\")\n buffer = open('nombreBD.json', 'w')\n buffer.write(json.dumps(baseNombre))\n buffer.close() \n micursor = mibase.cursor()\n baseSQL = f\"CREATE DATABASE {getNameDB()}\"\n micursor.execute(baseSQL)\n mensaje = f\"Se ha creado la base {getNameDB()}\"\n showinfo('BD Creada', mensaje)\n mostrarString.set('Mostrando Registros Existentes en ' + getNameDB())\n tituloTree.configure(text=mostrarString.get())\n\n if askyesno('Tabla Inexistente', '¿Desea crear una tabla?'):\n crearTabla()\n except:\n showinfo ('Error', exc_info()[1])", "def hook_db(self):\n # get dates as strings from the database so that we can 
serialize to\n # json this is not working for some reason. another solution has been\n # reached, but this would be a better one, so I'm leaving it here.\n #conv = MySQLdb.converters.conversions.copy()\n #conv[10] = str\n self.database = MySQLdb.connect(host=self.host,\n port=self.port,\n user=self.user,\n passwd=self.passwd,\n db=self.db)\n #conv=conv)\n self.cursor = self.database.cursor()", "def connect_to_data_base():\n try:\n connection = psycopg2.connect(**DATA_CONNECTION)\n\n cursor = connection.cursor()\n\n cursor.execute(\"SELECT version();\")\n #record = cursor.fetchone()\n \n print(\"{} \\t Conexión Base de datos realizada con éxito\".format(datetime.now()))\n \n return connection, cursor\n\n except (Exception, psycopg2.Error) as error:\n print(\"{} \\t Error en el método connect_to_data_base\".format(datetime.now()))\n print(\"Error en la conexión a la base de datos\", error)\n \n return None", "def cargarObras(self):\n self.cargarObjetos(self.tableOs,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )", "def fill_db(self, data):\n check_input_params(data, self.DB)\n self.db = data[self.DB]", "def cargar_obras(self):\n self.cargarObjetos(self.tableObra,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )", "def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data", "def creacion_conexion_db():\n\n global bd\n bd = mysql.connector.connect(host=\"localhost\", user=\"root\", passwd=\"\")\n micursor = bd.cursor(buffered=True)\n try:\n micursor.execute(\"CREATE DATABASE entrega\")\n bd = mysql.connector.connect(\n host=\"localhost\", user=\"root\", passwd=\"\", database=\"entrega\"\n )\n micursor = bd.cursor(buffered=True)\n micursor.execute(\n \"CREATE TABLE paciente( id int(11) NOT NULL PRIMARY KEY AUTO_INCREMENT, nombre VARCHAR(128) COLLATE utf8_spanish2_ci NOT NULL, apellido varchar(128) COLLATE utf8_spanish2_ci NOT NULL, dni VARCHAR(128) COLLATE utf8_spanish2_ci NOT NULL, dia date NOT NULL, hora int(11) NOT NULL, minuto int(11) NOT NULL )\"\n )\n\n return mysql.connector.connect(\n host=\"localhost\", user=\"root\", passwd=\"\", database=\"entrega\"\n )\n\n except Exception:\n\n return mysql.connector.connect(\n host=\"localhost\", user=\"root\", passwd=\"\", database=\"entrega\"\n )", "def ddd():\n return get_data(db, MyTable)", "def connection_database(self):\n # connection to the database\n self.data_base = mysql.connector.connect(user=self.info[0], password=self.info[1],\n host=self.info[2])\n self.cursor = self.data_base.cursor()\n\n # executed \"use Purbeurre\" request\n self.cursor.execute(\"USE Purbeurre\")", "def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)", "def db_data4test():\n administrators = {\n 'field': ['name', 'password'],\n 'data': [\n ('admin', '123'),\n ]\n }\n\n countries = {\n 'field': 'name',\n 'data': [\n 'China',\n 'India'\n ]\n }\n\n positions = {\n 'field': 'name',\n 'data': [\n 'Software EngineerSystem Analyst',\n 'Business Analyst',\n 'Technical support',\n 'Network Engineer',\n 'Technical Consultant',\n 'Web Developer',\n 'Software Test'\n ]\n }\n\n users = {\n 'field': ['name', 'password'],\n 'data': [\n ('test', '123456'),\n ('test2', '123456'),\n ('test3', '123456')\n ]\n }\n\n user_infos = {\n 'field': [\n 'name', 'first_name', 'last_name', 'position', 'company',\n 'nationality', 'tobe_contacted', 'skills_have', 'skills_learned'\n ],\n 
'data': [\n (\n 'test', 'Huang', 'Xiao', 'Business Analyst',\n 'Global Consulting Services', 'China', 1,\n '3months Python Subject',\n 'Advanced Python through on-job training'\n ),\n (\n 'test2', 'Yong', 'Wu', 'Business Analyst',\n 'REA', 'China', 0,\n '3 months Datawarehousing',\n 'Project management skill'\n ),\n ]\n }\n\n return {\n 'administrator': administrators,\n 'country': countries,\n 'position': positions,\n 'user': users,\n 'user_info': user_infos\n }", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def getBaseMonetaria(self):\n #Obtener la url de descarga del cvs\n urlPackage=\"https://datos.gob.ar/api/3/action/package_show?id=sspm-factores-explicacion-base-monetaria\"\n s=requests.get(urlPackage).content\n objJson = json.loads(s)\n resultado = objJson['result']['resources']\n selector = 0\n ultimoResultado = resultado[selector]\n urlDescarga = ultimoResultado['url']\n descripcion = ultimoResultado['description']\n print(\"Descargando: {}\".format(descripcion))\n print(\"Archivo: {}\".format(urlDescarga))\n \n #Descargar la url con cvs y generar pandas dataframe\n contenidoCVS = requests.get(urlDescarga).content\n flujoCVS = io.StringIO(contenidoCVS.decode('utf-8'))\n df_temp = pd.read_csv(flujoCVS)\n \n #transform string to datetime\n df_temp['indice_tiempo'] = pd.to_datetime(df_temp['indice_tiempo'], format='%Y-%m-%d', errors='ignore')\n df_temp['indice_tiempo'] = df_temp['indice_tiempo'].dt.date\n #set index\n df_temp.set_index('indice_tiempo', inplace=True)\n \n return df_temp", "def importDatabase(self):\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Telefoon, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\", (naamInvoer.get(), achternaamInvoer.get(), telefoonnummerInvoer.get(), FietsNr, pincodeInvoer.get()))\n\n db_conn.commit()", "def ouverture_connection():\n #driver://username:password@host:port/database\n pg_db = create_engine('postgresql://alain:nostromos@localhost:5432/gites_wallons',\n convert_unicode=True,\n encoding='utf-8')\n connection = pg_db.connect()\n hebergements = connection.execute(\" \\\n select \\\n hebergement.heb_pk, \\\n hebergement.heb_adresse, \\\n hebergement.heb_localite, \\\n hebergement.heb_cgt_cap_min, \\\n hebergement.heb_cgt_cap_max, \\\n hebergement.heb_cgt_nbre_chmbre, \\\n link_hebergement_epis.heb_nombre_epis, \\\n hebergement.heb_lit_1p, \\\n hebergement.heb_lit_2p, \\\n hebergement.heb_lit_sup, \\\n hebergement.heb_lit_enf, \\\n type_heb.type_heb_nom, \\\n hebergement.heb_coordonnee, \\\n proprio.pro_prenom1, \\\n proprio.pro_prenom2, \\\n proprio.pro_nom1, \\\n proprio.pro_nom2, \\\n hebergement.heb_nom, \\\n hebergement.heb_gid_activite_nature, \\\n hebergement.heb_gid_theme_equestre, \\\n hebergement.heb_gid_peche, \\\n hebergement.heb_gid_panda, \\\n hebergement.heb_gid_patrimoine, \\\n hebergement.heb_gid_antiallergique, \\\n hebergement.heb_gid_access_tous, \\\n hebergement.heb_gid_bebe_tendresse, \\\n hebergement.heb_gid_beau_jardin, \\\n hebergement.heb_gid_eco_gite, \\\n proprio.pro_tel_priv, \\\n proprio.pro_gsm1, \\\n commune.com_nom, \\\n commune.com_cp, \\\n proprio.pro_email, \\\n hebergement.heb_tarif_we_bs, \\\n hebergement.heb_tarif_we_ms, \\\n hebergement.heb_tarif_we_hs, \\\n hebergement.heb_tarif_sem_bs, \\\n hebergement.heb_tarif_sem_ms, \\\n hebergement.heb_tarif_sem_hs, \\\n hebergement.heb_fumeur, \\\n hebergement.heb_animal \\\n from \\\n hebergement left outer join link_hebergement_epis on link_hebergement_epis.heb_pk = hebergement.heb_pk, \\\n commune, \\\n 
type_heb, \\\n proprio \\\n where \\\n hebergement.heb_typeheb_fk in (1,2,3,4,7,10) \\\n and \\\n commune.com_pk=hebergement.heb_com_fk \\\n and \\\n type_heb.type_heb_pk=hebergement.heb_typeheb_fk \\\n and \\\n proprio.pro_pk=hebergement.heb_pro_fk \\\n and \\\n proprio.pro_etat=True \\\n and \\\n hebergement.heb_site_public = '1' \\\n order by \\\n hebergement.heb_localite, \\\n proprio.pro_nom1, \\\n hebergement.heb_nom\")\n return hebergements", "def create_base_testes(dbsession):\n # print('Iniciando criação da base #############################')\n # print('dbsession', dbsession)\n base1 = BaseOrigem('alimentos_e_esportes')\n dbsession.add(base1)\n dbsession.commit()\n risco1 = PadraoRisco('perigo', base1)\n dbsession.add(risco1)\n dbsession.commit()\n param1 = ParametroRisco('alimento', 'teste1', risco1)\n param2 = ParametroRisco('esporte', 'teste2', risco1)\n dbsession.add(param1)\n dbsession.add(param2)\n dbsession.commit()\n valor1 = ValorParametro('bacon', Filtro.igual, param1)\n valor2 = ValorParametro('base jump', Filtro.igual, param2)\n dbsession.add(valor1)\n dbsession.add(valor2)\n dbsession.commit()\n visao1 = Visao('viagens', base1.id)\n dbsession.add(visao1)\n dbsession.commit()\n tabela1 = Tabela('viagens', 'viagem', '', 0, visao1.id)\n tabela2 = Tabela('alimentos', 'alimento', 'viagem', tabela1.id, visao1.id)\n tabela3 = Tabela('esportes', 'esporte', 'viagem', tabela1.id, visao1.id)\n dbsession.add(tabela1)\n dbsession.add(tabela2)\n dbsession.add(tabela3)\n dbsession.commit()\n # print('Base criada! ########################')", "def get_data(self):", "def __init__(self):\n self.data = None\n self.conn = None\n self.database = None\n self.table = None\n self.manage = None\n self.limiting = 0", "def _get_db(self):\n gt_db = ...\n return gt_db", "def collect_data():\n\n \"Aqui va el codigo de alberto para recoger los datos que puede venir en forma de diccionario\"\n #TODO: Función para recoger los datos de los bms y meterlos en diccionarios (Alberto jr.)\n\n bms1 = dict()\n bms2 = dict()\n bms3 = dict()\n general = dict()\n\n\n # Ejemplos de datos para meter en los diccionarios\n\n temperature = 35.5\n voltage1 = 15.2\n voltage2 = 14.8\n date = time.strftime(\"%Y-%m-%d\") # Current date\n t = time.strftime(\"%H:%M:%S\") # Current time\n\n return bms1, bms2, bms3, general", "def _build_db_data(self):\n self.logger.debug('Bulding task db document.')\n db_data = {}\n db_data.update(self.query)\n db_data['condition'] = False\n db_data['records'] = []\n self.db_collection.insert_one(db_data)\n return db_data", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list", 
"def read_sql(self):\n pass", "def __upload_data(self):\n data_path = \"database\"\n os.makedirs(data_path, exist_ok=True)\n try:\n conn = sqlite3.connect('database/customers.db')\n query = '''CREATE TABLE IF NOT EXISTS all_customers_database (\n first_name TEXT, second_name TEXT,\n gender TEXT, account_type TEXT, account_number INTEGER PRIMARY KEY UNIQUE NOT NULL,\n account_password VARCHAR, account_balance REAL );'''\n #Create table\n cursor = conn.cursor()\n print(\"Connection sucessful\")\n cursor.execute(query)\n conn.commit()\n print(\"Table created\")\n #Insert a row to a database\n insert_query ='''INSERT INTO all_customers_database\n (first_name, second_name, gender, account_type, account_number, account_password, account_balance)\n VALUES \n (?, ?, ?, ?, ?, ?, ?);'''\n conn.execute(insert_query, (self.first_name, self.second_name, self.gender, self.account_type, self.account_number, self.account_password, self.account_balance))\n print(\"Your details saved successfully.\")\n except sqlite3.Error as err:\n # print(\"Error while creating a sqlite table \", err)\n print(\"Error creating database\")\n finally:\n if conn:\n conn.close()\n # print(\"Sqlite connection closed.\")", "def consultarInformes(self):\n try:\n cursor = self.__conexion.cursor()\n # Se le suma 1 hora para que este en nuestra franja horaria\n cursor.execute(\n \"SELECT datetime(fecha + 3600, 'unixepoch'), datos, sesion FROM Informes ORDER BY fecha DESC\")\n self.__conexion.commit()\n datos = cursor.fetchall()\n cursor.close()\n\n return datos\n\n except sqlite3.Error as error:\n print(\"Error al consultar la base de datos: \", error)", "def importer():\n\n #Lager liste der eg legg transaksjonar som blir henta og ikkje laga:\n get_list = []\n\n #Gjer txt-fila i mappen om til csv-fil\n file_fixer()\n\n with open(out_path) as file:\n reader = csv.reader(file)\n r_0 = next(reader)\n r_0.append(\"type\")\n r_0.append('amount')\n r_0.append('category')\n r_0.append('account')\n r_0.append('project')\n\n\n for row in reader:\n #Legger til dei fire kollonenne (amount, account, subaacount, project), tomme.\n row.append(\"\")\n row.append(\"\")\n\n #Omformatterer rader:\n row = format_fix(row)\n row.append(\"\")\n row.append(\"\")\n row.append(\"\")\n print(row)\n\n\n try:\n obj, created = Transaction.objects.get_or_create(\n date=row[0],\n transaction_type=row[1],\n description=row[2],\n amount=row[3]\n )\n\n except Transaction.MultipleObjectsReturned:\n continue\n\n if not created:\n get_list.append(obj.pk)\n\n return get_list", "def load_data(client):\n codes = [\"DUB\", \"LHR\", \"ETC\", \"XXX\"]\n q = generateMultiInsertQuery(codes, \"Airport\")\n #print(json.dumps(q.json(), indent=4))\n q.execute(client)", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def resultadosDiarios(self):\r\n self.checkingConnection()\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''SELECT date1, ingresos, compras, gastos,\r\n (ingresos - compras - gastos) AS Saldo FROM (SELECT date1,\r\n ingresos, compras, gastos FROM ((SELECT Clients.date AS date1,\r\n SUM(Clients.value) AS ingresos FROM Clients GROUP BY Clients.date)\r\n JOIN (SELECT Compras.date AS date2, SUM(Compras.value) AS compras\r\n FROM Compras GROUP BY Compras.date) JOIN (SELECT Gastos.date AS date3,\r\n SUM(Gastos.value) AS gastos FROM Gastos GROUP BY Gastos.date)\r\n ON date1 = date2 AND date2 = date3))''', 
self.db)\r\n self.setModel(self.model)", "def connectDatabase(self):\r\n self.checkingConnection()\r\n\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery('''\r\n SELECT Clients.id, Clients.date, Clients.hour, Clients.name, \r\n Clients.birthday, Clients.cellphone, Clients.address, City.name, \r\n Payment.method, Clients.pollo, Clients.carne, Clients.empanachos, \r\n Clients.total, Clients.value FROM Clients JOIN City JOIN Payment\r\n ON Clients.city_id = City.id AND Clients.payment_id = Payment.id\r\n ''', self.db)\r\n\r\n self.setModel(self.model)", "def __init__(self, database):\n self.database = database", "def query3() :", "def get_data(self, tablename):\n conn = self.get_conn()\n c = conn.cursor()\n status_sql = self.get_status_sql(tablename)\n c.execute(status_sql)\n results = c.fetchall()\n data = []\n for row in results:\n data.append(dict_from_row(row))\n conn.commit()\n conn.close()\n return data", "def config_db():", "def read():\n\tid_buscar = int(input(\"Ingrese ID de pokemon: \"))\n\texistencia = \"\"\"\n\t\t\t\tSELECT * FROM sansanito\n\t\t\t\tWHERE id = :1\"\"\"\n\tcur.execute(existencia, [id_buscar])\n\tres = cur.fetchall()\n\t# Res vacio implica que no existe registro con ID ingresado\n\tif res == []:\n\t\tprint(\"ID no encontrado en la tabla!\")\n\t\treturn\n\telse:\n\t\tprint_table(hdrs_sansanito, True, res)", "def generarConsultasConexion(self):\n for parRecursos in self.CombiConsultaLibre:\n parRecursosL0=self.limpiaRecursos(parRecursos[0])\n parRecursosL1=self.limpiaRecursos(parRecursos[1])\n \n if self.nivel_profundidad>=1:\n consultasparql = self.busConex1 % (parRecursosL0,parRecursosL1,self.limit_BC)\n print consultasparql;\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n triple = parRecursos[0]+\"-|\"+parRecursos[1]+\"-|\"+resul['p1']['value']\n self.ResultConsultasConexion.append(triple) \n \n if self.nivel_profundidad>=2:\n consultasparql = self.busConex2 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o1+\"*-|\"+resul['p2']['value']\n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n \n if self.nivel_profundidad>=3:\n consultasparql = self.busConex3_1 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n o2=resul['o2']['value']\n o2=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = parRecursos[1]+\"-|\"+o2+\"*-|\"+resul['p2']['value']\n triple3 = o1+\"*-|\"+o2+\"*-|\"+resul['p3']['value'] \n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n self.ResultConsultasConexion.append(triple3) \n\n consultasparql = self.busConex3_2 % (parRecursosL0,parRecursosL1,self.limit_BC)\n resultoCC=self.consulta(consultasparql)\n for resul in resultoCC['results']['bindings']:\n o1=resul['o1']['value']\n o1=o1.replace('http://dbpedia.org/resource/','')\n o2=resul['o2']['value']\n o2=o1.replace('http://dbpedia.org/resource/','')\n triple1 = parRecursos[0]+\"-|\"+o1+\"*-|\"+resul['p1']['value']\n triple2 = 
parRecursos[1]+\"-|\"+o2+\"*-|\"+resul['p2']['value']\n triple3 = o2+\"*-|\"+o1+\"*-|\"+resul['p3']['value'] \n self.ResultConsultasConexion.append(triple1) \n self.ResultConsultasConexion.append(triple2) \n self.ResultConsultasConexion.append(triple3)", "def load_data_sql(): \r\n conn = mysql.connect(**st.secrets[\"mysql\"])\r\n\r\n data = pd.read_sql('SELECT * FROM song_data', conn)\r\n lookup_table = pd.read_sql('SELECT * FROM lookup_table', conn)\r\n \r\n return data, lookup_table", "def consultarUltimoInforme(self):\n try:\n cursor = self.__conexion.cursor()\n # Se le suma 1 hora para que este en nuestra franja horaria\n cursor.execute(\n \"SELECT datetime(fecha + 3600, 'unixepoch'), datos, sesion FROM Informes ORDER BY fecha DESC LIMIT 1\")\n self.__conexion.commit()\n datos = cursor.fetchall()\n cursor.close()\n\n return datos\n\n except sqlite3.Error as error:\n print(\"Error al consultar la base de datos: \", error)", "def _get_db_data(self) -> None:\n if self._db_data:\n return\n with db(cursor=True) as cur:\n cur.execute('SELECT count, gender, age_start FROM age_groups WHERE district = ?', (self.district,))\n self._db_data = cur.fetchall()\n self._db_data = sorted([row for row in self._db_data if row['gender'] == self.gender],\n key=lambda x: (x['age_start'] is None, x['age_start']))", "def income(self):\r\n self.checkingConnection()\r\n model = QSqlQueryModel()\r\n model.setQuery('''\r\n SELECT Clients.id, Clients.date, Clients.hour, Clients.name, \r\n (Clients.carne + Clients.pollo) AS empanadas,\r\n Clients.total, Clients.value FROM Clients''', self.db)\r\n self.setModel(model)", "def request_meteodata(request: str):\n import MySQLdb\n import platform\n if platform.system() == \"Windows\":\n MySQLParams = {\n 'host' : \"192.168.5.1\",\n 'user' : \"MeteoRobot\",\n 'passwd': \"robot\",\n 'db' : \"MeteoData\"\n }\n else:\n MySQLParams = {\n 'host' : \"localhost\",\n 'user' : \"MeteoRobot\",\n 'passwd': \"robot\",\n 'db' : \"MeteoData\"\n }\n try:\n con = MySQLdb.connect(**MySQLParams)\n cur = con.cursor()\n cur.execute(request)\n con.commit()\n data = cur.fetchall()\n except MySQLdb.Error as err:\n print(str(err))\n return []\n except Exception as err:\n print(str(err))\n return []\n con.close()\n return data", "def GetTableData(self, query_parameters):\n raise NotImplementedError('Implement this')", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def post(self):\r\n piso=self.request.get('piso')\r\n numext=self.request.get('numext')\r\n numint=self.request.get('numint')\r\n piso=self.validonumero(piso)\r\n 
numext=self.validonumero(numext)\r\n numint=self.validonumero(numint)\r\n \r\n empresa=empresas()\r\n empresa.nombre=self.request.get('desc')\r\n empresa.calle=self.request.get('calle')\r\n empresa.numeroExterior=int(numext)\r\n empresa.numeroInterior=int(numint)\r\n empresa.colonia=self.request.get('colonia')\r\n empresa.piso=int(piso)\r\n empresa.andador=self.request.get('andador')\r\n empresa.codigo_postal=int(self.request.get('cp'))\r\n empresa.sitioweb=self.request.get('web')\r\n empresa.correo=self.request.get('mail')\r\n empresa.nombreContacto=\"\"\r\n empresa.paternoContacto=\"\"\r\n empresa.maternoContacto=\"\"\r\n #### \r\n ciudad=self.request.get('ciudad')\r\n query=\"where ciudad='%s'\"%ciudad\r\n cd=ciudades.gql(query)\r\n city=cd.fetch(1)\r\n for lstcd in city:\r\n empresa.id_Ciudad=lstcd.key().id()\r\n empresa.put()\r\n jsondic={}\r\n jsondata=[]\r\n jsondata+=[self.addKey(jsondic,\"Dato\", empresa.key().id())]\r\n self.response.out.write(simplejson.dumps(jsondata))\r\n return False", "def insert_in_tendencias():\n\n ultimos = persistence.traer_ultimos_precios_doge()\n average_doge = sum(ultimos) / len(ultimos)\n min_price_doge = min(ultimos)\n max_price_doge = max(ultimos)\n openprice = persistence.traer_masviejo_precio_doge()\n closeprice = persistence.traer_ultimo_precio_doge()\n persistence.insert_new_tendencia(\"DOGE\", str(average_doge), str(min_price_doge), str(max_price_doge), str(openprice), str(closeprice))\n\n ultimos = persistence.traer_ultimos_precios_btc()\n average_btc = sum(ultimos) / len(ultimos)\n min_price_btc = min(ultimos)\n max_price_btc = max(ultimos)\n openprice = persistence.traer_masviejo_precio_btc()\n closeprice = persistence.traer_ultimo_precio_btc()\n persistence.insert_new_tendencia(\"BTC\", str(average_btc), str(min_price_btc), str(max_price_btc), str(openprice), str(closeprice))\n\n ultimos = persistence.traer_ultimos_precios_eth()\n average_eth = sum(ultimos) / len(ultimos)\n min_price_eth = min(ultimos)\n max_price_eth = max(ultimos)\n openprice = persistence.traer_masviejo_precio_eth()\n closeprice = persistence.traer_ultimo_precio_eth()\n persistence.insert_new_tendencia(\"ETH\", str(average_eth), str(min_price_eth), str(max_price_eth), str(openprice), str(closeprice))", "def example_data():\n\n #add user, business, comment, rating, tips, question, answer\n sample_user = User(username='ilkay', \n password=argon2.hash('123Qwe/'),\n email='ilkay@ilkay.com')\n sample_business = Business(business_id='IBZbaTy-_Ds7GITu4QimHQ', \n business_name='Wildhaven Ranch', \n business_type='zoo',\n latitude=34.256787,\n longitude=-117.161389)\n sample_favorite = Favorite(user=sample_user,\n business=sample_business)\n sample_comment = Comment(user=sample_user, \n business=sample_business,\n comment='hi there')\n sample_rating = Rating(user=sample_user, \n business=sample_business,\n rating=5)\n sample_tip_b = BusinessTip(user=sample_user, \n business=sample_business,\n business_tip='bring wet towels')\n sample_tip_t = TripTip(user=sample_user,\n trip_tip='bring toys')\n sample_question = Question(question='Favorite color?')\n sample_answer = Answer(question=sample_question,\n user=sample_user,\n answer='blue')\n sample_route = Route(user=sample_user,\n start=\"Sunnyvale, CA\",\n end=\"Universal City, CA\")\n sample_stopover = Stopover(route=sample_route,\n business=sample_business,\n latitude=34.256787,\n longitude=-117.161389)\n\n db.session.add_all([sample_user,\n sample_business, \n sample_rating, \n sample_comment,\n sample_tip_b,\n sample_tip_t,\n 
sample_question,\n sample_answer,\n sample_favorite,\n sample_route,\n sample_stopover])\n db.session.commit()", "def migrar(self):\r\n start = time.time()\r\n if not os.path.exists('output'):\r\n os.makedirs('output')\r\n # seleciona as tabelas\r\n res = self.cur_origem.execute(\r\n \"select rdb$relation_name from rdb$relations where rdb$view_blr is null and (rdb$system_flag is null or rdb$system_flag = 0) %s;\" % self.filtro)\r\n\r\n # para cada tabela\r\n for row, in res.fetchall():\r\n row = row.strip()\r\n\r\n # conta os registros\r\n countsql = self.cur_origem.execute(\r\n \"select count(*) as total from %s \" % row)\r\n count, = countsql.fetchall()[0]\r\n start_time = time.time()\r\n start_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"MIGRANDO: %s\\n NRO REGISTROS: %s registros\\n INICIO: %s\" % (\r\n row, count, start_datetime))\r\n\r\n # gera o create table e trunca a tabela ( se ja existir )\r\n create, tipos = self.ddl_table(row)\r\n self.cur_destino.execute(create)\r\n self.cur_destino.execute(\"TRUNCATE TABLE %s\" % row)\r\n\r\n # busca os dados\r\n self.cur_origem.execute(\"select * from %s \" % (row))\r\n\r\n # grava os dados no TXT\r\n with open(\"output/%s.txt\" % row, \"wb\") as f:\r\n writer = csv.writer(f, delimiter='|')\r\n writer.writerows(self.cur_origem.fetchall())\r\n\r\n # le o arquivo gravado e copia para o banco destino\r\n with open(\"output/%s.txt\" % row, \"r\") as f:\r\n try:\r\n self.cur_destino.copy_expert(\r\n \"\"\"COPY %s FROM STDIN WITH QUOTE '\"' DELIMITER '|' NULL '' CSV \"\"\" % row, f)\r\n except Exception as e:\r\n self.erros.append([\"%s\" % row, e])\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n TABELA COM ERRO %s\" %\r\n (end_datetime, round(end_time-start_time, 0), e))\r\n else:\r\n end_time = time.time()\r\n end_datetime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\" FIM: %s\\n TEMPO: %ss\\n OK\" %\r\n (end_datetime, round(end_time-start_time, 0)))\r\n end = time.time()\r\n print(\"TEMPO GASTO: %s s\" % (end-start))", "def __init__(self,):\n self.logger = conf._init_logger(logger=conf.LOGGER_ORM)\n self.logger = logging.getLogger(conf.LOGGER_ORM)\n\n self.logger.info(\"[+] Initilizing Orm [+]\")\n\n\n self.engine = sqlalchemy.create_engine(\n f\"mysql+mysqldb://{conf.DB_USER}:{conf.DB_PASSWORD}@{conf.DB_ADRESS}/{conf.DB_NAME}\")\n self.metadata = sqlalchemy.MetaData(bind=self.engine)\n self.metadata.reflect(only=[\"examens\", \"sections\", \"patients\", \"medecins\", \"types_intervention\"])\n self.conn = self.engine.connect()\n \"\"\"\n Load the ORM of different table into the class\n \"\"\"\n self.check_table()\n self.hl7_connections = sqlalchemy.Table(\"hl7_connections\", self.metadata)\n self.examens = sqlalchemy.Table(\"examens\", self.metadata)\n self.sections = sqlalchemy.Table(\"sections\", self.metadata)\n self.patients = sqlalchemy.Table(\"patients\", self.metadata)\n self.medecins = sqlalchemy.Table(\"medecins\", self.metadata)\n self.types_interventions = sqlalchemy.Table(\"types_intervention\", self.metadata)\n self.logger.info(\"[+] Orm initialized [+]\")", "def __init__(self):\n self.conn = psycopg2.connect(dbname=DB, user=DB_USER, password=DB_PW, host=HOST, port=PORT)\n self.categories = self.fill_category()\n self.fill_products()", "def __init__(self):\n self.dbcon = DbConnection.get_con()", "def database_conn(self, table, hr, data):\n # origionally from 
https://www.w3schools.com/python/python_mysql_insert.asp\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=WEATHERUSER,\n password=DBPASS,\n database=\"weather\"\n )\n mycursor = mydb.cursor()\n sql = f\"UPDATE weather.{table} SET hr = {hr}, dt = %s, temp = %s, pressure = %s, humidity = %s, dewpoint = %s, rain = %s where hr = {hr}\"\n mycursor.execute(sql, data)\n mydb.commit()", "def ingest(self):\n self.log.debug('starting the ``get`` method')\n\n dictList = self._create_dictionary_of_ned_d()\n self.primaryIdColumnName = \"primaryId\"\n self.raColName = \"raDeg\"\n self.declColName = \"decDeg\"\n\n tableName = self.dbTableName\n createStatement = u\"\"\"\n CREATE TABLE `%(tableName)s` (\n `primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',\n `Method` varchar(150) DEFAULT NULL,\n `dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,\n `dateLastModified` datetime DEFAULT CURRENT_TIMESTAMP,\n `updated` varchar(45) DEFAULT '0',\n `dist_derived_from_sn` varchar(150) DEFAULT NULL,\n `dist_in_ned_flag` varchar(10) DEFAULT NULL,\n `dist_index_id` mediumint(9) DEFAULT NULL,\n `dist_mod` double DEFAULT NULL,\n `dist_mod_err` double DEFAULT NULL,\n `dist_mpc` double DEFAULT NULL,\n `galaxy_index_id` mediumint(9) DEFAULT NULL,\n `hubble_const` double DEFAULT NULL,\n `lmc_mod` double DEFAULT NULL,\n `notes` varchar(500) DEFAULT NULL,\n `primary_ned_id` varchar(150) DEFAULT NULL,\n `redshift` double DEFAULT NULL,\n `ref` varchar(150) DEFAULT NULL,\n `ref_date` int(11) DEFAULT NULL,\n `master_row` tinyint(4) DEFAULT '0',\n `major_diameter_arcmin` double DEFAULT NULL,\n `ned_notes` varchar(700) DEFAULT NULL,\n `object_type` varchar(100) DEFAULT NULL,\n `redshift_err` double DEFAULT NULL,\n `redshift_quality` varchar(100) DEFAULT NULL,\n `magnitude_filter` varchar(10) DEFAULT NULL,\n `minor_diameter_arcmin` double DEFAULT NULL,\n `morphology` varchar(50) DEFAULT NULL,\n `hierarchy` varchar(50) DEFAULT NULL,\n `galaxy_morphology` varchar(50) DEFAULT NULL,\n `radio_morphology` varchar(50) DEFAULT NULL,\n `activity_type` varchar(50) DEFAULT NULL,\n `in_ned` tinyint(4) DEFAULT NULL,\n `raDeg` double DEFAULT NULL,\n `decDeg` double DEFAULT NULL,\n `eb_v` double DEFAULT NULL,\n `sdss_coverage` TINYINT DEFAULT NULL,\n PRIMARY KEY (`primaryId`),\n UNIQUE KEY `galaxy_index_id_dist_index_id` (`galaxy_index_id`,`dist_index_id`)\n ) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;\n DROP VIEW IF EXISTS `view_%(tableName)s_master_recorders`;\n CREATE\n VIEW `view_%(tableName)s_master_recorders` AS\n (SELECT \n `%(tableName)s`.`primary_ned_id` AS `primary_ned_id`,\n `%(tableName)s`.`object_type` AS `object_type`,\n `%(tableName)s`.`raDeg` AS `raDeg`,\n `%(tableName)s`.`decDeg` AS `decDeg`,\n `%(tableName)s`.`dist_mpc` AS `dist_mpc`,\n `%(tableName)s`.`dist_mod` AS `dist_mod`,\n `%(tableName)s`.`dist_mod_err` AS `dist_mod_err`,\n `%(tableName)s`.`Method` AS `dist_measurement_method`,\n `%(tableName)s`.`redshift` AS `redshift`,\n `%(tableName)s`.`redshift_err` AS `redshift_err`,\n `%(tableName)s`.`redshift_quality` AS `redshift_quality`,\n `%(tableName)s`.`major_diameter_arcmin` AS `major_diameter_arcmin`,\n `%(tableName)s`.`minor_diameter_arcmin` AS `minor_diameter_arcmin`,\n `%(tableName)s`.`magnitude_filter` AS `magnitude_filter`,\n `%(tableName)s`.`eb_v` AS `gal_eb_v`,\n `%(tableName)s`.`hierarchy` AS `hierarchy`,\n `%(tableName)s`.`morphology` AS `morphology`,\n `%(tableName)s`.`radio_morphology` AS `radio_morphology`,\n `%(tableName)s`.`activity_type` AS `activity_type`,\n 
`%(tableName)s`.`ned_notes` AS `ned_notes`,\n `%(tableName)s`.`in_ned` AS `in_ned`,\n `%(tableName)s`.`primaryId` AS `primaryId`\n FROM\n `%(tableName)s`\n WHERE\n (`%(tableName)s`.`master_row` = 1));\n \"\"\" % locals()\n\n self.add_data_to_database_table(\n dictList=dictList,\n createStatement=createStatement\n )\n\n self._clean_up_columns()\n self._get_metadata_for_galaxies()\n self._update_sdss_coverage()\n\n self.log.debug('completed the ``get`` method')\n return None", "def load_data(self) -> None:", "def read_db():\n with open(\"config.json\") as f:\n config = json.load(f)\n \n conn = psycopg2.connect(dbname='cage_sc_db', user='cage_db_user', \n password='legend', host='10.66.193.71')\n cursor = conn.cursor()\n\n # cmd = \"SELECT value_raw, timestamp FROM numeric_data WHERE endpoint_name='krstc_baseline' AND timestamp>'2019-09-27T00:00';\"\n \n # cmd = \"SELECT * FROM endpoint_id_map;\"\n \n # cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_coldPlate_temp' AND timestamp>'2019-09-03T00:02';\"\n \n # cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_pressure' AND timestamp>'2019-09-27T00:00';\"\n \n cmd = \"SELECT value_cal, timestamp FROM numeric_data WHERE endpoint_name='cage_ln_level' AND timestamp>'2019-09-27T00:00';\"\n \n # cmd = \"SELECT value_raw, timestamp FROM string_data WHERE endpoint_name='krstc_hv_status' AND timestamp>'2019-08-01';\"\n \n cursor.execute(cmd)\n\n # retrieve data. returns a list of tuples.\n record = cursor.fetchall()\n \n # print(type(record[0]))\n \n # dt = record[0][1]\n \n # print(dt)\n \n for rec in record:\n print(rec)", "def read_data(transferencias):\n # read in triples of user/artist/playcount from the input dataset\n transfer_count = transferencias.groupby([\"REFERENCIA_1\", \"REFERENCIA_ORIGEN\"]).IMPORTE.count().reset_index()\n\n # for index, row in transfer_count.iterrows():\n # cliente = Empresa.objects.get(fiscal_id=str(row['REFERENCIA_ORIGEN']))\n # proveedor = Empresa.objects.get(fiscal_id=str(row['REFERENCIA_1']))\n # proveedor.clients.add(cliente)\n # cliente.providers.add(proveedor)\n\n print(\"clientes y proveedores cargados\")\n\n data = pandas.DataFrame()\n data['user'] = transfer_count['REFERENCIA_ORIGEN'].astype(\"category\")\n data['artist'] = transfer_count['REFERENCIA_1'].astype(\"category\")\n data['transfers'] = transfer_count['IMPORTE'].astype(\"category\")\n\n transfers = coo_matrix((data['transfers'].astype(float),\n (data['artist'].cat.codes.copy(),\n data['user'].cat.codes.copy())))\n\n return data, transfers", "def setup(self):\n print(\"INIT DATA\")\n\n self.nutella = Product.objects.create(name=\"nutella\", nutriscore=\"e\")", "def bd_iniciar():\n db = bd_conecta()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()", "def listar(self):\n conn = None\n\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"SELECT id_aluno, nome_aluno, cpf_aluno, data_nasc_aluno, telefone_aluno FROM Alunos\")\n\n # Imprime o número de alunos cadastrados.\n print(f\"\\nHá {cur.rowcount} aluno(s) cadastrado(s): \")\n row = cur.fetchone()\n\n while row is not None:\n print(f\"\\nID: {row[0]}\\nNome: {row[1]}\\nCPF: {row[2]}\\nData de Nascimento: {row[3].strftime('%d/%m/%Y')}\\nTelefone: {row[4]}\\n\")\n row = cur.fetchone()\n \n cur.close()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n \n finally:\n if conn is not None:\n conn.close()", "def 
cargar_productos(self, obraSocial):\n self.limpiarTabla(self.tableProductos)\n\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,DescuentoModel.descuento,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n join(DescuentoModel).filter(DescuentoModel.producto==ProductoModel.codigo_barra).\\\n filter(DescuentoModel.obra_social==obraSocial,ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n for m, campo in enumerate(obj):\n self.tableProductos.setItem(n, m, QtGui.QTableWidgetItem(str(campo)))\n\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))", "def execute(self, context):\n\n # Initialize PostgreSQL hook\n self.postgres = PostgresHook(\n postgres_conn_id=self.postgres_conn_id,\n schema=self.postgres_schema).get_sqlalchemy_engine()\n\n # Initialize Socrata hook\n super().execute()\n\n # Load table\n table = self._select_table()\n self.table_dicts = [dict(row) for row in table]\n\n if self.replace:\n result = self.socrata.replace(self.dataset_id, self.table_dicts)\n else:\n # Code from etl-airflow\n for i in range(0, len(self.table_dicts), UPLOAD_CHUNK_SIZE):\n try:\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])\n except:\n print(f\"Error on record {i}\")\n result = self.socrata.upsert(self.dataset_id, self.table_dicts[i:i+UPLOAD_CHUNK_SIZE])", "def getDbase(self):\n for item in self.sqlData: # for every colummn name in the data\n self.sqdbase[item]=np.array(self.sqlData[item]) # add to the dictionary the clomunm name and the corresponding data\n \n self.sqlData['index'] = list(range(len(self.sqlData['time']))) # since we sometimes have even a column for index(pandas put it automatically) and sometimes not which will not be used for Stats we dropp it out\n self.sqdbase.pop('index') # we make sure that all dataFRames we are working with has inde column and the drop it\n return self.sqdbase", "def class_to_db(self):", "def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)", "def _run_query(self):", "def bd_conecta():\n if not hasattr(g, 'sqlite_db'):\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n g.sqlite_db = rv\n return g.sqlite_db", "def get_label():\r\n\r\n user = check_auth(request.headers, __name__)\r\n if user != True:\r\n return user\r\n user = authorize.get(request.headers.get('UserToken'))\r\n\r\n vozvrat = {}\r\n try:\r\n database = Database(config)\r\n except TypeError:\r\n vozvrat[\"messageError\"] = \"Нет подключения к БД\"\r\n return jsonify(vozvrat)\r\n\r\n vozvrat = []\r\n\r\n fields = [\r\n \"u.firstname\",\r\n \"u.lastname\",\r\n \"up.id\",\r\n \"up.name\",\r\n \"up.photo\",\r\n \"up.type\",\r\n \"up.method\",\r\n \"up.sale\",\r\n \"up.price\",\r\n \"c.name\",\r\n \"up.weight\",\r\n \"u2.name\",\r\n \"fp.id\",\r\n \"a.country\",\r\n \"a.city\",\r\n \"a.address\",\r\n \"a.lat\",\r\n \"a.lng\"\r\n ]\r\n\r\n query = sql.SQL(\"SELECT {} FROM users u \\\r\n RIGHT JOIN users_product up on u.id = 
up.user_id\\\r\n LEFT JOIN units u2 on up.unit_id = u2.id\\\r\n LEFT JOIN currencys c on up.currency_id = c.id\\\r\n LEFT JOIN favorit_products fp on u.id = fp.user_id\\\r\n LEFT JOIN address a on up.address_id = a.id\").format(\r\n sql.SQL(\",\").join(sql.Identifier(\r\n i.split('.')[0], i.split('.')[1]) for i in fields)\r\n )\r\n execute = database.select_data(query)\r\n if type(execute) != list:\r\n return execute\r\n\r\n data_append = {}\r\n for row in execute:\r\n for i in range(len(fields)):\r\n value = row[i]\r\n\r\n if fields[i] == \"up.id\":\r\n fields[i] = \"up.users_product_id\"\r\n if fields[i] == \"c.name\":\r\n fields[i] = \"c.currency\"\r\n if fields[i] == \"u2.name\":\r\n fields[i] = \"u2.unit\"\r\n if fields[i] == \"fp.id\":\r\n fields[i] = \"fp.is_favorit\"\r\n value = True if value != None else False\r\n\r\n data_append[fields[i].split('.')[1]] = value\r\n vozvrat.append(data_append)\r\n\r\n return jsonify(vozvrat)", "def __init__(self):\r\n self.con = lite.connect('Profile_database.db') # Opening of database file\r\n self.cursor = self.con.cursor()\r\n\r\n self.profiles_name_list = []\r\n self.output_zakladki = []\r\n self.output_leki = []\r\n self.output_leki_cegly = []\r\n self.output_lista_cegiel = []", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data = {}\n data[\"qid\"] = (\"i-\" + str(result.parliamentary_item_id))\n if type(result)==domain.AgendaItem:\n g = u\" \" + result.group.type + u\" \" + result.group.short_name\n else:\n g = u\"\" # !+ g?\n data[\"subject\"] = result.short_name\n data[\"title\"] = result.short_name\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"%ss/obj-%i\" % (\n result.type, result.parliamentary_item_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= u\"\"\n # remember original domain object\n data[\"id\"] = result.parliamentary_item_id\n data[\"_obj\"] = result\n # append processed result item\n data_list.append(data)\n self._data = data_list" ]
[ "0.6196703", "0.5996628", "0.5992133", "0.5989583", "0.59360296", "0.59161276", "0.5913229", "0.5900204", "0.588538", "0.5868029", "0.58524144", "0.5828052", "0.58185756", "0.5817846", "0.57922584", "0.5788057", "0.5701537", "0.5690327", "0.567897", "0.5660096", "0.5645931", "0.5637763", "0.5622868", "0.562049", "0.5617047", "0.560525", "0.55724216", "0.55574113", "0.5554959", "0.55540663", "0.5553354", "0.5542462", "0.552704", "0.5521694", "0.5511959", "0.55051196", "0.55014604", "0.5481726", "0.5480955", "0.54799527", "0.5457235", "0.5455094", "0.5446414", "0.54460377", "0.5444798", "0.54414743", "0.5438316", "0.54365396", "0.5408611", "0.5399318", "0.53897655", "0.5389174", "0.538299", "0.5379058", "0.53757435", "0.5369244", "0.53606904", "0.5358099", "0.53440195", "0.53430736", "0.533252", "0.5325361", "0.5322113", "0.53159285", "0.5314835", "0.53124034", "0.531083", "0.5308909", "0.52960384", "0.5292309", "0.52922124", "0.52913105", "0.5286321", "0.528123", "0.5277113", "0.52717274", "0.52686155", "0.5265313", "0.52643234", "0.525985", "0.52573955", "0.52470446", "0.5245103", "0.52426547", "0.5242171", "0.5241063", "0.5238251", "0.5235747", "0.5225946", "0.5223987", "0.52236915", "0.5223345", "0.5223295", "0.5221198", "0.52193713", "0.5207402", "0.52029717", "0.5201096", "0.52003956", "0.5195074", "0.51929325" ]
0.0
-1
function to visualize how much Q is displaced for each epsilon
def secuencia(R, Q, q): n = 1 r = [] for qq in q: for qqq in qq.eps: r.append(qqq) r = sorted(r) for l in r: print('la l', l) Qaux = [] for j in range(len(Q)): notaux = [] notaux.append(Q[j][0]+j*l[0]) notaux.append(Q[j][1]+(j+1)*l[0]) notaux.append(Q[j][2]) Qaux.append(notaux) # print(Qaux) Qaux[-1][1] = R[-1][1] dibuja(R, Qaux, n) n += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon_delta(self):", "def test_q(self):\n assert np.allclose(self.stepper.q, self.ODE.exact(self.stepper.t), rtol=1e-3, atol=1e-5)", "def greedy_eps(self, Q):\r\n s = self.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n if np.random.rand() > self.EPS:\r\n print(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n if (np.max(Q[s_x, s_y, s_vx, s_vy, :, :]) ==\r\n np.min(Q[s_x, s_y, s_vx, s_vy, :, :])):\r\n a = (0, 0)\r\n else:\r\n a = np.argmax(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n a = np.unravel_index(a, (3, 3)) - np.array([1, 1])\r\n a = (a[0], a[1])\r\n else:\r\n a = self.action_to_tuple(random.randrange(9))\r\n\r\n return a", "def epsilon():\n return _EPSILON", "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def q_criterion(a):\n print(\"Detection method: Q criterion\")\n Q = np.zeros((a.u.shape[0], a.u.shape[1]))\n print(a.u.shape[0], a.u.shape[1])\n #print(Q.shape)\n for i in range(a.u.shape[0]):\n for j in range(a.u.shape[1]):\n Q[i, j] = -0.5*(a.derivative['dudx'][i, j]**2 + a.derivative['dvdy'][i, j]**2) \\\n - a.derivative['dudy'][i, j] * a.derivative['dvdx'][i, j]\n return Q", "def test_qing(self):\n fun = get_problem('qing', self.dimension, -500, 500)\n self.assertAlmostEqual(fun(self.array10), 584.0, delta=1e-4)", "def test_9(self):\n\n sq_qe = gen_step_qe(1.42, 0.9)\n test_ill = Illumination()\n # test_qef = qe_filter(sq_qe)\n\n filtered_ill = test_ill * sq_qe\n\n assert isinstance(filtered_ill, Illumination)\n\n #plt.plot(filtered_ill.get_spectrum('eV')[0, :], filtered_ill.get_spectrum('eV')[1, :], label=\"filtered\")\n #plt.plot(test_ill.get_spectrum('eV')[0, :], test_ill.get_spectrum('eV')[1, :], label=\"original\")\n\n #plt.xlabel('wavelength (eV)')\n #plt.ylabel('spectrum (W/eV/m^2)')\n\n #plt.legend()\n\n #plt.show()", "def demo_neg():\n n_dim = 3\n A = -1 * np.eye(n_dim)\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n true_dist = scipy.stats.chi2(df=n_dim)\n\n q = np.linspace(-10, 0)\n\n plt.plot(\n q, true_dist.pdf(-1 * q), label='True', color='black')\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()", "def Qfun(Phieq,Phi,Phibar,taurad):\n #note Q is different from Perez-Becker and Showman by a factor of g (for consistency with Phi vs H)\n Q=(1/taurad)*(Phieq-(Phi+Phibar))\n\n return Q", "def quasinewton(x, V, alpha, p, gprev, gnew):\r\n broken = False\r\n p = np.array(p)[np.newaxis]\r\n gnew = np.array(gnew)[np.newaxis]\r\n gprev = np.array(gprev)[np.newaxis]\r\n gk = gprev.T\r\n gk1 = gnew.T\r\n yk = gk1 - gk\r\n sk = alpha*p.T\r\n I = np.eye(len(x))\r\n if sk.T@yk == 0:\r\n broken = True\r\n p = p.flatten()\r\n return p, V, broken\r\n rhok = (1/(sk.T@yk)).flatten()\r\n v = I - (rhok*yk@sk.T)\r\n Vnew = (v.T@V@v) + (rhok*sk@sk.T)\r\n pnew = -Vnew@gk1\r\n pnew = pnew.flatten()\r\n return pnew, Vnew, broken", "def quasi_optimalityTV(f, lam_init = 2.0, q = 0.9):\n \n lam = lam_init\n max_iter = 50\n error = np.zeros(max_iter)\n #alt_error = np.zeros(max_iter)\n u_old = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n for i in range(1, max_iter):\n lam = lam_init * (q ** i)\n u_new = ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n error[i] = np.linalg.norm(u_old - u_new)\n #alt_error[i] = np.linalg.norm(u_old - u_new) /abs(lam_init*(q ** i - q ** (i-1)))\n u_old = 
np.copy(u_new)\n\n #plt.plot(error)\n #plt.plot(alt_error)\n #plt.show()\n opt_idx = np.argmin(error[error != 0.0])\n t = 1.0 / (1.0 + lam_init * (q ** opt_idx))\n lam = lam_init * (q ** opt_idx)\n u= ChambollePock_denoise(f,lam, tau = 0.5, sig = 0.25, acc = True, tol = 1.0e-5)\n \n return u, t", "def Purcell(l_onde,eps,Q,V):\n return (3/(4*np.pi**2))*((l_onde*1e-6)/np.sqrt(eps))**3*(Q/V)", "def update_q(self):\n beta = self.EC_beta\n self.gamma_q = (self.gamma_s - self.gamma_r) * beta + (1 - beta) * self.gamma_q\n self.Sigma_q = (self.Sigma_s - self.Sigma_r) * beta + (1 - beta) * self.Sigma_q\n try:\n assert np.all(np.logical_not(np.isnan(self.gamma_q)))\n except:\n print(\"Invalid update encountered...\")", "def yule_q(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = a + b + c + d\n\n if n == 0:\n return np.nan\n elif p1 == n:\n # c and d are zero\n return _div(a - b, p1)\n elif p2 == n:\n # b and d are zero\n return _div(a - c, p2)\n elif q1 == n:\n # a and b are zero\n return _div(d - c, q1)\n elif q2 == n:\n # a and c are zero\n return _div(d - b, q2)\n\n return _div(self.covar(), a * d + b * c)", "def qgset(x):\n return 0.2855*x - 0.8565", "def __init__(self, epsilon=1e-14):\n self.epsilon = epsilon", "def epsilon_greedy(q, s, eps = 0.5):\n if random.random()<eps:\n return uniform_dist(q.actions).draw()\n else:\n return greedy(q,s)", "def test_convert_to_q(self):\n\n riskfree = .01\n lmbd = .01\n lmbd_s = .5\n lmbd_y = .5\n mean_v = .5\n kappa_s = 1.5\n kappa_y = .5\n eta_s = .1\n eta_y = .01\n rho = -.5\n\n theta = [riskfree, mean_v, kappa_s, kappa_y, eta_s, eta_y,\n rho, lmbd, lmbd_s, lmbd_y]\n param = CentTendParam.from_theta(theta)\n param.convert_to_q()\n\n kappa_sq = kappa_s - lmbd_s * eta_s\n kappa_yq = kappa_y - lmbd_y * eta_y\n scale = kappa_s / kappa_sq\n\n self.assertEqual(param.measure, 'Q')\n self.assertEqual(param.riskfree, riskfree)\n self.assertEqual(param.lmbd, 0)\n self.assertEqual(param.lmbd_s, lmbd_s)\n self.assertEqual(param.lmbd_y, lmbd_y)\n self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)\n self.assertEqual(param.kappa_s, kappa_sq)\n self.assertEqual(param.kappa_y, kappa_yq)\n self.assertEqual(param.eta_s, eta_s)\n self.assertEqual(param.eta_y, eta_y * scale**.5)\n self.assertEqual(param.rho, rho)\n self.assertTrue(param.is_valid())", "def ADM_QED(nf):\n Qu = 2/3\n Qd = -1/3\n Qe = -1\n nc = 3\n gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])\n gamma_QED_1 = np.zeros((2,154))\n gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,144))))\n gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,136))))\n gamma_QED_4 = np.zeros((136,154))\n gamma_QED = 
np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))\n\n if nf == 5:\n return gamma_QED\n elif nf == 4:\n return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 0)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\n elif nf == 3:\n return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 0)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")", "def ikine_pose_ur5(xdes, dxdes, ddxdes, q0): \n k_p = 550;\n k_o = 150;\n k = np.diag([k_p, k_p, k_p, k_o, k_o, k_o, k_o])\n best_norm_e1 = 0.01\n best_norm_e2 = 0.01\n max_iter = 20\n delta = 0.001\n dq_p\t\t\t= np.zeros(6)\n\n q = copy(q0)\n for i in range(max_iter):\n T = fkine_ur5(q)\n e1 = xdes[0:3] - T[0:3,3]\n e2 = quatError(xdes[3:7], rot2quat(T[0:3,0:3]))\n e = np.concatenate((e1,e2), axis=0)\n de = -np.dot(k,e)\n J = jacobian_pose_ur5(q,delta)\n Jinv = np.linalg.pinv(J)\n dq = np.dot(Jinv, dxdes - de )\n q = q + delta*dq\n \n if (np.linalg.norm(e2) < best_norm_e2) & (np.linalg.norm(e1)< best_norm_e1):\n\n best_norm_e2 = np.linalg.norm(e2)\n best_norm_e1 = np.linalg.norm(e1)\n q_best = q\n dq_best = dq\n ddq_best \t\t= \t(dq_best - dq_p)/delta\n #ddq_best = np.dot(Jinv, ( ddxdes - np.dot(dJ,dq_best) ))\n print(\"iter: \", i)\n print(\"norma position: \",best_norm_e1)\n print(\"norma orientation: \",best_norm_e2)\n #print(\"---------\")\n\n \tdq_p \t= dq\n return q_best, dq_best, ddq_best", "def _getQvals(self, board):\r\n state = self._getState(board)\r\n # create the input to neural network\r\n toNN = [state[i-1] for i in range(1, self.inputSize)]\r\n toNN.insert(0, 0.0)\r\n # find expected rewards\r\n qVals = []\r\n for i in range(self.rowSize):\r\n toNN[0] = float(i)\r\n qVals.append(self.Q.calculate(toNN))\r\n return qVals", "def Q_i(params):\n Q = params['Q'].value\n Qe = Q_e(params)\n return (Q ** -1 - np.real(Qe ** -1)) ** -1", "def quatError(Qdes, Q):\n\n we = Qdes[0]*Q[0] + np.dot(Qdes[1:4].transpose(),Q[1:4]) - 1\n e = -Qdes[0]*Q[1:4] + Q[0]*Qdes[1:4] - np.cross(np.transpose(Qdes[1:4]), np.transpose(Q[1:4]))\n Qe = np.array([ we, e[0], e[1], e[2] ])\n\n return Qe", "def plot_Q(graph,NCommunityClassifier,eps=1e-3,maxQ=False):\n q1=0\n q2=q1+2*eps\n Q_results=[0]\n i=1\n while q2-q1>eps:\n clfN=NCommunityClassifier(graph,Nmax=i)\n clfN.fit()\n q1=q2\n q2=clfN.Q \n Q_results.append(q2)\n i+=1\n plt.plot(np.arange(1,i+1),Q_results)\n plt.xlabel(\"Number of communities\")\n plt.ylabel(\"Modularity\")\n plt.show()\n if maxQ:\n return q2", "def quartic_potential(x):\n k1=1\n k2=10\n return (k1*x**4)-(k2*x**2)", "def demo():\n n_dim = 3\n A = np.eye(n_dim)\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n true_dist = scipy.stats.chi2(df=n_dim)\n\n q = np.linspace(0, 10)\n\n plt.plot(\n q, true_dist.pdf(q), label='True', color='black')\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()", "def QDot_detection(self):\r\n\r\n # Creates a list with the total intensities from the lines as to analyze which lines contain quantum dots\r\n total_intensity_list = []\r\n for (columnName, columnData) in self.df4.iteritems():\r\n 
total_intensity = 0\r\n for i in columnData.values:\r\n total_intensity += i\r\n total_intensity_list.append(total_intensity)\r\n \r\n # Construct the 3-sigma threshold\r\n avg_tot_intensity = mean(total_intensity_list)\r\n stdev_tot_intensity = stdev(total_intensity_list)\r\n\r\n threshold = 3 * stdev_tot_intensity + avg_tot_intensity\r\n\r\n QDot_slits = [total_intensity_list.index(i) + 1 for i in total_intensity_list if i >= threshold]\r\n \r\n # If 2 lines next to each other are labeled as quantum dots, the slit with the lowest total intensity will be discarded\r\n to_be_deleted_slits = []\r\n for i in range(0, len(QDot_slits) - 1):\r\n if QDot_slits[i + 1] - QDot_slits[i] == 1:\r\n if total_intensity_list[QDot_slits[i + 1] - 1] > total_intensity_list[QDot_slits[i] - 1]:\r\n to_be_deleted_slits.append(QDot_slits[i])\r\n elif total_intensity_list[QDot_slits[i + 1] - 1] < total_intensity_list[QDot_slits[i] - 1]:\r\n to_be_deleted_slits.append(QDot_slits[i + 1])\r\n \r\n for slit in to_be_deleted_slits:\r\n QDot_slits.remove(slit)\r\n\r\n # Optional code to plot the total intensities of every slit in the 2d map.\r\n # -------------------------------------------------------------------------\r\n # fig = plt.figure(figsize=(10,7))\r\n # plt.plot(total_intensity_list, label=\"Total intensity\")\r\n # plt.plot([x - 1 for x in QDot_slits], [total_intensity_list[x - 1] for x in QDot_slits], 'rx', label=\"SI-NP\")\r\n # plt.hlines(avg_tot_intensity, 0, 200, colors='red', label=\"Average total intensity\")\r\n # plt.hlines(threshold, 0, 200, colors='green', label='3-sigma threshold')\r\n # plt.title(\"Total intensities for a single datafile\")\r\n # plt.xlabel(\"Position along the slit (pixels)\")\r\n # plt.ylabel(\"Total intensity (arbitrary units)\")\r\n # plt.xlim(0,200)\r\n # plt.ylim(0,60)\r\n # plt.legend()\r\n # plt.show()\r\n\r\n return QDot_slits", "def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)", "def epsilon_fit_Chang(l_onde,vl1,vl2,vt1,vt2,gl1,gl2,gt1,gt2,f_l1,f_l2,f_t1,f_t2,epsinf1,epsinf2):\n # Chang PRB38 12369\n v = 1e4/l_onde\n \n epsx = (epsinf1+epsinf2)/2 - (f_t1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) - (f_t2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2)\n epsz = 1/(((1/2)*(1/epsinf1 + 1/epsinf2)) + (f_l1*(vl1**2 - vt1**2))/(-vl1**2 + v**2 + 1j*v*gl1) + (f_l2*(vl2**2 - vt2**2))/(-vl2**2 + v**2 + 1j*v*gl2))\n \n# eps1 = epsinf1*(1 - (f_t1*(vl1**2 - vt1**2))/(vt1**2 - v**2 - 1j*v*gt1))\n# eps2 = epsinf2*(1 - (f_t2*(vl2**2 - vt2**2))/(vt2**2 - v**2 - 1j*v*gt2))\n# epsx = (1/2)*(eps1+eps2)\n# epsz = 1/((1/2)*(1/eps1 + 1/eps2))\n \n return (epsx.real + 1j*np.abs(epsx.imag)),(epsz.real + 1j*np.abs(epsz.imag))", "def q(self):\n return self._x", "def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()", "def epsilon(self):\n return self.__epsilon", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)", "def test_qqlnu_np_scalar(self):\n\n R_mg = 
np.asarray([1.00938347, 1.01333147, 1.01762706])\n # Number of NP events generated in MG [25603, 15708, 9833]\n # Number of SM events generated in MG [23536, 10207, 4851]\n\n bins = np.asarray([1200., 1400., 1600., 1800.])\n nbins = len(bins)-1\n\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n wc = wcxf.WC('SMEFT', 'Warsaw up', center, {'lequ1_2232': 1e-7})\n wc_obj = flavio.WilsonCoefficients()\n wc_obj.set_initial_wcxf(wc)\n R = pplnu.R_sigma_qqlnu_int(13e3**2, bins[i], bins[i+1], 'mu', wc_obj, par2)\n err = (R-R_mg[i])/R_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')", "def test_qqlnu_np_vector(self):\n\n R_mg = np.asarray([ 8.76973438, 12.82963675, 18.01933393])\n # Number of NP events generated in MG [25183, 16648, 11444]\n # Number of SM events generated in MG [23536, 10207, 4851]\n\n bins = np.asarray([1200., 1400., 1600., 1800.])\n nbins = len(bins)-1\n\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n wc = wcxf.WC('SMEFT', 'Warsaw up', center, {'lq3_2211': 1e-7})\n wc_obj = flavio.WilsonCoefficients()\n wc_obj.set_initial_wcxf(wc)\n R = pplnu.R_sigma_qqlnu_int(13e3**2, bins[i], bins[i+1], 'mu', wc_obj, par2)\n err = (R-R_mg[i])/R_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')", "def _q_x(self):\n lambda_r = self.latt_par['lambda_r'].value \n return 2*np.pi*self.k/lambda_r", "def demo_expr():\n n_dim = 2\n A = np.eye(n_dim)\n A[1, 1] = -0.5\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n a = np.ones(n_dim)\n d = 0\n approx = approx_quad_expr(\n mean, covar, A, a, d)\n\n # Sample from true dist\n n_sample = int(1e5)\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i] + a @ x[i] + d\n\n q = np.linspace(-10, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(-10, 10, 101)\n bins[0] = -np.inf\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()\n\n central_moments_sample = scipy.stats.moment(\n q_samples, moment=[0, 1, 2, 3, 4])\n print(central_moments_sample)", "def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon", "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def Q_e(params):\n return (params['Q_e_real'].value +\n 1j * params['Q_e_imag'].value)", "def q_(w,R,lam=1064.0e-9):\n\n if R!=np.inf:\n q=np.pi*w**2*R/(np.pi*w**2-1j*R*lam)\n else:\n q=1j*np.pi*w**2/lam\n\n return q", "def epsilon(self):\n return self._epsilon", "def __init__(self, nA=6, gamma=.9, alpha=.9,\n epsilon_start=1, epsilon_decay=.999, epsilon_min=0.25):\n self.nA = nA\n self.Q = defaultdict(lambda: np.zeros(self.nA))\n self.epsilon = epsilon_start\n self.epsilon_decay = epsilon_decay\n self.epsilon_min = epsilon_min\n self.gamma = gamma\n self.alpha = alpha\n print(\"Epsilon: {}, E Decay: {}, E Min: {}, Gamma: {}, Alpha: {}\".format(self.epsilon, self.epsilon_decay, self.epsilon_min, self.gamma, self.alpha))", "def quintil_rent(x,p,d):\n \n if x <= d[p][0.20]:\n return 'Q1'\n elif x <= d[p][0.4]:\n return 'Q2'\n elif x <= d[p][0.6]: \n return 'Q3'\n elif x <= d[p][0.8]:\n 
return 'Q4'\n else:\n return 'Q5'", "def PQa(P_0, Q_0, D):\n A_i_2 = B_i_1 = 0\n A_i_1 = B_i_2 = 1\n\n G_i_2 = -P_0\n G_i_1 = Q_0\n\n P_i = P_0\n Q_i = Q_0\n\n while True:\n\n a_i = floor((P_i + sqrt(D))/Q_i)\n A_i = a_i*A_i_1 + A_i_2\n B_i = a_i*B_i_1 + B_i_2\n G_i = a_i*G_i_1 + G_i_2\n\n yield P_i, Q_i, a_i, A_i, B_i, G_i\n\n A_i_1, A_i_2 = A_i, A_i_1\n B_i_1, B_i_2 = B_i, B_i_1\n G_i_1, G_i_2 = G_i, G_i_1\n\n P_i = a_i*Q_i - P_i\n Q_i = (D - P_i**2)/Q_i", "def epsilongreedy_policy(Qvalues_oa):\n \n X = np.zeros_like(Qvalues_oa)\n \n # where are the actions with maximal value?\n maxX = Qvalues_oa == np.max(Qvalues_oa, axis=-1, keepdims=True)\n \n # assign 1-eps probability to max actions\n X += (1-epsilon) * maxX / maxX.sum(axis=-1, keepdims=True)\n \n # assign eps probability to other actions\n othX = np.logical_not(maxX)\n X += epsilon * othX / othX.sum(axis=-1, keepdims=True)\n \n assert np.allclose(X.sum(-1), 1.0)\n \n return X", "def inv_p_error(q,s,v, dq,ds,dv):\n return np.sqrt( (ds**2*(q-v)**2 + dv**2*(q+s-1)**2 + dq**2*(v+s-1)**2)/(v+s-1)**4 )", "def test_QSe_Run(self):\n fit_group, result = BayesQuasi(Program='QSe',\n SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._res_ws,\n MinRange=-0.547607,\n MaxRange=0.543216,\n SampleBins=1,\n ResolutionBins=1,\n Elastic=False,\n Background='Sloping',\n FixedWidth=False,\n UseResNorm=False,\n WidthFile='',\n Loop=True,\n Save=False,\n Plot='None')\n self._validate_QSe_shape(result, fit_group)\n self._validate_QSe_value(result, fit_group)", "def q(self) -> float:\n return self._pwr.imag", "def test_qqlnu_np_tensor(self):\n\n R_mg = np.asarray([1.42841172, 1.63238727, 1.87684397])\n # Number of NP events generated in MG [25779, 16499, 10905]\n # Number of SM events generated in MG [23536, 10207, 4851]\n\n bins = np.asarray([1200., 1400., 1600., 1800.])\n nbins = len(bins)-1\n\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n wc = wcxf.WC('SMEFT', 'Warsaw up', center, {'lequ3_2212': 1e-7})\n wc_obj = flavio.WilsonCoefficients()\n wc_obj.set_initial_wcxf(wc)\n R = pplnu.R_sigma_qqlnu_int(13e3**2, bins[i], bins[i+1], 'mu', wc_obj, par2)\n err = (R-R_mg[i])/R_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')", "def call_epsilon_GaN(l_onde):\n v=1e4/l_onde\n epsinfE = 5.04\n epsinfA = 5.01\n wLE = 742.1\n wLA = 732.5\n wTE = 560.1\n wTA = 537\n gLE = 3.8\n gLA = 4 \n \n epsilonE = epsinfE*(1+(wLE**2-wTE**2)/(wTE**2-v**2-1j*gLE*v))\n epsilonA = epsinfA*(1+(wLA**2-wTA**2)/(wTA**2-v**2-1j*gLA*v))\n \n return epsilonE,epsilonA", "def run_qpe(self, n_ancillae=8):\n quantum_instance = aqua.QuantumInstance(\n backend=Aer.get_backend(\"statevector_simulator\"), shots=1\n )\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n # Hamil_qop = op_converter.to_weighted_pauli_operator(Hamil_mat)\n qpe = aqua.algorithms.QPE(Hamil_qop, num_ancillae=n_ancillae)\n qpe_result = qpe.run(quantum_instance)\n # qc = qpe.construct_circuit(measurement=True)\n print(\"qpe_result\", qpe_result)\n return qpe_result[\"eigenvalue\"], qpe_result, qpe", "def epsilon_fit_Chang_homemade(l_onde,vl1,vl2,vt1,vt2,gl1,gl2,gt1,gt2,f_t1,f_t2,f_l1,f_l2,epsinf1,epsinf2):\n # Chang PRB38 12369\n v = 1e4/l_onde\n \n epsx = (epsinf1+epsinf2)/2 - (f_l1*(vl1**2 - vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) - (f_l2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2)\n epsz = 1/(1/((epsinf1+epsinf2)/2) + (f_l1*(vl1**2 - 
vt1**2))/(-vt1**2 + v**2 + 1j*v*gt1) + (f_l2*(vl2**2 - vt2**2))/(-vt2**2 + v**2 + 1j*v*gt2))\n #epsx = (1/2)*(eps1+eps2)\n #epsz = 1/((1/2)*(1/eps1 + 1/eps2))\n \n #epsx = (1/2)*epsinf1*(1-(f_t1*(v**2 - vl1**2 + 1j*v*gl1)/(v**2 - vt1**2 + 1j*v*gt1))-\\\n # (f_t2*(v**2 - vl2**2 + 1j*v*gl2)/(v**2 - vt2**2 + 1j*v*gt2)))\n #epsz = 1/((1/2)*(1/epsinf2)*(1+(f_l1*(v**2 - vt1**2 +1j*v*gl1)/(v**2 - vl1**2 +1j*v*gl1))+\\\n # (f_l2*(v**2 - vt2**2 +1j*v*gl2)/(v**2 - vl2**2 +1j*v*gl2))))\n return (epsx.real + 1j*np.abs(epsx.imag)),(epsz.real + 1j*np.abs(epsz.imag))", "def epsilon_GaAs_TISB(l_onde,n_2D,w12,f,Ts):\n ev = 1.60218e-19\n eps0 = 8.854e-12\n c = 3e8\n \n v = 1e4/l_onde\n \n epsinf = 11;\n wL = 291.2;\n wT = 267.89;\n T = 2.54;\n epsGaAs = epsinf*(1+(wL**2-wT**2)/(wT**2-v**2-1j*T*v)) \n \n meff = 9.109e-31*0.067\n ns = (n_2D/22e-7)*1e6 # passage de cm-2 ? m-2 On fixe l'epaisseur du puit a 20nm, ordre de grandeur...\n wp = np.sqrt((ns*ev**2)/(meff*eps0*epsinf)) # SI\n wp = wp*1e-2/(2*np.pi*c) #cm-1\n if w12==0:\n wp=0\n else:\n w12 = 1e4/w12 # cm-1\n \n \n eps_ssbandes = epsinf*(f*wp**2./(w12**2-v**2-1j*Ts*v))\n eps = epsGaAs + eps_ssbandes\n return eps.real + 1j*np.abs(eps.imag)", "def _epsilon(vds) -> np.ndarray:\n return vds[\"rhod_tot\"] / vds[\"rho\"]", "def qnode(params):\n variational_circuit(params)\n return qml.expval(qml.PauliX(1))", "def update( self, qhalf = \"qhalf\", qconvex = \"qconvex\" ):\n\n def makeDots( v1, v2, v3 ):\n \"\"\" helper function\n \"\"\"\n res = []\n for nx in xrange( -2, 3 ):\n for ny in xrange( -2, 3 ):\n for nz in xrange( -2, 3 ):\n res.append( ( v1 * nx ) + ( v2 * ny ) + ( v3 * nz ) )\n return res\n\n def str2vec( s ):\n \"\"\" helper function\n \"\"\"\n if ',' in s:\n ss = s.split( \",\" )\n else:\n ss = s.split( )\n\n fs = map( lambda s: float( s.strip( ) ), ss )\n return Vec( fs[ 0 ], fs[ 1 ], fs[ 2 ] )\n\n\n rep = self.rep.minimize()\n\n try:\n dots = makeDots( rep.v1, rep.v2, rep.v3 )\n\n if 'win' in sys.platform:\n p1 = Popen( [ qhalf, \"Fp\" ], stdin = PIPE, stdout = PIPE ) #, close_fds = True )\n p2 = Popen( [ qconvex, \"o\" ],stdin = PIPE, stdout = PIPE ) #, close_fds = True )\n else:\n p1 = Popen( [ qhalf, \"Fp\" ], stdin = PIPE, stdout = PIPE, close_fds = True )\n p2 = Popen( [ qconvex, \"o\" ],stdin = PIPE, stdout = PIPE, close_fds = True )\n\n\n s = '3 1\\n'\n s += '0 0 0\\n'\n s += '4\\n'\n s += str( len( dots ) - 1 ) + '\\n'\n for d in dots:\n if d.vlen() > 0.001:\n n = d.norm()\n off = - d.vlen() / 2.0\n s += \"%.20f %.20f %.20f %.20f\\n\" % ( n[ 0 ], n[ 1 ], n[ 2 ], off )\n\n ret = p1.communicate( s )[ 0 ]\n ret = p2.communicate( ret )[ 0 ]\n\n lines = ret.split( '\\n' )\n params = lines[ 1 ].split( )\n params = map( lambda s: int( s.strip() ), params )\n dcount = params[ 0 ]\n\n lines = lines[ 2: ]\n pnts = map( lambda s: str2vec( s ), lines[ :dcount ] )\n\n lines = lines[ dcount: ]\n\n polys = []\n for l in lines:\n nums = l.split()\n if nums:\n nums = map( lambda s: int( s ), nums )\n nums = nums[ 1: ]\n ppnt = map( lambda n: pnts[ n ], nums )\n polys.append( Poly( ppnt ) )\n except Exception, e:\n print \"errrrrorrr:\", e\n\n self.mesh = Mesh( polys )", "def __init__(self, alpha=0.5, epsilon=0.1):\n self.q = dict()\n self.alpha = alpha\n self.epsilon = epsilon", "def resultant(P, Q):\n return np.linalg.det(P.sylvester(Q))", "def qu(N60):\n return 0.29 * N60**0.72 * 100", "def __init__(self, epsilon=1e-7):\n super().__init__()\n self.epsilon = epsilon", "def qpoint(force_constant=[], qpt=[0.0, 0.0, 0.0]):\n qpt = np.array(qpt)\n exp_iqpt 
= np.exp(1.0j * qpt)\n dmat = force_constant * exp_iqpt\n vals, vects = np.linalg.eigh(dmat)\n return vals, vects", "def epsilon_greedy_probs(self, nA, Q_s, i_count, eps=None):\r\n epsilon = 1.0 / i_count\r\n if eps is not None:\r\n epsilon = eps\r\n \r\n policy_s = np.ones(nA) * epsilon / nA\r\n policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)\r\n return policy_s", "def Q_term(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33): # strain rate-33 component\n #---------------------------------------------------------------------#\n # Numerator and denominator #\n #---------------------------------------------------------------------#\n num = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n den1 = omega1*omega1 + omega2*omega2 + omega3*omega3\n den2 = (s11*s11 + s12*s12 + s13*s13 + s12*s12 + s22*s22 + s23*s23 +\\\n s13*s13 + s23*s23 + s33*s33)**0.5\n den = ((2.0/3.0)**0.5)* den1 * den2\n #---------------------------------------------------------------------#\n # Q calculation #\n #---------------------------------------------------------------------#\n Q = num/den\n\n return Q", "def B(q):\n # print('Value q')\n # print(q)\n if q > 0 and q != 0 and q != 1:\n result = -(q*math.log(q,2) + (1-q)*math.log(1-q,2))\n else:\n result = 0\n # print('Result of B')\n # print(result)\n return result", "def test_quintic2(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array7), 0.0)", "def qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, render = False, test=False):\n env = gym.make('Taxi-v2')\n n_states, n_actions = env.observation_space.n, env.action_space.n\n Q = init_q(n_states, n_actions, type=\"ones\")\n timestep_reward = []\n for episode in range(episodes):\n print(f\"Episode: {episode}\")\n s = env.reset()\n a = epsilon_greedy(Q, epsilon, n_actions, s)\n t = 0\n total_reward = 0\n done = False\n while t < max_steps:\n if render:\n env.render()\n t += 1\n s_, reward, done, info = env.step(a)\n total_reward += reward\n a_ = np.argmax(Q[s_, :])\n if done:\n Q[s, a] += alpha * ( reward - Q[s, a] )\n else:\n Q[s, a] += alpha * ( reward + (gamma * Q[s_, a_]) - Q[s, a] )\n s, a = s_, a_\n if done:\n if render:\n print(f\"This episode took {t} timesteps and reward: {total_reward}\")\n timestep_reward.append(total_reward)\n break\n if render:\n print(f\"Here are the Q values:\\n{Q}\\nTesting now:\")\n if test:\n test_agent(Q, env, n_tests, n_actions)\n return timestep_reward", "def qp(self, eta_u, eta_v):\n z = 1\n v = np.array(eta_u)\n n_features = v.shape[0]\n u = np.sort(v)[::-1]\n cssv = np.cumsum(u) - z\n ind = np.arange(n_features) + 1\n cond = u - cssv / ind > 0\n rho = ind[cond][-1]\n theta = cssv[cond][-1] / float(rho)\n uu = np.maximum(v - theta, 0)\n vv = np.array(())\n return uu, vv", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def prob3(x0 = 0, K = 9):\n #Code from prob1start\n x = sy.symbols('x')\n exp = (sy.sin(x) + 1)**(sy.sin(sy.cos(x)))\n der = sy.simplify(sy.diff(exp, x))\n \n f = sy.lambdify(x, exp)\n f1 = sy.lambdify(x, der)\n #Code from prob 1 end\n \n #just for ease of coding...\n difD = np.array(x0)\n \n #Initialize what I want\n FDQ1 = 
[]\n FDQ2 = []\n BDQ1 = []\n BDQ2 = []\n CDQ2 = []\n CDQ4 = []\n N = []\n correct = f1(x0)\n \n #Get errors\n for i in range(0, K):\n n = 10**(-i)\n \n N.append(n)\n \n FDQ1.append(np.abs(correct - fdq1(f, difD, h = n)))\n \n FDQ2.append(np.abs(correct - fdq2(f, difD, h = n)))\n \n BDQ1.append(np.abs(correct - bdq1(f, difD, h = n)))\n \n BDQ2.append(np.abs(correct - bdq2(f, difD, h = n)))\n \n CDQ2.append(np.abs(correct - cdq2(f, difD, h = n)))\n \n CDQ4.append(np.abs(correct - cdq4(f, difD, h = n)))\n \n #Plot results\n plt.loglog(N, FDQ1, label = \"Order 1 Forward\")\n plt.loglog(N, FDQ2, label = \"Order 2 Forward\")\n plt.loglog(N, BDQ1, label = \"Order 1 Backward\")\n plt.loglog(N, BDQ2, label = \"Order 2 Backward\")\n plt.loglog(N, CDQ2, label = \"Order 2 Centered\")\n plt.loglog(N, CDQ4, label = \"Order 4 Centered\")\n \n #Tidy results\n plt.xlabel(\"h\")\n plt.ylabel(\"Absolute Error\")\n plt.title(\"title for approximate derivative errors graphs that isn\\'t in book\")\n plt.legend(loc = 2)\n plt.show()\n return\n raise NotImplementedError(\"Problem 3 Incomplete\")", "def q_from_ea(ea, p):\n return 0.622 * ea / (p - 0.378 * ea)", "def Q_learning_test(env,alpha,gamma,episodes, q_table):\n %time\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n total_reward = 0\n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n \n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n next_state, reward, done, info = env.step(action) \n\n\n if reward == -10:\n penalties += 1\n \n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n \n total_reward += reward\n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n\n \n print(\"Training finished.\\n\")\n \n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened testing reward per episode\", pad = 30 , size = BIGGER_SIZE)\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major', labelsize=16);\n plt.tick_params(axis='both', which='minor', labelsize=16);\n #plt.xlim(100000, 200000);\n #plt.ylim(0,50)\n # plt.xticks(np.arange(0, episodes+1, 5000));\n # plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def rochelobe(q):\n return 0.49*q**(2./3)/(0.6*q**(2./3) + log(1+q**(1./3)))", "def Q(self):\n self.dualEigenmatrix()", "def calc_q_square(self):\n return self._q_x()**2 + self._q_z()**2", "def qrst_tm(x):\n return 0.2228*x - 0.6685", "def test_init_q(self):\n\n riskfree = .01\n lmbd = .01\n lmbd_s = .5\n lmbd_y = .5\n mean_v = .5\n kappa_s = 1.5\n kappa_y = .5\n eta_s = .1\n eta_y = .01\n rho = -.5\n\n param = CentTendParam(riskfree=riskfree,\n lmbd=lmbd, lmbd_s=lmbd_s, lmbd_y=lmbd_y,\n mean_v=mean_v, kappa_s=kappa_s, kappa_y=kappa_y,\n eta_s=eta_s, eta_y=eta_y, rho=rho, measure='Q')\n\n kappa_sq = kappa_s - lmbd_s * eta_s\n kappa_yq = kappa_y - lmbd_y * eta_y\n scale = kappa_s / kappa_sq\n\n self.assertEqual(param.measure, 'Q')\n self.assertEqual(param.riskfree, riskfree)\n self.assertEqual(param.lmbd, 0)\n self.assertEqual(param.lmbd_s, lmbd_s)\n self.assertEqual(param.lmbd_y, lmbd_y)\n self.assertEqual(param.mean_v, mean_v * kappa_y / kappa_yq * scale)\n self.assertEqual(param.kappa_s, kappa_sq)\n 
self.assertEqual(param.kappa_y, kappa_yq)\n self.assertEqual(param.eta_s, eta_s)\n self.assertEqual(param.eta_y, eta_y * scale**.5)\n self.assertEqual(param.rho, rho)\n self.assertTrue(param.is_valid())\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n param.convert_to_q()", "def a_q(self, phi, ci, tl):\n\t return (self.j(phi, tl)*(ci - self.gamma(tl)))/(4.*(ci + 2.*self.gamma(tl)))", "def epsilon(self, length: int, time: int) -> float:\n return (self.beta ** (1.0 / (length / 2))) ** time", "def test_sqpdfo_prelim(self):\n n,nb,mi,me,x,lm,lb,ub,scalefacX,Delta,nfix,indfix,xfix,vstatus,xstatus,sstatus,dstatus,QZ,RZ,scale,poised,Y_radius,poised_model,X,fX,Y,fY,ciX,ciY,ceX,ceY,poisedness_known,m,gx,normgx,fcmodel,ind_Y,i_xbest,cur_degree,rep_degree,plin,pdiag,pquad,indfree,info,options,values = \\\n sqpdfo_prelim_(self.func,self.x0,self.lm0,self.Delta0,\n self.lb,self.ub,self.scaleX,self.scalefacX,self.cur_degree,self.rep_degree,\n self.plin,self.pdiag,self.pquad,self.c,self.initial_Y,self.kappa_ill,\n self.factor_FPR,self.Lambda_FP,self.Lambda_CP,\n self.eps_L,self.lSolver,self.hardcons,self.stratLam,self.xstatus,\n self.sstatus,self.dstatus)\n \n correctn = 3\n correctnb = 2\n correctmi = 0\n correctme = 2\n correctx = array([0.500000000000000, 1.000000000000000, 0.500000000000000]).T\n correctlm = array([ 0, 0, 0,-0.333333332763891,-0.000000000249999]).T\n correctlb = array([-0.500000000000000, 0, -np.Inf]).T\n correctub = array([np.Inf, np.Inf, np.Inf]).T\n correctscalefacX = array([ 1, 1, 1])\n correctDelta = 1\n correctnfix = 0\n correctindfix = array( [])\n correctxfix = array([ 0, 0, 0]).T\n correctvstatus = array([0, 0, 0]).T\n correctxstatus = array([ 1, 1, 1, 1]).T\n correctsstatus = array([ 1, 1, 1, 1])\n correctdstatus = array([0, 0, 0, 0]).T\n correctQZ = array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n correctRZ = array([\n [1, 1, 1, 1],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n correctscale = array([1, 1, 1, 1]).T\n correctpoised = 1\n correctY_radius = 1\n correctpoised_model = 1\n correctX = array([\n [0.500000000000000, -0.500000000000000, 0.500000000000000, 0.500000000000000],\n [1.000000000000000, 1.000000000000000, 0, 1.000000000000000],\n [0.500000000000000, 0.500000000000000, 0.500000000000000, -0.500000000000000]])\n correctfX = array([ 1.500000000000000, 1.500000000000000, 0.500000000000000, 1.500000000000000])\n correctY = array([\n [0.500000000000000, -0.500000000000000, 0.500000000000000, 0.500000000000000],\n [1.000000000000000, 1.000000000000000, 0, 1.000000000000000],\n [0.500000000000000, 0.500000000000000, 0.500000000000000, -0.500000000000000]])\n correctfY = array([ 1.500000000000000, 1.500000000000000, 0.500000000000000, 1.500000000000000])\n correctciX = array( [])\n correctciY = array([])\n correctceX = array([\n [2, 1, 1, 1],\n [3, 2, 1, 0]])\n correctceY = array([\n [2, 1, 1, 1],\n [3, 2, 1 , 0]])\n correctpoisedness_known = 1\n correctm = 3\n correctgx = array([ 0, 1, 0]).T\n correctnormgx = 1\n correctfcmodel = array([\n [1.500000000000000, 0, 1.000000000000000, 0],\n [2.000000000000000, 1.000000000000000, 1.000000000000000, 1.000000000000000],\n [3.000000000000000, 1.000000000000000, 2.000000000000000, 3.000000000000000]])\n correctind_Y = array([ 0, 1, 2, 3])\n correcti_xbest = 0\n correctcur_degree = 4\n correctrep_degree = 4\n correctplin = 4\n correctpdiag = 7\n correctpquad = 10\n correctindfree = array([ 0, 1, 2])\n \n #print \"sqpdfo_prelim outputs\"\n #print 
n,nb,mi,me,x,lm,lb,ub,scalefacX,Delta,nfix,indfix,xfix,vstatus,xstatus,sstatus,dstatus,QZ,RZ,scale,poised,Y_radius,poised_model,X,fX,Y,fY,ciX,ciY,ceX,ceY,poisedness_known,m,gx,normgx,fcmodel,ind_Y,i_xbest,cur_degree,rep_degree,plin,pdiag,pquad,indfree,info,options,values\n \n self.assertEqual(n, correctn)\n self.assertEqual(int(nb), correctnb)\n self.assertEqual(mi, correctmi)\n self.assertEqual(Delta, correctDelta)\n self.assertEqual(nfix, correctnfix)\n self.assertEqual(poised, correctpoised)\n self.assertEqual(Y_radius, correctY_radius)\n self.assertEqual(poised_model, correctpoised_model)\n self.assertEqual(poisedness_known, correctpoisedness_known)\n self.assertEqual(m, correctm)\n self.assertEqual(normgx, correctnormgx)\n self.assertEqual(i_xbest, correcti_xbest)\n self.assertEqual(cur_degree, correctcur_degree)\n self.assertEqual(rep_degree, correctrep_degree)\n self.assertEqual(plin, correctplin)\n self.assertEqual(pdiag, correctpdiag)\n self.assertEqual(pquad, correctpquad)\n #print \"me\", me\n #print \"correctme\", correctme\n self.assertEqual(me, correctme)\n self.assertTrue(compare_array(correctx,x, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctlm,lm, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctlb,lb, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctub,ub, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctscalefacX,scalefacX, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctindfix,indfix, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctxfix,xfix, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctvstatus,vstatus, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctxstatus,xstatus, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctsstatus,sstatus, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctdstatus,dstatus, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctQZ,QZ, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctRZ,RZ, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctX,X, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctfX,fX, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctY,Y, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctfY,fY, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctciX,ciX, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctciY,ciY, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctceX,ceX, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctceY,ceY, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctgx,gx, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctfcmodel,fcmodel, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctind_Y,ind_Y, self.abs_tol, self.rel_tol))\n self.assertTrue(compare_array(correctindfree,indfree, self.abs_tol, self.rel_tol))", "def d_q_hisano(self, mchi):\n w = self.MW**2/mchi**2\n def gAV(x):\n bx = np.sqrt(1-x/4+0*1j)\n out = np.real_if_close(1/(24*bx) * np.sqrt(x) * (8 - x - x**2) * np.arctan(2*bx/np.sqrt(x))\\\n - 1/24 * x * (2 - (3+x)*np.log(x)))\n return out\n return (self.alpha)**2/(self.MW**2*self.sw**4) * ((self.dchi**2 - 1)/8 * gAV(w))", "def test_QFT(self):\n op = qml.QFT(wires=range(3))\n res = op.matrix()\n exp = QFT\n assert np.allclose(res, exp)", "def error(self): \n if not self.terminal:\n err = 
sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def get_probs(Q_s, epsilon, nA):\n policy_s = np.ones(nA) * epsilon / nA\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nA)\n return policy_s", "def test_potential_differences(self):\n t, x_n, x_p = self.t, self.x_n, self.x_p\n\n np.testing.assert_array_almost_equal(\n self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)\n )\n np.testing.assert_array_almost_equal(\n self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),\n self.delta_phi_p(t, x_p),\n decimal=5,\n )", "def Tlosses(self, dq = np.zeros(1) , ddq = np.zeros(1)): \n \n T = np.dot( self.Ia , ddq ) + np.dot( self.Da , dq )\n \n return T", "def call_epsilon_AlN(l_onde):\n v=1e4/l_onde\n epsinfE = 4.160\n epsinfA = 4.350\n wLE = 909.6\n wLA = 888.9\n wTE = 667.2\n wTA = 608.5\n g = 2.2\n \n epsilonE = epsinfE*(1+(wLE**2-wTE**2)/(wTE**2-v**2-1j*g*v))\n epsilonA = epsinfA*(1+(wLA**2-wTA**2)/(wTA**2-v**2-1j*g*v))\n \n return epsilonE,epsilonA", "def KB_Dist(P,Q):\r\n \r\n \r\n K=0\r\n Epsilon=0.000001\r\n Q+=Epsilon\r\n P+=Epsilon\r\n for x in range(len(Q)):\r\n K-=P[x]*np.log(Q[x]/P[x])\r\n return K", "def _calculate_quilting_error(self, resistance=3.24e6):\n\n self.sigma_quilting = ((0.126*self.actuator_spacing**2)**2)/(resistance*self.thickness**2)", "def project_curve(q):\n n,T = q.shape\n if n==2:\n dt = 0.35\n if n==3:\n dt = 0.2\n epsilon = 1e-6\n\n iter = 1\n res = ones(n)\n J = zeros((n,n))\n\n s = linspace(0,1,T)\n\n qnew = q.copy()\n qnew = qnew / sqrt(innerprod_q2(qnew,qnew))\n\n qnorm = zeros(T)\n G = zeros(n)\n C = zeros(300)\n while (norm(res) > epsilon):\n if iter > 300:\n break\n\n # Jacobian\n for i in range(0,n):\n for j in range(0,n):\n J[i,j] = 3 * trapz(qnew[i,:]*qnew[j,:],s)\n \n J += eye(n)\n\n for i in range(0,T):\n qnorm[i] = norm(qnew[:,i])\n \n # Compute the residue\n for i in range(0,n):\n G[i] = trapz(qnew[i,:]*qnorm,s)\n \n res = -G\n\n if (norm(res) < epsilon):\n break\n\n x = solve(J,res)\n C[iter] = norm(res)\n\n delG = Basis_Normal_A(qnew)\n temp = zeros((n,T))\n for i in range(0,n):\n temp += x[i]*delG[i]*dt\n \n qnew += temp\n iter += 1\n \n qnew = qnew/sqrt(innerprod_q2(qnew,qnew))\n\n return qnew", "def findQ(H_s):\n nl,dl = symToTransferFn(H_s)\n syst = sp.lti(nl,dl)\n p1,p2 = syst.poles[0], syst.poles[1]\n return np.sqrt(abs(p1*p2))/abs(p1+p2)", "def __init__(self, nA=6):\n self.nA = nA\n self.Q = defaultdict(lambda: np.zeros(self.nA))\n self.gamma=1.0\n self.alpha=0.2 #this can be potentially changed\n self.epsilon=1.0\n self.eps_start=1.0\n self.eps_decay=.9\n self.eps_min=0.0005", "def q_greedify_policy(env, V, pi, s, gamma):\n ### START CODE HERE ###\n ##q(s,a)=sigma(P(ss')*(gamma*V(s')+R(s,a,s'))\n q = np.zeros((env.action_space.n))\n for idx, action in enumerate(range(env.action_space.n)):\n for prob_next_state, next_state, reward_next_state, done in env.P[s][action]:\n q[idx] += prob_next_state * ((gamma * V[next_state]) + reward_next_state)\n\n greedy_action = np.argmax(q)\n # print(greedy_action)\n for action, action_prob in enumerate(pi[s]):\n if action == greedy_action:\n print(action, greedy_action)\n pi[s][action] = 1\n else:\n pi[s][action] = 0", "def _q_z(self):\n D = self.latt_par['D'].value\n lambda_r = self.latt_par['lambda_r'].value\n gamma = self.latt_par['gamma'].value\n return 2*np.pi*(self.h/D - self.k/lambda_r/np.tan(gamma))", "def test_get_Q_alt(self):\n vect_length = 
50\n x_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n y_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n\n self.ds.spw_Nfreqs = vect_length\n\n for i in range(vect_length):\n Q_matrix = self.ds.get_Q_alt(i)\n # Test that if the number of delay bins hasn't been set\n # the code defaults to putting that equal to Nfreqs\n self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)\n\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n Q_matrix = self.ds.get_Q_alt(vect_length//2)\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n # Sending in sinusoids for x and y should give delta functions\n\n # Now do all the same tests from above but for a different number\n # of delay channels\n self.ds.set_Ndlys(vect_length-3)\n for i in range(vect_length-3):\n Q_matrix = self.ds.get_Q_alt(i)\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n Q_matrix = self.ds.get_Q_alt((vect_length-2)//2-1)\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n # Sending in sinusoids for x and y should give delta functions\n\n # Make sure that error is raised when asking for a delay mode outside\n # of the range of delay bins\n pytest.raises(IndexError, self.ds.get_Q_alt, vect_length-1)\n\n # Ensure that in the special case where the number of channels equals\n # the number of delay bins, the FFT method gives the same answer as\n # the explicit construction method\n multiplicative_tolerance = 0.001\n self.ds.set_Ndlys(vect_length)\n for alpha in range(vect_length):\n Q_matrix_fft = self.ds.get_Q_alt(alpha)\n Q_matrix = self.ds.get_Q_alt(alpha, allow_fft=False)\n Q_diff_norm = np.linalg.norm(Q_matrix - Q_matrix_fft)\n self.assertLessEqual(Q_diff_norm, multiplicative_tolerance)\n\n # Check for error handling\n pytest.raises(ValueError, self.ds.set_Ndlys, vect_length+100)", "def em_epsilon_cdp(epsilon, delta, k):\n if delta <= 0:\n return epsilon / k\n else:\n log_delta = np.log(1 / delta)\n return max(\n epsilon / k,\n np.sqrt((8 * log_delta + 8 * epsilon) / k) -\n np.sqrt(8 * log_delta / k))", "def g(self, RD):\n g = 1 / np.sqrt((1 + 3 * np.power(self.q, 2)) / np.power(np.pi, 2)) \n \n return g", "def test_get_Q(self):\n vect_length = 50\n x_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n y_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n\n self.ds.spw_Nfreqs = vect_length\n #Test if there is a warning if user does not pass the beam\n key1 = (0, 24, 
38)\n key2 = (1, 24, 38)\n uvd = copy.deepcopy(self.uvd)\n ds_t = pspecdata.PSpecData(dsets=[uvd, uvd])\n\n for i in range(vect_length):\n try:\n Q_matrix = self.ds.get_Q(i)\n # Test that if the number of delay bins hasn't been set\n # the code defaults to putting that equal to Nfreqs\n self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)\n except IndexError:\n Q_matrix = np.ones((vect_length, vect_length))\n\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n try:\n Q_matrix = self.ds.get_Q(vect_length/2)\n except IndexError:\n Q_matrix = np.ones((vect_length, vect_length))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n\n # Now do all the same tests from above but for a different number\n # of delay channels\n self.ds.set_Ndlys(vect_length-3)\n for i in range(vect_length-3):\n try:\n Q_matrix = self.ds.get_Q(i)\n except IndexError:\n Q_matrix = np.ones((vect_length,vect_length))\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n try:\n Q_matrix = self.ds.get_Q((vect_length-2)/2-1)\n except IndexError:\n Q_matrix = np.ones((vect_length,vect_length))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n\n # Make sure that error is raised when asking for a delay mode outside\n # of the range of delay bins\n pytest.raises(IndexError, self.ds.get_Q, vect_length-1)" ]
[ "0.68171436", "0.6283236", "0.61913866", "0.6162494", "0.6137614", "0.61371356", "0.61351055", "0.60722744", "0.5978949", "0.5960126", "0.59534186", "0.59087783", "0.5905014", "0.5886242", "0.5830619", "0.5824134", "0.5795988", "0.57408494", "0.5716115", "0.5714753", "0.5707399", "0.56822425", "0.5679688", "0.56559277", "0.56491315", "0.562882", "0.5610037", "0.5605076", "0.5603901", "0.56020355", "0.5596007", "0.55914634", "0.5588853", "0.55769444", "0.55698776", "0.5564466", "0.5561765", "0.5550033", "0.55483294", "0.55344373", "0.55328405", "0.55282915", "0.5526801", "0.55120504", "0.5502196", "0.54981154", "0.5489038", "0.54880667", "0.54860073", "0.5485895", "0.54814947", "0.5470427", "0.54628855", "0.5460009", "0.54579216", "0.5455358", "0.545412", "0.54442", "0.54440176", "0.54370266", "0.5426414", "0.5424766", "0.54208976", "0.5416534", "0.54104894", "0.54095507", "0.5403319", "0.5395391", "0.5394736", "0.5389876", "0.5388608", "0.53863376", "0.5384786", "0.5368094", "0.53619874", "0.536135", "0.5360785", "0.5358121", "0.5353125", "0.53526807", "0.53516287", "0.5350273", "0.5346777", "0.53361315", "0.5331318", "0.5324358", "0.532365", "0.532333", "0.53192115", "0.53178364", "0.53134954", "0.5307921", "0.5305562", "0.5303643", "0.5301508", "0.52998704", "0.5299677", "0.5297873", "0.5297513", "0.52945316" ]
0.6147279
4
Generate and prepare a query melody from a .csv file
def create_query(query_path):
    query = pd.read_csv(query_path, names=["inicio", "duración", "tono", query_path[62:-4]]).drop([0], axis=0)
    time_where_query_starts = float(query.iloc[0, 0])
    # convert each value from string to float and shift the song so it is forced to start at zero
    for i in range(len(query)):
        if type(query.iloc[i, 0]) == str:
            query.iloc[i, 0] = float(query.iloc[i, 0])
        if type(query.iloc[i, 1]) == str:
            query.iloc[i, 1] = float(query.iloc[i, 1])
        if type(query.iloc[i, 2]) == str:
            query.iloc[i, 2] = float(query.iloc[i, 2])
        query.iloc[i, 0] = (query.iloc[i, 0]-time_where_query_starts)
    return prepare_melody(query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def procesarFilaCuerpo(fila):\r\n csv = \"\"\r\n columnas = fila.split(\"</td>\")\r\n for col in columnas:\r\n csv += procesarColumnaCuerpo(col)+\";\"\r\n \r\n csv = csv[:-1] #quitar el śltimo ;\r\n print csv\r\n return csv", "def format_porteurs(filepath):\n fieldnames, rows = get_header_rows(filepath)\n\n if \"statut\" in fieldnames:\n fieldnames.append(\"situation_societariat_entrance\")\n fieldnames.append(\"situation_situation\")\n for row in rows:\n statut = row['statut']\n row['situation_societariat_entrance'] = \"\"\n if statut == \"Associé\":\n row['situation_societariat_entrance'] = \"01/01/2015\"\n row['situation_situation'] = PORTEUR_STATUS_MATCH.get(statut)\n\n\n if 'coordonnees_address1' in fieldnames and 'coordonnees_address2' in fieldnames:\n fieldnames.append('coordonnees_address')\n for row in rows:\n row['coordonnees_address'] = row['coordonnees_address1'] + \\\n '\\n' + row['coordonnees_address2']\n\n if \"coordonnees_civilite\" in fieldnames:\n fieldnames.append('coordonnees_sex')\n for row in rows:\n if row['coordonnees_civilite'].lower() == u\"mademoiselle\":\n row['coordonnees_civilite'] = u\"Madame\"\n\n if row['coordonnees_civilite'] == u'Madame':\n row['coordonnees_sex'] = 'F'\n else:\n row['coordonnees_sex'] = 'M'\n\n if \"zus\" in fieldnames:\n fieldnames.append(\"coordonnees_zone_qual\")\n for row in rows:\n if row['zus'] == '1':\n row['coordonnees_zone_qual'] = 'zus'\n\n write_csv_file(filepath, fieldnames, rows)", "def csv_out(d):\n\theaders = ('쿠션, 파운데이션, 컨실러, 파우더, 블러쉬/블러셔/브론징, 컨투어링/하이라이터, 프라이머, UV프로텍터, 아이브로우, 아이라이너, 마스카라, 섀도우/글리터, 립/립스틱/틴트, 립케어/립밤/립글로스/립 오일, 스킨/토너/토닉, 로션/에멀젼, 미스트, 기타, zzz').split(',')\n\tglobal line\n\twith open('pony_refinedddd.csv', 'w', encoding='EUC-KR') as csv_file:\n\t\tcsvf = csv.writer(csv_file, delimiter=',')\n\t\tcsvf.writerow(headers)\n\t\tfor y in range(len(d)):\n\t\t\tfor z in range(len(d[y])):\n\t\t\t\t'''\n\t\t\t\tline = [d[y]['스킨/토너/토닉'], d[y]['로션/에멀젼'], \n\t\t\t\t\t\td[y]['미스트'], d[y]['기타'], d[y]['쿠션'],\n\t\t\t\t\t\td[y]['파운데이션'], d[y]['컨실러'],d[y]['파우더'], d[y]['블러쉬/블러셔/브론징'],\n\t\t\t\t\t\td[y]['컨투어링/하이라이터'], d[y]['프라이머'], d[y]['UV프로텍터'],\n\t\t\t\t\t\td[y]['아이브로우'],d[y]['아이라이너'], d[y]['마스카라'], d[y]['섀도우/글리터'],\n\t\t\t\t\t\td[y]['립/립스틱/틴트'], d[y]['립케어/립밤/립글로스/립 오일'], d[y]['zzz']]\n\t\t\t\t'''\n\n\t\t\t\tline.append(d[y][z])\n\t\t\tcsvf.writerow(line)\n\t\t\tline=[]", "def lectcsv(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".csv\")\n c=[NOM]\n #ouverture du fichier et recuperation du contenu\n with open(NOM) as f:\n contenu = csv.reader(f, delimiter=' ', quotechar='|')\n for row in contenu:\n c.append(row[0].split(';'))#separation du string \n return c", "def importer():\n\n #Lager liste der eg legg transaksjonar som blir henta og ikkje laga:\n get_list = []\n\n #Gjer txt-fila i mappen om til csv-fil\n file_fixer()\n\n with open(out_path) as file:\n reader = csv.reader(file)\n r_0 = next(reader)\n r_0.append(\"type\")\n r_0.append('amount')\n r_0.append('category')\n r_0.append('account')\n r_0.append('project')\n\n\n for row in reader:\n #Legger til dei fire kollonenne (amount, account, subaacount, project), tomme.\n row.append(\"\")\n row.append(\"\")\n\n #Omformatterer rader:\n row = format_fix(row)\n row.append(\"\")\n row.append(\"\")\n row.append(\"\")\n print(row)\n\n\n try:\n obj, created = Transaction.objects.get_or_create(\n date=row[0],\n transaction_type=row[1],\n description=row[2],\n amount=row[3]\n )\n\n except Transaction.MultipleObjectsReturned:\n continue\n\n if 
not created:\n get_list.append(obj.pk)\n\n return get_list", "def guardar_CSV(self):\n participantes = self.__disparos.copy()\n archivo = input(\"Ingrese nombre del archivo: \")\n with open(f\"{archivo}.txt\", 'a') as csv_file:\n campos = ['idDisparo', 'nroParticipante', 'nombre', 'apellido', 'edad', 'sexo', 'disparos', 'mejor_disparo', 'promedio', 'puntaje_total']\n csv_writer = csv.DictWriter(csv_file, fieldnames=campos)\n csv_writer.writeheader()\n for linea in participantes:\n csv_writer.writerow(linea)\n print(\n f\"\"\"\n ==========================================\n == SE HAN GUARDADO LOS DATOS ==\n ==========================================\n \"\"\"\n )", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def dataset_1():\n csv = pd.read_csv('resources/dataset_1.csv') # Pandas loads the CSV file as a DataFrame object\n csv.fillna('', inplace=True) # Pandas fills empty celles with NaN. We replace every Nan value with an emtpy string.\n csv.num_rue = csv.num_rue.apply(str) # Cast street numbers to strings\n # Create a new column named 'address' which concatenates the columns ['num_rue', 'cpltnum_ru', 'type_rue', 'article_ru', 'nom_rue']\n # csv[['num_rue', 'cpltnum_ru', 'type_rue', 'article_ru', 'nom_rue']] select a subset of the table 'csv'.\n # .agg(' '.join, axis=1) is equivalent to merge the selected cells of every lines as 'num_rue' + ' ' + 'cpltnum_ru' + ' ' + 'type_rue' + ' ' + 'article_ru' + ' ' + 'nom_rue'\n csv['address'] = csv[['num_rue', 'cpltnum_ru', 'type_rue', 'article_ru', 'nom_rue']].agg(' '.join, axis=1)\n return csv", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. 
Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def ort_kreis_reader(self, csv_file):\n\n ort_kreis_df = pd.read_csv(csv_file, usecols=[1,3], names=['Ort', 'Kreis'], skiprows=1)\n\n ort_kreis_df = ort_kreis_df.drop_duplicates(keep='first')\n\n #cities kreis names are null, so fill their kreis slot with that city name\n ort_kreis_df['Kreis'].fillna(ort_kreis_df['Ort'], inplace=True)\n\n cleaned_ort_kreis_df = ort_kreis_df.replace(to_replace=' Städte|-Kreis| Kreis|Kreis | Land|Landkreis |Städteregion ', value='', regex=True)\n\n return cleaned_ort_kreis_df", "def ouvrir_fichier():\r\n df = pandas.read_csv(\r\n 'ong.csv',\r\n header=2,\r\n names=[\r\n 'id',\r\n 'country',\r\n 'year',\r\n 'emissions',\r\n 'value',\r\n 'footnotes',\r\n 'source'\r\n ]\r\n )\r\n if df is None:\r\n return abort(404)\r\n else:\r\n return df", "def uvozi_filme(cur):\n cur.execute(\"DELETE FROM narocila;\")\n with open('podatki/narocila.csv') as datoteka:\n podatki = csv.reader(datoteka)\n stolpci = next(podatki)\n poizvedba = \"\"\"\n INSERT INTO narocila VALUES ({})\n \"\"\".format(', '.join([\"?\"] * len(stolpci))) #---------zakaj tukaj vprašaji???-------------\n for vrstica in podatki:\n cur.execute(poizvedba, vrstica)", "def get_data(self, csv_file):\n pass", "def _set_grille_csv(self):\n with open(self.csvPath, \"r\") as csvFile:\n fileRead = csv.reader(csvFile, delimiter=\",\")\n\n #We read each row of the csv file\n for row in fileRead:\n rowSplitted = row[0].split(\";\")\n self._grilleCSV.append(rowSplitted)", "def reformate_park_csv(list_num_park=[1, 2, 3],\n list_date_park=['2015', '2016'],\n sep=';'):\n\n # Reading parkX_20XX.csv ...\n df = create_df_park_data(list_num_park, list_date_park)\n\n # Dropping Useless columns for speed up\n df.drop(park_col_type['drop'], axis=1, inplace=True)\n\n # Converting in datetime types and keeping in GMT+01:\n print(\"Converting 'Date' column in datetime type ...\")\n df['Date'] = pd.to_datetime(df['Date'], format=\"%d/%m/%Y %H:%M\")\n\n # we create an ident for each hour \"Date_hour_int\"\n print('Constructing id for each date & hour ...')\n df[\"Date_hour_int\"] = df[\"Date\"].dt.year*10**6 + df[\"Date\"].dt.month*10**4\\\n + df[\"Date\"].dt.day*10**2 + df[\"Date\"].dt.hour\n\n # we create a dataframe with \"production_mean_hour\" value for each\n # Eolienne*date_hour_int\n print(\"Computing 'Production_mean_hour' ...\")\n df_product_mean = df[df[\"Fonctionnement\"] == 1]\\\n .groupby([\"Eolienne\", \"Date_hour_int\"])[\"Production\"]\\\n .mean().reset_index().rename(columns={\"Production\": \"Production_mean_hour\"})\n\n # we add this value in the initial dataset \"df\"\n df = pd.merge(df, df_product_mean,\n on=[\"Eolienne\", \"Date_hour_int\"], how=\"left\")\n df = df[park_col_type['keep']]\n\n # output csv files per turbine :\n for num_turb in range(1, 12):\n fname_out = data_reformated_dir + 'turb_' + str(num_turb) + '.csv'\n print('Storing ' + fname_out + ' ...')\n df_tmp = df.loc[df['Eolienne'] == 
'Turb'+str(num_turb)]\n df_tmp.to_csv(fname_out, sep=sep, index=False)", "def CSV(id_poke):\r\n poke = [] #creer une liste vide\r\n with open('BDD/pokemon.csv') as csv_file: #ouvre le fichier pokemon.csv\r\n csv_reader = csv.reader(csv_file, delimiter=',') #place les lignes du csv dans une variable\r\n for row in csv_reader: #parcours des lignes du csv\r\n if row[0] == str(id_poke): #si l'id de la ligne = id du pokemon\r\n poke = list(row) #variable = ligne correspondante dans le csv\r\n return(poke) #retourne une liste avec les caracteristiques du poke\r", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def csv_delimiter_examples():\n number_table = read_csv_file(\"number_table.csv\", \" \")\n print_table(number_table)\n print()\n name_table = read_csv_file(\"name_table.csv\", \",\")\n print_table(name_table)", "def loadCSV(input_file):", "def transform(self):\n with open(self.csv_path, \"r\") as f:\n csv_entries = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]\n\n nested_fields = get_nested_fieldnames(csv_entries[0])\n # values of these fields should be transformed to a list\n # list_fields = set()\n # for entry in csv_entries:\n # for k, v in entry.items():\n # if '||' in v:\n # list_fields.add(k)\n list_fields = {\n \"BITSTREAM Download URL\",\n \"BITSTREAM License\",\n \"BITSTREAM Webshop URL\",\n \"dc.contributor\",\n \"dc.contributor.author\",\n \"dc.contributor.editor\",\n \"dc.date.available\",\n \"dc.date.accessioned\",\n \"dc.date.issued\",\n \"dc.date.submitted\",\n \"dc.dateSubmitted\",\n \"dc.description.abstract\",\n \"dc.description.provenance\",\n \"dc.grantproject\",\n \"dc.identifier\",\n \"dc.identifier.pr\",\n \"dc.language\",\n \"dc.notes\",\n \"dc.number\",\n \"dc.redirect\",\n \"dc.relation.ispartofseries\",\n \"dc.relationisFundedBy\",\n \"dc.subject\",\n \"dc.subject.classification\",\n \"dc.subject.other\",\n \"dc.title\",\n \"dc.title.alternative\",\n \"dc.type\",\n \"oapen.collection\",\n \"oapen.grant.number\",\n \"oapen.grant.program\",\n \"oapen.imprint\",\n \"oapen.relation.hasChapter\",\n \"oapen.relation.hasChapter_dc.title\",\n \"oapen.relation.isFundedBy\",\n \"oapen.relation.isFundedBy_grantor.name\",\n \"oapen.relation.isPartOfBook\",\n \"oapen.relation.isPartOfBook_dc.title\",\n \"oapen.relation.isPublishedBy_publisher.name\",\n \"oapen.relation.isPublisherOf\",\n \"oapen.relation.isbn\",\n \"oapen.remark.public\",\n \"peerreview.anonymity\",\n \"peerreview.id\",\n \"peerreview.open.review\",\n \"peerreview.publish.responsibility\",\n \"peerreview.review.decision\",\n \"peerreview.review.stage\",\n \"peerreview.review.type\",\n \"peerreview.reviewer.type\",\n }\n # add custom 'dc.subject.classification_code'\n list_fields.add(\"dc.subject.classification_code\")\n entries = transform_dict(csv_entries, convert, nested_fields, list_fields)\n\n # Transform release into JSON Lines format saving in memory buffer\n # Save in memory buffer to gzipped file\n list_to_jsonl_gz(self.transform_path, entries)", "def _importInDjango(self):\n\n with open(settings.DATA_PATH, 'r', encoding='latin-1') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for raw in reader:\n\n # Créer ou mettre à jour la division\n division, created = Division.objects.get_or_create(\n nom=raw['Division']\n )\n if created:\n self.stdout.write(\n 'Divion {} 
ajoutée'.format(division.nom)\n )\n\n # Créer ou mettre à jour les équipes\n equipeDom, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 1'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeDom.nom)\n )\n\n equipeExt, created = Equipe.objects.get_or_create(\n nom=raw['Equipe 2'],\n division=division\n )\n if created:\n self.stdout.write(\n 'Equipe {} ajoutée'.format(equipeExt.nom)\n )\n\n # Créer ou mettre à jour la rencontre\n scoreDom = 0 if raw['Score 1'] == '' else int(raw['Score 1'])\n scoreExt = 0 if raw['Score 2'] == '' else int(raw['Score 2'])\n forfaitDom = True if raw['Forfait 1'] == 'true' else False\n forfaitExt = True if raw['Forfait 2'] == 'true' else False\n date = datetime.datetime.strptime(raw['Date de rencontre'], '%d/%m/%Y')\n heure = datetime.datetime.strptime(raw['Heure'], '%H:%M')\n rencontre, created = Rencontre.objects.update_or_create(\n numero=int(raw['N° de match']),\n equipeDom=equipeDom,\n equipeExt=equipeExt,\n defaults={\n 'date': date,\n 'heure': heure,\n 'scoreDom': scoreDom,\n 'scoreExt': scoreExt,\n 'forfaitDom': forfaitDom,\n 'forfaitExt': forfaitExt,\n }\n )\n if created:\n self.stdout.write(\n 'Rencontre {} / {} ajoutée'.format(\n rencontre.equipeDom,\n rencontre.equipeExt\n )\n )", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def main():\n with open('csv_files/products.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" description{}\".format(str(i)),\n \" type{}\".format(str(i)),\n \" {}\".format(str(random.randint(1, 100)))])\n\n with open('csv_files/customers.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" first_name{}\".format(str(i)),\n \" last_name{}\".format(str(i)),\n \" address{}\".format(str(i)),\n \" phone_number{}\".format(str(i)),\n \" email{}\".format(str(i))])", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def get_influencer_csv(csv):\n\n df = pd.read_csv(csv)\n df = df[(df.Followers > MIN_FOLLOWERS) & (df.Followers < MAX_FOLLOWERS)]\n df = df.dropna(subset=['Email'])\n\n csv_name = csv.replace('.csv', '') + '_influencers.csv'\n df.to_csv(csv_name, index=False)", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def importar_visualizar_tabela(self):\r\n\r\n self.tabela_clientes = pd.read_csv('telecom_users.csv') # armazenando arquivo csv em uma variavel\r\n self.tabela_clientes = self.tabela_clientes.drop([\"Unnamed: 0\"], axis=1) # apagando a coluna Unnamed: 0, axist=1 -> para excluir a coluna, axist=0 -> 
excluir a linha (exist = eixo)\r\n print(self.tabela_clientes)\r\n # print(self.tabela_clientes.columns) # para mostrar todas as colunas da tabela \r\n self.tabela_clientes['NovaColuna'] = 1 # criar uma nova coluna se não existir, se caso ja exista, irá substituir todos os valores na coluna para 1\r", "def generate_csv(filename, delimiter, newline, quantity, qchar, encoding):\n header, rows = parse_csv(filename, delimiter, qchar, newline, encoding)\n combinations = get_n_random_combinations(rows, quantity)\n combinations = clean_rows(combinations, rows)\n header, rows = add_new_column(\n header, combinations, 'email', email_generator)\n return header, rows", "def generate_query_file(self, query_file, xml_name, apply_stemmer):\n\t\tif apply_stemmer:\n\t\t\tlogging.info('INICIANDO: geração de arquivo de consultas com stemmer')\n\t\telse:\n\t\t\tlogging.info('INICIANDO: geração de arquivo de consultas sem stemmer')\n\n\t\tcontent = self.read_xml(xml_name)\n\t\tif apply_stemmer:\n\t\t\t\tquery_file = query_file.split(\".\")[0] + \"_stemmer.\" + query_file.split(\".\")[1]\n\t\telse:\n\t\t\tquery_file = query_file.split(\".\")[0] + \"_nostemmer.\" + query_file.split(\".\")[1]\n\n\t\twith open(query_file, 'w', newline='') as csvfile:\n\t\t\tfieldnames = ['QueryNumber', 'QueryText']\n\t\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t\t\twriter.writeheader()\n\t\t\tfor index in range(0, len(content['QueryNumber'])):\n\t\t\t\tquery_text = self.tokenize_query(content['QueryText'][index])\n\t\t\t\tif apply_stemmer:\n\t\t\t\t\tquery_text = StemmingWords().stemming_tokens(query_text)\n\t\t\t\tlogging.info('Escrevendo consulta '+str(index+1)+'/'+str(len(content['QueryNumber'])))\n\t\t\t\twriter.writerow({'QueryNumber': content['QueryNumber'][index], 'QueryText': query_text})\n\t\tlogging.info('FINALIZADO: geração de arquivo de consultas')", "def dump_csv():\n df = helper.load_dataframe('asintosku').reset_index()\n df['min'] = None\n df['max'] = None\n df.asin = df.asin + np.where(\n df.isprime == 0, '_seller', '_prime')\n del df['isprime']\n dfold = load_csv()\n merged = dfold.append(df, ignore_index=True, sort=True).sort_values(\n 'min', ascending=False).drop_duplicates(['seller_sku'])\n merged[['asin', 'mean', 'min', 'max', 'seller_sku']].to_csv(\n datafolder+filename, index=False)", "def csv_reader(file_obj):\n global gagnant\n global gain\n # le tableau qui contient les gagnants\n reader = csv.reader(file_obj, delimiter=';')\n gagnant = next(reader)\n gain = next(reader)\n gagnant = [list(map(int,gagnant))]\n gagnant=gagnant[0]\n gain = [list(map(int,gain))]\n gain=gain[0]", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def read_precios(path=\".\"):\n\n here = Path(path)\n sucursales = sucursales_prov(here)\n productos = pd.read_csv(here / \"productos.csv\")\n precios = [read_precio(f, 
sucursales) for f in Path(path).glob(\"precios_*.csv\")]\n\n # aplico merges de todos los precios\n def merge(left, right):\n return (\n pd.merge(left, right, on=[\"producto_id\", \"id_prov\", \"provincia\"], how=\"inner\").dropna()\n # .drop([\"sucursal_id_x\", \"sucursal_id_y\"], axis=1)\n )\n\n precios = functools.reduce(merge, precios)\n # eliminar columnas repetidas\n precios = precios.loc[:, ~precios.columns.duplicated()]\n precios = precios.rename(columns={\"cadena_x\": \"cadena\"})\n\n # elimina columnas innecesarias.\n # basicamente sucursal_* y cadena_y (queda cadena, renombrada antes)\n for c in set(precios.columns):\n if c.startswith((\"sucursal\", \"cadena_\")):\n precios.drop(c, axis=1, inplace=True)\n\n # cruzamos datos con productos\n precios = pd.merge(\n precios,\n productos[[\"id\", \"marca\", \"nombre\", \"categoria1\", \"categoria2\", \"categoria3\"]],\n left_on=\"producto_id\",\n right_on=\"id\",\n ).drop([\"id\", \"id_prov\"], axis=1)\n\n precios.sort_index(axis=1, inplace=True)\n\n # obtenemos el periodo maximo inicio vs fin (asumiendo columnas ordenadas)\n inicio, *_, fin = [c for c in precios.columns if c.startswith(\"precio_\")]\n\n precios[\"variacion\"] = precios[fin] - precios[inicio]\n precios[\"variacion_relativa\"] = precios[\"variacion\"] / precios[inicio] * 100\n return precios", "def open_file():\r\n\tr_ct = 0\r\n\t\r\n\twith open('flavors_of_cacao.csv', 'r') as csvfile:\r\n\t\tcacao_stream = csv.DictReader(csvfile)\r\n\t\tfor cacao_row in cacao_stream:\r\n\t\t\tr_ct += 1\r\n\t\t\t\r\n\t\t\t#quit after 100 records\r\n\t\t\tif r_ct > 100:\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t#pull the data out of the dictionary for sqlite3\r\n\t\t\tt_Company = cacao_row['Company']\r\n\t\t\tt_Specific_Bean_Origin = cacao_row['Specific_Bean_Origin']\r\n\t\t\tt_REF = cacao_row['REF']\r\n\t\t\tt_Review = cacao_row['Review']\r\n\t\t\tt_Cocoa = cacao_row['Cocoa']\r\n\t\t\tt_Location = cacao_row['Location']\r\n\t\t\tt_Rating = cacao_row['Rating']\r\n\t\t\tt_Bean = cacao_row['Bean']\r\n\t\t\tt_Broad_Bean_Origin = cacao_row['Broad_Bean_Origin']\r\n\t\t\t\r\n\t\t\t#print the first 15 lines\r\n\t\t\tif r_ct <= 15:\r\n\t\t\t\tprint (r_ct, t_Company, t_Bean, t_Cocoa, t_Review)\r\n\t\t\t\t\r\n\t\t\t#creates a sql cursor, formats the insert sql and executes it\r\n\t\t\tc = conn.cursor()\r\n\t\t\tstrsql = \"\"\"\r\n\t\t\t\tINSERT INTO cacao\r\n\t\t\t\t\t(Company, Specific_Bean_Origin, REF, Review, Cocoa, Location, Rating, Bean, Broad_Bean_Origin)\r\n\t\t\t\tvalues (\r\n\t\t\t\t\t'{t_Company}', '{t_Specific_Bean_Origin}', '{t_REF}', '{t_Review}', '{t_Cocoa}', '{t_Location}', '{t_Rating}', '{t_Bean}', '{t_Broad_Bean_Origin}');\r\n\t\t\t\t\"\"\".format(\r\n\t\t\t\t\tt_Company = t_Company,\r\n\t\t\t\t\tt_Specific_Bean_Origin = t_Specific_Bean_Origin,\r\n\t\t\t\t\tt_REF = t_REF,\r\n\t\t\t\t\tt_Review = t_Review,\r\n\t\t\t\t\tt_Cocoa = t_Cocoa,\r\n\t\t\t\t\tt_Location = t_Location,\r\n\t\t\t\t\tt_Rating = t_Rating,\r\n\t\t\t\t\tt_Bean = t_Bean,\r\n\t\t\t\t\tt_Broad_Bean_Origin = t_Broad_Bean_Origin\r\n\t\t\t\t\t)\r\n\t\t\tc.execute(strsql)\r\n\t\t\tconn.commit()", "def csv(self, section=\"main\", column_headers=True):\n table_end = \"\\r\\n\\r\\n\\r\\n\"\n table_head_pos = self.header_dictionary[section]\n table_end_pos = self.raw_data[table_head_pos:].find(table_end)\n if table_end_pos == -1:\n table_end_pos = len(self.raw_data)\n else:\n table_end_pos += table_head_pos\n if column_headers:\n a = \",\".join(self.columns()) + \"\\n\"\n else:\n a = \"\"\n return a + 
self.raw_data[table_head_pos:table_end_pos].replace(\"\\t\",\",\").replace(\"\\r\",\"\")", "def gen_info():\n # Carga la metainfo de departamentos de covidstas y filtramos departamentos de Santa Fe\n covidstats_meta_df = pd.read_csv('covidstats.csv',sep=';')\n covidstats_meta_df['LOCATION']='ARGENTINA/'+covidstats_meta_df['Provincia'].apply(normalize_str)+'/'+covidstats_meta_df['Departamento'].apply(normalize_str)\n covidstats_meta_df=covidstats_meta_df[covidstats_meta_df['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))]\n covidstats_meta_df\n\n # Cargamos la info poblacional y chequemos que tengamos toda la info\n info_df=pd.read_csv('info_general.csv')\n s = set(info_df['LOCATION'])\n for l in set(covidstats_meta_df['LOCATION']):\n if l not in s:\n print('FALTA INFO DE: {}'.format(l))\n\n # Cargamos la info geografica y chequemos que tengamos toda la info\n gdf = gpd.read_file('maps_general.geojson')\n gdf=gdf[gdf['LOCATION'].apply(lambda l : l.startswith('ARGENTINA/SANTA FE'))]\n s = set(gdf['LOCATION'])\n for l in set(covidstats_meta_df['LOCATION']):\n if l not in s:\n print('FALTA INFO GEOGRAFICA DE: {}'.format(l))\n return covidstats_meta_df, info_df, gdf", "def guardar_datos(self, archivo):\r\n with open(archivo, \"wb\") as csv_file:\r\n writer = csv.writer(csv_file, delimiter=';')\r\n writer.writerow(\r\n [\"Camion\", \"Carga\", \"Tipo\", \"Peso Final\", \"Operacion\", \"Recurso\", \"Dia\",\r\n \"Arribo\", \"Espera M. O/D\", \"Espera R\", \"Espera P.\", \"Espera T.\", \"Inicio\", \"Fin\",\r\n \"Medio de Almacenamiento\", \"Nivel\"])\r\n for linea in self.datos:\r\n writer.writerow(linea)", "def get_data():\r\n data = pd.read_csv(FILE_PATH)\r\n # Replace 'Zero KM' by year 2022 assuming it's a new car\r\n data['Ano'] = data['Ano'].str.replace('Zero KM', '2021').replace('2022', '2021')\r\n data['Ano'] = data['Ano'].astype(int)\r\n data['Automático'] = data['Automático'].astype(int)\r\n return data", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def makeCsv(net, date, opt, path, minlat, maxlat, minlon, maxlon, variables, estaciones):\n\n # data_lon = Dataset('/ServerData/KRAKEN/Reanalisis/a1979/wrfout_c15d_d01_1979-08-15_00:00:00.1979')\n # LON = data_lon.variables['XLONG'][:]\n # LAT = data_lon.variables['XLAT'][:]\n #\n # LON = LON[0][0]\n # LAT = LAT[0]\n #\n # LONsize = len(LON)\n # LATsize = len(LAT)\n #\n # celda = []\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat):int(maxlat),int(minlon):int(maxlon)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], date, opt, path, estaciones)", "def loadCSVFile (file, sep=\";\"):\n lst = lt.newList(\"ARRAY_LIST\") #Usando implementacion arraylist\n #lst = lt.newList() #Usando implementacion linkedlist\n print(\"Cargando archivo ....\")\n t1_start = process_time() #tiempo inicial\n dialect = csv.excel()\n dialect.delimiter=sep\n try:\n with open(file, encoding=\"utf-8\") as csvfile:\n spamreader = csv.DictReader(csvfile, dialect=dialect)\n for row in spamreader: \n lt.addLast(lst,row)\n except:\n print(\"Hubo un error con la carga del archivo\")\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return lst", "def csv_header(html_path):\n yield 'ideDocumento' # this field is missing from the reference\n with 
open(html_path, 'rb') as file_handler:\n parsed = BeautifulSoup(file_handler.read(), 'lxml')\n for row in parsed.select('.tabela-2 tr'):\n try:\n yield row.select('td')[0].text.strip()\n except IndexError:\n pass", "def fao1():\n df = pd.read_csv(\"/Users/Elsa/Desktop/Covid_Agosto/Proyecto_Elsa/Proyecto_individual_Elsa/src/main/FAO.csv\",encoding=\"ISO-8859-1\")\n \n return df", "def mani2():\r\n #open the original file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #read the csv file\r\n csv_reader = csv.reader(csvfile1)\r\n\r\n #open the new csv file \r\n with open('csvfile2.csv', 'w') as csvfile2:\r\n #write to it with another delimeter\r\n csv_writer = csv.writer(csvfile2, delimiter='-')\r\n #loop through the line\r\n for line in csv_reader:\r\n csv_writer.writerow(line)", "def get_mall_data(): \n filename = 'mall_customers.csv'\n \n if os.path.isfile(filename):\n return pd.read_csv(filename, index_col=0)\n else: \n df = pd.read_sql(\"\"\"select * from customers\"\"\", get_connection('mall_customers'))\n df.to_csv(filename)\n return df", "def get_csv_string(self, **kwargs):\n ...", "def __loaddata(filename, datatype='flightcsv', minprob=0.001, maxprob=0.20):\n if datatype is 'flightcsv':\n return extract_flight_csv(filename, minprob=minprob, maxprob=maxprob)\n else:\n raise Exception('unknown datatype %s' % datatype)", "def read_csv_file(self):\n pass", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def export_repayment_csv(request):\n import csv\n from django.utils.encoding import smart_str\n # response = HttpResponse(content_type='text/csv')\n # response['Content-Disposition'] = 'attachment; filename=Repayment_report.csv'\n from_date = request.GET.get('from_date')\n to_date = request.GET.get('to_date')\n search = request.GET.get('search_value') or ''\n search_query = Q()\n if search:\n search_query = Q(user__user__username__icontains=search) | \\\n Q(user__user__first_name__icontains=search) | \\\n Q(project__title__icontains=search) | \\\n Q(amount__icontains=search) | \\\n Q(user__user__last_name__icontains=search) | \\\n Q(user__user__email__icontains=search)\n if from_date and to_date:\n import datetime\n import pytz\n date1 = datetime.datetime.strptime(from_date, '%Y-%m-%d').date()\n date2 = datetime.datetime.strptime(to_date, '%Y-%m-%d').date()\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00,\n created_at__range=[\n datetime.datetime(date1.year, date1.month, date1.day, 8, 15,\n 12, 0, pytz.UTC),\n datetime.datetime(date2.year, date2.month, date2.day, 8, 15,\n 12, 0, pytz.UTC)]).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").filter(search_query).iterator()\n else:\n repayments = RepaymentFragment.objects.filter(amount__gt=0.00).filter(search_query).order_by('-created_at') \\\n .select_related(\"user\", \"project\", \"user__user\").iterator()\n # writer = csv.writer(response, csv.excel)\n # response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n def stream():\n buffer_ = StringIO()\n writer = csv.writer(buffer_)\n writer.writerow([\n smart_str(u\"FIRST NAME\"),\n smart_str(u\"LAST NAME\"),\n smart_str(u\"USERNAME\"),\n smart_str(u\"EMAIL\"),\n smart_str(u\"DATE\"),\n smart_str(u\"NAME OF PROJECT\"),\n smart_str(u\"DONATION AMOUNT\"),\n 
smart_str(u\"REPAYMENT AMOUNT\"),\n\n ])\n\n for payment in repayments:\n writer.writerow([\n smart_str(payment.user.user.first_name),\n smart_str(payment.user.user.last_name),\n smart_str(payment.user.user.username),\n smart_str(payment.user.user.email),\n smart_str(payment.created_at),\n smart_str(payment.project.title),\n smart_str(round(\n Payment.objects.filter(user=payment.user).filter(project=payment.project).aggregate(Sum('amount'))[\n 'amount__sum'] or 0, 2)),\n smart_str(round(payment.amount, 2)),\n ])\n buffer_.seek(0)\n data = buffer_.read()\n buffer_.seek(0)\n buffer_.truncate()\n yield data\n\n # Create the streaming response object with the appropriate CSV header.\n response = StreamingHttpResponse(stream(), content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"Repayment_report.csv\"'\n return response", "def importCSV(request, template='contacts/person/import.html'):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n registres = 0\n\n if request.method == 'POST':\n form = ImportCSVForm(request.POST, request.FILES)\n if form.is_valid():\n uploaded_file = request.FILES['fitxer']\n uploaded_file.read()\n reader = csv.reader(uploaded_file, delimiter=',', quotechar='\"')\n\n for row in reader:\n person = Person()\n person.first_name = row[0]\n person.last_name = row[1]\n person.contact_type = row[3]\n person.id_card = row[5]\n\n base_slug = slugify(\"%s %s %s\" % (p.first_name, p.last_name, p.secondlast_name))\n # hem de comprovar que no existeix cap persona amb aquest nom. Si no, hem d'afegir -1\n tmp_slug = base_slug\n trobat = True\n counter = 0\n\n while trobat:\n try:\n Person.objects.get(slug__iexact=tmp_slug)\n counter = counter + 1\n tmp_slug = \"%s-%s\" % (base_slug, str(counter))\n\n except Person.DoesNotExist:\n trobat = False\n\n person.slug = tmp_slug\n person.save()\n\n registres = registres + 1\n\n else:\n form = ImportCSVForm()\n\n return render_to_response(template, {'registres': registres, 'form': form}, RequestContext(request))", "def pre_process_books(csv, outputname):\n df = pd.read_csv(csv, error_bad_lines=False, encoding = \"ISO-8859-1\")\n df.to_csv(outputname, index=False)", "def loadelixcomo():\n elixcomo = pd.read_csv(pkg_resources.resource_filename(__name__,'Elixhauser_Comorbidities.csv')).iloc[:,1:]\n return elixcomo", "def getCSVbbx(filepath, detail, folder, time):\n \n #format validation\n pd.read_csv(filepath)\n click.echo(\"csv\")\n CRSinfo = True\n listlat = [\"Koordinate_Hochwert\",\"lat\",\"Latitude\",\"latitude\"]\n listlon = [\"Koordinate_Rechtswert\",\"lon\",\"Longitude\",\"longitude\",\"lng\"]\n listCRS = [\"CRS\",\"crs\",\"Koordinatensystem\",\"EPSG\",\"Coordinate reference system\", \"coordinate system\"]\n listtime = [\"time\", \"timestamp\", \"date\", \"Time\", \"Jahr\", \"Datum\"]\n try:\n deli=';'\n df = pd.read_csv(filepath, delimiter=deli,engine='python')\n #tests if there is a column named Coordinatesystem or similar\n click.echo(\"hi\")\n #click.echo(df.columns.values)\n #click.echo(intersect(listCRS,df.columns.values))\n if not intersect(listCRS,df.columns.values):\n CRSinfo= False\n print(\"hu\")\n print(\"No fitting header for a reference system\")\n\n if not(((intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)))or (intersect(listtime, df.columns.values))):\n #output=\"No fitting header for latitudes or longitudes\"\n raise Exception('No fitting ')\n #print(output)\n #return output\n\n except Exception as 
exce:\n deli=','\n df = pd.read_csv(filepath, delimiter=deli,engine='python')\n #tests if there is a column named Coordinatesystem or similar\n click.echo(\"hi\")\n #click.echo(df.columns.values)\n #click.echo(intersect(listCRS,df.columns.values))\n if not intersect(listCRS,df.columns.values):\n CRSinfo= False\n \n print(\"No fitting header for a reference system2\")\n z=intersect(listtime, df.columns.values)\n print (z)\n t=intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)\n print (intersect(listlat,df.columns.values))\n print(\"_______________\")\n print(t)\n if not t:\n print(\"false\")\n\n if not(((intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)))or (intersect(listtime, df.columns.values))):\n #output=\"No fitting header for latitudes or longitudes\"\n #raise Exception('No fim')\n \n raise Exception(\"evtl kein csv oder ungueltiges Trennzeichen.\")\n #print(\"keine Koordinaten vorhanden\")\n #print(output)\n #return output\n print (exce)\n\n if detail =='bbox':\n click.echo(\"bbox\")\n # Using Pandas: http://pandas.pydata.org/pandas-docs/stable/io.html\n #if folder=='single':\n mylat=intersect(listlat,df.columns.values)\n mylon=intersect(listlon,df.columns.values)\n lats=df[mylat[0]]\n lons=df[mylon[0]]\n bbox=[min(lats),min(lons),max(lats),max(lons)]\n # CRS transformation if there is information about crs\n if(CRSinfo):\n mycrsID=intersect(listCRS,df.columns.values)\n myCRS=df[mycrsID[0]]\n lat1t,lng1t = extractTool.transformToWGS84(min(lats),min(lons), myCRS)\n lat2t,lng2t = extractTool.transformToWGS84(max(lats),max(lons), myCRS)\n bbox=[lat1t,lng1t,lat2t,lng2t]\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV object:\")\n click.echo(bbox)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append(bbox)\n if folder=='whole':\n extractTool.bboxArray.append(bbox)\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV:\")\n click.echo(bbox)\n print(\"----------------------------------------------------------------\")\n else:\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV object:\")\n print(bbox)\n print(\"Missing CRS -----> Boundingbox will not be saved in zenodo.\")\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([None])\n if folder=='whole':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV file:\")\n click.echo(bbox)\n click.echo(\"because of a missing crs this CSV is not part of the folder calculation.\")\n print(\"----------------------------------------------------------------\")\n\n else:\n extractTool.ret_value.append([None])\n\n #returns the convex hull of the coordinates from the CSV object.\n if detail == 'convexHull':\n click.echo(\"convexHull\")\n mylat=intersect(listlat,df.columns.values)\n mylon=intersect(listlon,df.columns.values)\n lats=df[mylat[0]]\n lons=df[mylon[0]]\n coords=np.column_stack((lats, lons))\n #definition and calculation of the convex hull\n hull=ConvexHull(coords)\n 
hull_points=hull.vertices\n convHull=[]\n for z in hull_points:\n point=[coords[z][0], coords[z][1]]\n convHull.append(point)\n if(CRSinfo):\n mycrsID=intersect(listCRS,df.columns.values)\n myCRS=df[mycrsID[0]]\n inputProj='epsg:'\n inputProj+=str(myCRS[0])\n print(inputProj)\n inProj = Proj(init=inputProj)\n outProj = Proj(init='epsg:4326')\n for z in coords:\n z[0],z[1] = transform(inProj,outProj,z[0],z[1])\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"convex Hull of the csv file: \")\n click.echo(convHull)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append(convHull)\n if folder=='whole':\n extractTool.bboxArray=extractTool.bboxArray+convHull\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"convex hull of the CSV:\")\n click.echo(convHull)\n print(\"----------------------------------------------------------------\")\n #return convHull\n else:\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Convex hull of the CSV object:\")\n print(convHull)\n print(\"Missing CRS -----> Boundingbox will not be saved in zenodo.\")\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([None])\n if folder=='whole':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Convex hull of the CSV file:\")\n click.echo(convHull)\n click.echo(\"because of a missing crs this CSV is not part of the folder calculation.\")\n print(\"----------------------------------------------------------------\")\n\n\n else:\n extractTool.ret_value.append([None])\n\n\n\n \n if (time):\n click.echo(\"hallo\")\n # Using Pandas: http://pandas.pydata.org/pandas-docs/stable/io.html\n df = pd.read_csv(filepath, sep=';|,',engine='python')\n click.echo(listtime)\n click.echo(df.columns.values)\n intersection=intersect(listtime, df.columns.values)\n click.echo(intersection)\n if not intersection:\n print(\"No fitting header for time-values\")\n extractTool.ret_value.append([None])\n # TODO: fehlerbehandlung \n #try:\n #for t in listtime:\n #if(x not in df.columns.values):\n #click.echo(\"This file does not include time-values\")\n #else:\n #time=df[t]\n #timeextend =[min(time), max(time)]\n #click.echo(timeextend)\n #return timeextend\n #except Exception as e:\n #click.echo (\"There is no time-value or invalid file.\")\n #return None \n else:\n \n \n time=df[intersection[0]]\n print(min(time))\n print(max(time))\n timemin=str(min(time))\n timemax=str(max(time))\n timemax_formatted=dateparser.parse(timemax)\n timemin_formatted=dateparser.parse(timemin)\n timeextend=[timemin_formatted, timemax_formatted]\n print(timeextend)\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Timeextend of this CSV file:\")\n click.echo(timeextend)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([timeextend])\n #return timeextend\n if folder=='whole':\n extractTool.timeextendArray.append(timeextend)\n print(\"timeextendArray:\")\n print(extractTool.timeextendArray)\n\n else:\n 
extractTool.ret_value.append([None])\n if folder=='single':\n print(extractTool.ret_value)\n return extractTool.ret_value", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def load(*args):\r\n\r\n #args[0].to_csv(str(PATH.joinpath('./data/{}.csv'.format(args[1]))),index=False)\r\n\r\n try: # it will fail if duplicates\r\n args[0].to_sql('cmf', con=engine, if_exists='append', index=False)\r\n except:\r\n pass", "def pull_data(raw_file, out_file):\n ifile = codecs.open(raw_file, 'r', encoding='utf_16_le')\n ofile = open(out_file, 'w')\n writer = csv.writer(ofile, delimiter=',', quoting=csv.QUOTE_NONE, lineterminator='\\n')\n # print \"Processing: \", raw_file\n\n is_header = True\n for row in ifile:\n data_row = row\n if not is_header:\n data_row = parse_row(row)\n data_rows.append(data_row)\n else:\n is_header = False\n\n for a_row in data_rows:\n writer.writerow(a_row)\n\n ifile.close()\n ofile.close()", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def to_csv(header, rows):\r\n with open('result.csv', 'w') as result:\r\n result_writer = csv.writer(result, delimiter=';')\r\n result_writer.writerow(header)\r\n result_writer.writerows(rows)", "def end_result_csv(final_list, mes):\n\n def filna_dict(mes):\n \"\"\"werkt. 
maar ga een dict comprehension proberen.\"\"\"\n key = [f'pdf_{count + 1}' for count in range(mes)]\n value = ['stans.pdf' for count in range(mes)]\n filna_tobe_inserted = dict(zip(key, value))\n return filna_tobe_inserted\n\n fin = filna_dict(mes)\n\n for pad in final_list:\n\n df = pd.read_csv(pad, delimiter=\";\")\n df.fillna(fin, inplace=True)\n df.to_csv(pad, index=0)\n print(pad)\n\n return fin", "def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()", "def csvread(bestandsnaam):\n with open(\"database/\" + bestandsnaam, \"r\") as ReadMyCsv:\n reader = csv.DictReader(ReadMyCsv, delimiter=\";\")\n\n gegevens = []\n for gegeven in reader:\n gegevens.append(gegeven)\n\n return gegevens", "def process_csv():\n csv_rows = []\n fieldnames = ['site',\n 'latitude',\n 'longitude',\n 'city',\n 'region_code',\n 'country_code',\n 'continent_code',\n 'min_ip_hex',\n 'max_ip_hex',\n 'transit_provider',\n 'min_ip',\n 'max_ip',\n 'ip_prefix',\n 'min_ipv6_hex',\n 'max_ipv6_hex',\n 'min_ipv6',\n 'max_ipv6',\n 'ipv6_prefix']\n\n location_map = build_location_map()\n\n # Read in the CSV file and augment the columns\n with open(INPUT_FILE, 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n csv_rows.append(process_row(row, location_map))\n\n # Write the new CSV file with new columns\n with open(OUTPUT_FILE, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in csv_rows:\n writer.writerow(row)\n\n print(\"MLab Sites CSV generated at {0}\".format(OUTPUT_FILE))", "def csv_reader2(file_obj):\n global grilles\n # le tableau qui contient les gagnants\n grilles = [list(map(int,rec[1:])) for rec in csv.reader(file_obj, delimiter=';')]", "def construct_csv_file(cursor,file_name,row_limit=100000):\n if file_name =='':\n logging.error('Csv File cannot be created without a defined filename')\n\n\n with open(file_name,'w',newline='') as csv_file:\n writer, output = create_writer()\n\n header = [h[0] for h in cursor.description]\n writer.writerow(header)\n csv_file.write(output.getvalue())\n #Writer closed to empty it. 
\n output.close()\n\n while True:\n writer, output = create_writer()\n\n results = cursor.fetchmany(row_limit)\n if not results:\n break\n with open(file_name, 'a', newline='') as csv_file:\n writer.writerows(results)\n csv_file.write(output.getvalue())\n output.close()\n\n return", "def carga_datos_PREM( directorio ):\n\n # se chequea si el input directorio es o no string\n if not isinstance( directorio, basestring):\n directorio = str( directorio )\n else:\n directorio = directorio\n\n # se chequea el formateo del string (se desea que no termine con /)\n if not directorio.endswith( \"/\" ):\n directorio = directorio + \"/\"\n\n premfile = directorio + \"PREM_1s.csv\" # radio, profundidad, densidad, Vpv, Vph, Vsv, Vsh, eta, Q-mu, Q-kappa\n PREM = np.genfromtxt( premfile, delimiter = \",\" )\n prem_prof = PREM[:,1]*1000 # en metros\n prem_vs = PREM[:,5]*1000 # velocidad Vsv en metros/segundos\n\n return prem_vs, prem_prof", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def create_csv(d, f):\n csv_data = list()\n csv_head = [unicode(x) for x in f]\n\n for row in d:\n row_data = list()\n for field in f:\n fields = field.split('.')\n row_data.append(extract_dict(row, fields))\n csv_data.append(row_data)\n\n csv = (csv_head, csv_data)\n return csv", "def read_exo(filnam, skiprows=25, encoding=\"utf-8\"):\n\n try:\n exo = pd.read_csv(\n filnam,\n skiprows=skiprows,\n # parse_dates=[['Date (MM/DD/YYYY)',\n # 'Time (HH:MM:SS)']],\n parse_dates=[[0, 1]],\n encoding=encoding,\n )\n except UnicodeDecodeError:\n exo = pd.read_csv(\n filnam,\n skiprows=skiprows,\n # parse_dates=[['Date (MM/DD/YYYY)',\n # 'Time (HH:MM:SS)']],\n parse_dates=[[0, 1]],\n encoding=\"mac-roman\",\n )\n except NotImplementedError as e:\n print(\n (\n \" *** Could not decode file. Try saving the csv file using \"\n \"UTF-8 encoding and retrying\\n\"\n ),\n e,\n )\n except ValueError as e:\n print(\n (\n \" *** Could not decode header. \"\n \"Have you specified skiprows correctly?\\n\"\n ),\n e,\n )\n # exo.rename(columns={'Date (MM/DD/YYYY)_Time (HH:MM:SS)': 'time'},\n # inplace=True)\n exo.rename(\n columns={exo.columns[0]: \"time\"}, inplace=True\n ) # rename first column to time.\n # Need to do this because the format of the date/time header can change between versions\n exo.set_index(\"time\", inplace=True)\n exo.rename(columns=lambda x: x.replace(\" \", \"_\"), inplace=True)\n exo.rename(columns=lambda x: x.replace(\"/\", \"_per_\"), inplace=True)\n pvar = None\n if \"Press_psi_a\" in exo.columns:\n pvar = \"Press_psi_a\"\n elif \"Pressure_psi_a\" in exo.columns:\n pvar = \"Pressure_psi_a\"\n else:\n print(\n \"*** Could not find pressure (Press_psi_a, Pressure_psi_a) in source data file. 
Have you exported pressure if this instrument was equipped with a pressure sensor?\"\n )\n if pvar:\n # Convert from PSI to dbar\n exo[\"Press_dbar\"] = exo[pvar] * 0.689476\n\n exo = xr.Dataset(exo)\n hdr = read_exo_header(filnam, encoding=encoding)\n exo.attrs[\"serial_number\"] = hdr[\"serial_number\"]\n exo.attrs[\"instrument_type\"] = \"YSI EXO2 Multiparameter Sonde\"\n\n # Apply sensor serial numbers to each sensor\n for k in exo.variables:\n if \"fDOM\" in k:\n if \"fDOM\" in hdr:\n hdrvar = \"fDOM\"\n elif \"fDOM QSU\" in hdr:\n hdrvar = \"fDOM QSU\"\n exo[k].attrs[\"sensor_serial_number\"] = hdr[hdrvar][\"sensor_serial_number\"]\n elif \"Chlorophyll\" in k or \"BGA\" in k or \"TAL\" in k:\n if \"Total Algae BGA-PE\" in hdr:\n hdrvar = \"Total Algae BGA-PE\"\n elif \"BGA PE RFU\" in hdr:\n hdrvar = \"BGA PE RFU\"\n elif \"TAL PE RFU\" in hdr:\n hdrvar = \"TAL PE RFU\"\n exo[k].attrs[\"sensor_serial_number\"] = hdr[hdrvar][\"sensor_serial_number\"]\n elif \"Temp\" in k or \"Cond\" in k or \"Sal\" in k:\n if \"Unknown CT\" in hdr:\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"Unknown CT\"][\n \"sensor_serial_number\"\n ]\n elif \"Wiped CT\" in hdr:\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"Wiped CT\"][\n \"sensor_serial_number\"\n ]\n else:\n hdrvar = \"Sal psu\"\n exo[k].attrs[\"sensor_serial_number\"] = hdr[hdrvar][\"sensor_serial_number\"]\n elif \"ODO\" in k:\n try:\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"Optical DO\"][\n \"sensor_serial_number\"\n ]\n except KeyError:\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"ODO % sat\"][\n \"sensor_serial_number\"\n ]\n elif k == \"Turbidity\":\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"Turbidity\"][\n \"sensor_serial_number\"\n ]\n elif k == \"Turbidity_NTU\":\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"Turbidity NTU\"][\n \"sensor_serial_number\"\n ]\n elif k == \"Turbidity_FNU\":\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"Turbidity FNU\"][\n \"sensor_serial_number\"\n ]\n elif \"pH\" in k:\n exo[k].attrs[\"sensor_serial_number\"] = hdr[\"pH\"][\"sensor_serial_number\"]\n elif \"Press\" in k or \"Depth\" in k:\n if \"Depth Non-Vented 0-10m\" in hdr:\n hdrvar = \"Depth Non-Vented 0-10m\"\n elif \"Depth m\" in hdr:\n hdrvar = \"Depth m\"\n elif \"Pressure psi a\" in hdr:\n hdrvar = \"Pressure psi a\"\n else:\n hdrvar = None\n exo[k].attrs[\"sensor_serial_number\"] = hdr[hdrvar][\"sensor_serial_number\"]\n\n return exo", "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def prepare_csv(self, filename, *args, **kwargs):\n x_possible = getattr(settings, 'IMPORT_CSV_X_FIELDS', ['Lon*', 'x', 'lon*'])\n y_possible = getattr(settings, 'IMPORT_CSV_Y_FIELDS', ['Lat*', 'y', 'lat*'])\n geom_possible = getattr(settings, 'IMPORT_CSV_GEOM_FIELDS',\n ['geom', 'GEOM', 'WKT', 'the_geom', 'THE_GEOM', 'WKB'])\n\n oo = kwargs.get('open_options', [])\n\n oo.append('X_POSSIBLE_NAMES={0}'.format(','.join(x_possible)))\n oo.append('Y_POSSIBLE_NAMES={0}'.format(','.join(y_possible)))\n 
oo.append('GEOM_POSSIBLE_NAMES={0}'.format(','.join(geom_possible)))\n\n kwargs['open_options'] = oo\n\n return filename, args, kwargs", "def read_data():\n data = pd.read_csv('input_data/Preply_tutor_views_datasaet.csv')\n return data", "def tokenizeFormCsv( input_file, columns,save_file_path):\n # 写文件\n output = open(save_file_path, \"w+\", encoding=\"utf-8\")\n\n\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.DictReader(f)\n i = 0\n for row in reader:\n content_line = ''\n i += 1\n for column in columns:\n content_line += cut(row[column])\n\n output.write(content_line + \"\\n\")\n print(\"line {} is finished : {}\".format(i, content_line))\n\n\n output.close()\n print(\"csv finished\")", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def create_csv(csvfile, forms, sep=';', internal_sep=',', raw=True):\n writer = csv.writer(csvfile, delimiter=sep)\n writer.writerow(\n ['uid'] + list(forms.keys()) +\n ['first', 'last', 'time', 'finished', 'language', 'origin']\n )\n\n uids = list(db.session.query(Answer.uid).distinct())\n for uid, in uids:\n user_answers = list(\n Answer.query.filter_by(uid=uid).order_by(Answer.created_at)\n )\n uanswers = {\n num: {a.field: a.value for a in answers}\n for num, answers in groupby(user_answers, lambda x: x.question)\n }\n uid = uid if isinstance(uid, str) else uid.decode('ascii')\n row = [uid] + [\n qform.survey_answers(uanswers.get(number, None),\n raw=raw, sep=internal_sep)\n for number, qform in forms.items()\n ] + [\n str(user_answers[0].created_at),\n str(user_answers[-1].created_at),\n str(user_answers[-1].created_at - user_answers[0].created_at),\n 'yes' if 'finish' in uanswers else 'no',\n Counter(\n next(answers).lang\n for _, answers in groupby(user_answers, lambda x: x.question)\n ).most_common()[0][0],\n uanswers.get('origin', {'submit': 'main'})['submit'],\n ]\n writer.writerow(row)", "def read_data(filename, use):\r\n with open(os.path.join(os.getcwd(), filename)) as csvfile:\r\n read_csv = csv.reader(csvfile, delimiter=',')\r\n if use == 'exp':\r\n data = set()\r\n for row in read_csv:\r\n data.add(tuple(row))\r\n elif use == 'field':\r\n data = {}\r\n for row in read_csv:\r\n data[row[0]] = int(row[1])\r\n return data", "def user_list_csv():\n us = user.User.query.all()\n filename = 'xxx.csv'\n csv_name = _rename_file(filename)\n url = app.config['CSV_FILES_DEST'] + '/' + csv_name\n with codecs.open(url, 'wb') as csvfile:\n #fieldnames = ['账号', '姓名', '描述', '角色', '邮箱', '电话', '工作电话', '公司', '部门', '职位']\n fieldnames = []\n if len(us) > 0:\n fieldnames = us[0].to_csv_dict().keys()\n writer = unicodecsv.writer(csvfile, encoding='utf-8-sig')\n writer.writerow(fieldnames)\n for u in us:\n dct = u.to_csv_dict()\n n_items = {}\n for name in fieldnames:\n if dct[name] is not None:\n n_items[name] = dct[name]\n else:\n n_items[name] = ''\n writer.writerow(n_items.values())\n return send_file(url)", "def csvObj():\n CSV_URL = 
\"http://unitedstates.sunlightfoundation.com/legislators/legislators.csv\"\n s = requests.get(CSV_URL) # Download the csv using requests.\n reader = csv.DictReader(s.text.splitlines(), lineterminator=\"\\n\") # Use the dictreader to make a dictionary with the attribute name paired with the rows value for that attribute.\n name2twitter_id = {}\n for row in reader:\n if (row['in_office'] == \"1\" and row['twitter_id'] != \"\"):\n name = row['firstname'] + \" \" # Construct the name.\n if (row['middlename'] != \"\"): # Not all names have middle names.\n name += row['middlename'] + \" \"\n name += row['lastname']\n name2twitter_id[name] = row['twitter_id'] # Assign the name to their handle.\n del name2twitter_id[\"Tim Murphy\"] # This representative does not have an active twitter handle. \n name2twitter_id[\"Gregory W. Meeks\"] = \"RepGregoryMeeks\" # Insert this representatives twitter handle manually.\n return name2twitter_id", "def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data", "def action_import(self):\n ctx = self._context\n account_obj = self.env[\"account.account\"]\n import_obj = self.env['import.journal.entries.advanced']\n import_line_obj = self.env[\"journal.entries.csv.import\"]\n if 'active_id' in ctx:\n import_id = import_obj.browse(ctx['active_id'])\n if not self.data:\n raise exceptions.Warning(_(\"Necesitas seleccionar un archivo!\"))\n # Decode the file data\n data = base64.b64decode(self.data).decode('utf-8')\n file_input = StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Archivo no valido\"))\n keys = reader_info[0]\n # check if keys exist\n if not isinstance(keys, list) or ('cuenta' not in keys):\n raise exceptions.Warning(_(\"No se encuentran 'cuentas' contable en el archivo\"))\n del reader_info[0]\n values = {}\n actual_date = fields.Date.today()\n for i in range(len(reader_info)):\n val = {}\n field = reader_info[i]\n values = dict(zip(keys, field))\n account = False\n if 'cuenta' in values and values['cuenta']:\n account_id = account_obj.search([('code', '=', values['cuenta'])]) \n if account_id:\n account = account_id[0]\n else:\n account = account_id\n\n val[\"ref\"] = values[\"descripcion\"]\n val[\"document_number\"] = values[\"num_documento\"]\n val[\"document_date\"] = datetime.strptime(values[\"fecha\"] , \"%d-%m-%Y\")\n val['account_id'] = account.id\n val['parent_id'] = import_id.id\n val['debit'] = values['debito']\n val['credit'] = values['credito']\n val['processed'] = False\n validate = import_line_obj.create(val)\n if validate:\n if validate.account_id:\n validate.is_ok = True", "def sample_csv_file(tmpdir):\n csv_file = tmpdir.mkdir('sub').join('sample_phone_masts.csv')\n # header and 3 rows of test data\n csv_file.write(\n 'Property Name,Property Address,Unit Name,Tenant Name,Lease Start Date,Lease End Date,Lease Years,'\n 'Current Rent\\n'\n 'Farmhouse 2,Field X,Unit 2,CellWorks Ltd,29 Apr 2008,28 Apr 2018,10,700\\n'\n 'Farmhouse 1,Field Y,Unit 1,CellWorks Ltd,29 Apr 2002,28 Apr 2020,15,500\\n'\n 'Farmhouse 3,Field Z,Unit 3,CellWorks Ltd,01 Dec 2019,01 Dec 2021,15,999.99\\n'\n )\n return str(csv_file)", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = 
pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def save_data_csv(self, filename):\n #add masked entry as last column\n fields = numpy.r_[self.colLabels, ['masked']]\n\n #add dynamic expression to column headers\n for k, col in enumerate(self.dynamic_cols):\n fields[col] += \" [%s]\"%self.dynamic_expressions[k] if self.dynamic_expressions[k] else ''\n\n #add custom labels to field names \n for col, fieldname in enumerate(fields):\n custom_label = self.column_labels_custom.get(col)\n fields[col] += \" (%s)\"%custom_label if custom_label else ''\n\n fields[col] += \" {*}\" if (col in self.colsel and (fieldname.find('user')==0 or col in self.dynamic_cols)) else ''\n \n #add options\n \n \n #don't save last two lines\n data = numpy.c_[self.data[:-2], self.rowmask[:-2]]\n\n with open(filename, 'wb') as f:\n import csv\n writer = csv.writer(f)\n writer.writerow(fields)\n #writer.writerows(data)\n for row in data:\n r = [entry.encode('latin_1') if type(entry) is types.UnicodeType else entry for entry in row]\n writer.writerow(r)\n self.modified = False", "def data_add(invoice_details):\r\n with open(\"beer_data.csv\", \"a\") as data_file:\r\n writer = csv.writer(data_file)\r\n writer.writerow(invoice_details)\r\n data_file.close()", "def _gen_csv_data(self, f, dialect):\n\t\treader = self._get_csv_reader(f, dialect)\n\n\t\tfor line in reader:\n\t\t\ttry:\n\t\t\t\tdatum = line[self.ipa_col]\n\t\t\texcept IndexError:\n\t\t\t\tmes = 'Could not find IPA data on line: {}'.format(line)\n\t\t\t\traise ValueError(mes)\n\n\t\t\tyield datum, reader.line_num", "def leer_blast(archivo_entrada,query):\n datos=pd.read_csv(archivo_entrada,delimiter='\\t') \n \n #El usuario puede elegir si quiere filtrar o dejarlo con los valores predeterminados\n pregunta=input(\"¿Quiere introducir el porcentaje de filtrado para identidad, evalue y coverage?[S/N]: \")\n\n if pregunta==\"S\" or pregunta==\"s\":\n id=float(input(\"¿Cuál es el porcentaje de identidad por el que desea filtrar?: \"))\n cov=float(input(\"¿Cuál es el valor de coverage por el que desea filtrar?: \"))\n evalue=float(input(\"¿Cuál es el valor de Evalue por el que desea filtrar?: \"))\n else:\n id=85\n cov=30\n evalue=1e-2\n\n def ordena(datos):\n \"\"\"Funcion para ordenar los datos\n datos=archivo Resultado_blast_completo abierto con pandas\n \"\"\"\n datos =datos[(datos['Identidad'] >=id) & (datos['Cobertura'] >= cov) & (datos['Evalue'] <= evalue)]\n return \n \n ordena(datos)\n return datos", "def filterIEDBFile(filename, field, search):\n X = pd.read_csv(filename)\n cols = ['PubMed ID','Author','Journal','Year','T Cell ID','MHC Allele Name',\n 'Epitope Linear Sequence','Epitope Source Organism Name']\n y = X[X[field].str.contains(search)]\n print y[cols]\n y.to_csv('filtered.csv',cols=cols)\n return y", "def process_csv(filepath):\n suburb = get_suburb(filepath)\n read_file = pd.read_csv(filepath,\n infer_datetime_format=True,\n parse_dates=[\"SALE DATE\"],\n dayfirst=True)\n read_file[\"SUBURB\"] = suburb\n separate_date(read_file)\n return read_file", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def data_clean(countryfile, folder):\n with open(r\"C:\\Users\\User\\Documents\\LUCAS2015_spectra\\LUCAS2015_Soil_Spectra_EU28\\spectra_ \" + countryfile + \" .csv\") as f:\n # maakt csv reader aan\n reader = 
csv.reader(f)\n # Open\n with open(folder + r\"\\spectra_ \" + countryfile + \" .csv\", 'w', newline='') as file:\n writer = csv.writer(file)\n for c, row in enumerate(reader):\n if c == 0:\n writer.writerow(row[:5] + row[205:-200:2])\n else:\n x = np.array(row[205:-200:2], dtype='float64')\n reflectance = 10 ** (-x)\n writer.writerow(row[:5] + list(reflectance))", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def writeToCsv(clue):\n filename = 'new_clue_import_for_editing.csv'\n f = open(filename, 'w')\n fieldnames = list(set([m['Clue_field'] for m in mapping]))\n fieldnames.append('date')\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for c in clue:\n writer.writerow(c)\n f.close()", "def _getCSVForPerField(self, statistic):\n\n rows = []\n\n chart_json = simplejson.loads(statistic.chart_json)\n description = chart_json['description'] \n header = []\n for item in description:\n header.append(item[-1].encode('utf-8'))\n rows.append(header)\n\n final_stat = simplejson.loads(statistic.final_json)\n for choice, result in final_stat.iteritems():\n row = []\n row.append(unicode(choice).encode('utf-8'))\n for item in result:\n row.append(unicode(item).encode('utf-8'))\n rows.append(row)\n\n return rows", "def make_csv(file_of_data):\n with open(file_of_data, 'w') as f:\n writer = csv.writer(f)\n header = (\"Counter\", \"Date/time\", \"Latitude\", \"Longitude\", \"Temperature\", \"Humidity\")\n writer.writerow(header)", "def extractionTitleRatings(cur, conn):\n fh = open(pathTitleRatings)\n reader = csv.reader(fh, delimiter = '\\t')\n firstLine = True\n for row in reader:\n if firstLine : firstLine = False # Read header\n else :\n idTitulo = int(row[0][2:])\n valuacionMedia = float(row[1])\n nombreVoto = int(row[2])\n # print(clasificacionInsert.format(idTitulo, valuacionMedia, nombreVoto))\n # REGISTER DATA IN CLASIFICACION TABLE\n cur.execute(clasificacionInsert.format(idTitulo, valuacionMedia, nombreVoto))\n conn.commit()", "def prepare_out_csv(output_dir, 
filename):\n out_columns_pi = ['fasta_file', 'acc.code',\n 'organism', 'EC.code', 'species',\n 'note', 'pi', 'modification', 'category']\n string = ''\n for i in out_columns_pi:\n if i == out_columns_pi[-1]:\n string += i\n else:\n string += i+','\n string += '\\n'\n with open(output_dir+filename, 'w') as f:\n f.write(string)" ]
[ "0.6793251", "0.6274848", "0.62337714", "0.6202803", "0.6107997", "0.60955846", "0.60521036", "0.60283566", "0.5975984", "0.59122914", "0.5902194", "0.5892259", "0.5851414", "0.5821785", "0.58078057", "0.578318", "0.5779465", "0.57591194", "0.57591194", "0.5738218", "0.5728837", "0.5680294", "0.56655097", "0.56467247", "0.5642721", "0.5637487", "0.5610441", "0.56088567", "0.55976427", "0.559575", "0.5572163", "0.5568526", "0.5547551", "0.5525046", "0.55159706", "0.5509492", "0.5499558", "0.5491932", "0.54911953", "0.548205", "0.54753405", "0.5474487", "0.54666626", "0.5456919", "0.5454975", "0.5450408", "0.5440227", "0.54364836", "0.5431941", "0.5422632", "0.5412024", "0.54054844", "0.5394968", "0.5390834", "0.5388125", "0.5378882", "0.53715247", "0.53714454", "0.53699076", "0.5369789", "0.53598154", "0.5357831", "0.5357588", "0.53496504", "0.53454316", "0.5343939", "0.5343448", "0.53304654", "0.53250563", "0.5324722", "0.5322879", "0.5318716", "0.5314721", "0.53120536", "0.5304167", "0.5302103", "0.52952796", "0.52941114", "0.5287581", "0.5283692", "0.52824646", "0.5281721", "0.52814204", "0.52796763", "0.527861", "0.5273969", "0.5269323", "0.5267864", "0.5264486", "0.52534443", "0.52514327", "0.5251282", "0.52450526", "0.52370405", "0.5236582", "0.5235089", "0.5230416", "0.52298594", "0.5229762", "0.5227654" ]
0.5444409
46
Get options from vct config file
def get_vct_config(var):
    vct_root = get_vct_root()
    context = {
        'var': var,
        'source': """
            if [ -f %(vct_root)s/vct.conf.overrides ]; then
                . %(vct_root)s/vct.conf.default
                . %(vct_root)s/vct.conf.overrides
            elif [ -f %(vct_root)s/vct.conf ]; then
                . %(vct_root)s/vct.conf
            elif [ -f %(vct_root)s/vct.conf.default ]; then
                . %(vct_root)s/vct.conf.default
            fi """ % { 'vct_root': vct_root}
    }
    out = run("bash -c '%(source)s; echo $%(var)s'" % context, display=False, silent=False)
    return out.stdout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_options(self,options_file):\n config=ConfigParser.ConfigParser()\n config.read(options_file)\n return config", "def options(self):\n\t\treturn self.config_parser.options(self.section_name)", "def autostart_read_options(self,options_file):\n config=ConfigParser.ConfigParser()\n config.read(options_file)\n return config", "def get_option(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETCONF %s\\r\\n\" % name)\r\n\r\n r = []\r\n for _,line,_ in lines:\r\n try:\r\n key, val = line.split(\"=\", 1)\r\n r.append((key,val))\r\n except ValueError:\r\n r.append((line, None))\r\n\r\n return r", "def readOptions(self):\n get = command_line.CommandLineParser().get_option\n if get('nosplash')!=None:\n self.temp_configuration.showSplash = bool(get('nosplash'))\n if get('debugsignals')!=None:\n self.temp_configuration.debugSignals = bool(get('debugsignals'))\n if get('dotVistrails')!=None:\n self.temp_configuration.dotVistrails = get('dotVistrails')\n #in theory this should never happen because core.configuration.default()\n #should have done this already\n #if not self.configuration.check('dotVistrails'):\n # self.configuration.dotVistrails = system.default_dot_vistrails()\n # self.temp_configuration.dotVistrails = system.default_dot_vistrails()\n if get('multiheads')!=None:\n self.temp_configuration.multiHeads = bool(get('multiheads'))\n if get('maximized')!=None:\n self.temp_configuration.maximizeWindows = bool(get('maximized'))\n if get('movies')!=None:\n self.temp_configuration.showMovies = bool(get('movies'))\n if get('cache')!=None:\n self.temp_configuration.useCache = bool(get('cache'))\n if get('verbose')!=None:\n self.temp_configuration.verbosenessLevel = get('verbose')\n if get('noninteractive')!=None:\n self.temp_configuration.interactiveMode = \\\n not bool(get('noninteractive'))\n if get('workflowinfo') != None:\n self.temp_configuration.workflowInfo = str(get('workflowinfo'))\n if get('dumpcells') != None:\n self.temp_configuration.spreadsheetDumpCells = get('dumpcells')\n if get('pdf') != None:\n self.temp_configuration.spreadsheetDumpPDF = get('pdf')\n if get('workflowgraph') != None:\n self.temp_configuration.workflowGraph = str(get('workflowgraph'))\n if get('evolutiongraph') != None:\n self.temp_configuration.evolutionGraph = str(get('evolutiongraph'))\n if get('executeworkflows') != None:\n self.temp_configuration.executeWorkflows = \\\n bool(get('executeworkflows'))\n if get('showspreadsheetonly') != None:\n self.temp_configuration.showSpreadsheetOnly = \\\n bool(get('showspreadsheetonly'))\n # asking to show only the spreadsheet will force the workflows to\n # be executed\n if get('reviewmode') != None:\n self.temp_configuration.reviewMode = bool(get('reviewmode'))\n\n if self.temp_configuration.showSpreadsheetOnly and not self.temp_configuration.reviewMode:\n self.temp_configuration.executeWorkflows = True\n \n self.temp_db_options = InstanceObject(host=get('host'),\n port=get('port'),\n db=get('db'),\n user=get('user'),\n parameters=get('parameters')\n )\n if get('nologger')!=None:\n self.temp_configuration.nologger = bool(get('nologger'))\n if get('quickstart') != None:\n self.temp_configuration.staticRegistry = str(get('quickstart'))\n if get('detachHistoryView')!= None:\n self.temp_configuration.detachHistoryView = bool(get('detachHistoryView'))\n self.input = command_line.CommandLineParser().positional_arguments()", "async def get_options(self):", "def config(ctx):\n if not ctx.invoked_subcommand:\n cfg = 
ctx.obj['cfg']\n for section in cfg.sections():\n print(\"[\", section, \"]\")\n for option in cfg[section]:\n print(option, \" = \", cfg[section][option])", "def _opt_config(self):\n return self._opt_method.config", "def get_options(self):\n\t\treturn self.options", "def _get_options(self):\n return self.options", "def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, \"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)", "def get_options(self):\n options = dict()\n while True:\n line = self.rfile.readline().decode(\"utf8\").strip()\n if not line:\n break\n self.log.debug(\"Got line: %s\", line)\n if \":\" not in line:\n self.log.debug(\"Invalid option: %s\", line)\n error_msg = \"header not in 'Name: value' format\"\n raise oa.errors.InvalidOption(error_msg)\n name, value = line.split(\":\", 1)\n options[name.lower()] = value.strip()\n return options", "def config_options(config_file_path, version_group):\n translations = { \"True\": True, \"False\": False,\n \"true\": True, \"false\": False,\n \"yes\": True, \"no\": False,\n \"Yes\": True, \"No\": False }\n config = configparser.ConfigParser()\n config.read(config_file_path)\n group = config[version_group]\n env = { }\n for key in group:\n if group[key] in translations:\n logger.debug(\"Translating {}\".format(key))\n env[key] = translations[group[key]]\n else:\n logger.debug(\"Not translating |{}|\".format(key))\n env[key] = group[key]\n logger.debug(\"Environment: {}\".format(env))\n return env", "def get_config(self):\n configs = []\n\n \"\"\"Get all vdu and/or vdu config in a descriptor.\"\"\"\n vnf_config = self.vnfd.get(\"vnf-configuration\")\n if vnf_config:\n juju = vnf_config['juju']\n if juju:\n configs.append(vnf_config)\n\n for vdu in self.vnfd['vdu']:\n vdu_config = vdu.get('vdu-configuration')\n if vdu_config:\n juju = vdu_config['juju']\n if juju:\n configs.append(vdu_config)\n\n return configs", "def load_options():\n try:\n with open(config, \"rU\") as f:\n options = serializer.load(f)\n check(options)\n if options[\"version\"] < version:\n options[\"version\"] = version.int\n options = get_config(options)\n save_options(options)\n except IOError:\n options = get_config()\n save_options(options)\n except Exception:\n print (\"Options could not be loaded:\")\n import traceback\n traceback.print_exc()\n options = get_config()\n save_options(options)\n else:\n o_o = options\n options = get_config(options)\n if o_o != options:\n save_options(options)\n del(o_o)\n globals()[\"clientoptions\"] = options\n return options", "def _get_options_config(pathname):\n\n if pathname is None:\n return {}\n\n return utils.load_yaml_file(pathname).pop(\"options\")", "def gather_candidates(self, context):\n candidates = []\n\n with open(context['data_file'], 'r') as fp:\n try:\n config = load(fp)\n except JSONDecodeError:\n err_string = 'Decode error for' + context['data_file']\n error(self.vim, err_string)\n config = []\n\n for 
obj in config:\n candidates.append({\n 'word': obj['option'],\n '__option': obj['option'],\n '__shortname': obj['shortname'],\n '__description': obj['description'],\n 'abbr': f\"{obj['option']:<15}│{obj['shortname']:<10}│{obj['description']:<15}\",\n })\n\n return candidates", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def read_config():\n with open('.vrocli.yml', 'r') as conffile:\n try:\n config = yaml.load(conffile)['vrocli']\n logger = logging.getLogger()\n logger.setLevel(config['log_level'])\n return config\n except yaml.YAMLError as e:\n logger.error(e)\n exit(-1)", "def get_options(self):\r\n return self._option_values", "def parse(self, config_file):\n\t\tself.options = yaml.load(open(config_file))", "def get_options_from_file(path):\n with open(path) as f:\n content = f.read()\n keys = re.findall(r\"%(.+):\", content)\n values = re.findall(r\":\\s*([\\w\\W]+?)\\s*(?:%|$)\", content)\n\n options = dict(zip(keys, values))\n return options", "def read_dotvistrails_option(self):\n get = command_line.CommandLineParser().get_option\n if get('dotVistrails')!=None:\n self.temp_configuration.dotVistrails = get('dotVistrails')", "def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }", "def config_list_options(section):\n return __CONFIG.items(section)", "def parse_config(config_file_name):\n\n config = ConfigParser.ConfigParser()\n config.read(config_file_name)\n main_cfg_section = config.sections()[0]\n\n options = config.options(main_cfg_section)\n results = {}\n for option in options:\n try:\n results[option] = config.get(main_cfg_section, option)\n except:\n print('exception on %s!' 
% option)\n results[option] = None\n return results", "def parse_config (config_file, option):\n\top_config = open(config_file, \"r\")\n\tif option == \"blast\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"blast\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tdb = line[1]\n\t\t\t\tevalue = line[2]\n\t\t\t\treturn(db, evalue)\n\telif option == \"clustalw\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith (\"clustalw\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tclustal_path = line[1]\n\t\t\t\treturn (clustal_path)\n\n\telif option == \"plotly\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"plotly\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tusername = line[1]\n\t\t\t\tapi_key = line[2]\n\t\t\t\treturn (username, api_key)", "def get_option(self,sctn,optionname):\n\n if type(sctn) == list:\n sections = sctn\n else:\n sections = [sctn]\n sections.append(self.section_type)\n for i in range(0,len(sections)):\n if self.config.has_option(sections[i],optionname):\n return self.config.get(sections[i],optionname)", "def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options", "def read_cfg(file_path, account):\n d = {}\n parser = SafeConfigParser()\n\n try:\n parser.read(os.path.expanduser(file_path))\n for option in parser.options(account):\n # [1:-1] strips apostrophes wrapping the string\n d[option] = parser.get(account, option)[1:-1]\n return d\n except:\n print \"Config read failed\"\n return None", "def config(gvar):\n\n mandatory = []\n required = []\n optional = ['-cc', '-ckv', '-CSEP', '-CSV', '-g', '-H', '-h', '-NV', '-ok', '-r', '-s', '-V', '-VC', '-v', '-x509', '-xA']\n\n if gvar['retrieve_options']:\n return mandatory + required + optional\n\n # Check for missing arguments or help required.\n form_data = check_keys(\n gvar,\n mandatory,\n required,\n optional,\n key_map=KEY_MAP)\n\n # List the current defaults. 
If the form_data contains any optional fields,\n # those values will be updated before the list is retrieved.\n response = requests(\n gvar,\n '/server/config/',\n form_data\n )\n \n if response['message']:\n print(response['message'])\n\n # Print report\n show_active_user_groups(gvar, response)\n\n show_table(\n gvar,\n response['config_list'],\n [\n 'category/Category,k',\n 'config_key/Config Key,k',\n 'config_type/Type',\n 'config_value/Value',\n ],\n title=\"Server Configuration\",\n )", "def getVirtualConfig(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/config' % (node,vmid),None)\n return data", "def _get_cernvm_config():\n\n try:\n response = urllib2.urlopen(\"http://cernvm.cern.ch/config/\")\n _config = response.read()\n\n # Parse response\n _params = {}\n _config = _config.split(\"\\n\")\n for line in _config:\n if line:\n (k, v) = line.split(\"=\", 1)\n _params[k] = v\n\n # Generate JSON map for the CERNVM_REPOSITORY_MAP\n _cvmMap = {}\n _map = _params[\"CERNVM_REPOSITORY_MAP\"].split(\",\")\n for m in _map:\n (name, _optlist) = m.split(\":\", 1)\n options = _optlist.split(\"+\")\n _cvmMap[name] = options\n\n # Update CERNVM_REPOSITORY_MAP\n _params[\"CERNVM_REPOSITORY_MAP\"] = json.dumps(_cvmMap)\n _params[\"CERNVM_ORGANISATION_LIST\"] = _params[\n \"CERNVM_ORGANISATION_LIST\"].split(\",\")\n\n # Return parameters\n return _params\n\n except Exception as ex:\n print \"Got error: %s\\n\" % str(ex)\n return {}", "def get_config():\n if pyversion(\"3\"): import configparser\n else: import ConfigParser as configparser\n config = configparser.ConfigParser()\n import os\n rcfiles = [\n \"/etc/weatherrc\",\n \"/etc/weather/weatherrc\",\n os.path.expanduser(\"~/.weather/weatherrc\"),\n os.path.expanduser(\"~/.weatherrc\"),\n \"weatherrc\"\n ]\n for rcfile in rcfiles:\n if os.access(rcfile, os.R_OK): config.read(rcfile)\n for section in config.sections():\n if section != section.lower():\n if config.has_section(section.lower()):\n config.remove_section(section.lower())\n config.add_section(section.lower())\n for option,value in config.items(section):\n config.set(section.lower(), option, value)\n return config", "def parse_config (config_file, option):\n\top_config = open(config_file, \"r\")\n\tif option == \"blast\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"blast\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tdb = line[1].strip()\n\t\t\t\tevalue = line[2].strip()\n\t\t\t\treturn(db, evalue)\n\n\telif option == \"clustalw\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith (\"clustalw\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tclustal_path = line[1].strip()\n\t\t\t\treturn (clustal_path)\n\n\telif option == \"plotly\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"plotly\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tusername = line[1].strip()\n\t\t\t\tapi_key = line[2].strip()\n\t\t\t\treturn (username, api_key)\n\n\telif option == \"email\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"Entrez_email\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\tmail = line[1].strip()\n\t\t\t\treturn (mail)\n\telif option == \"root\":\n\t\tfor line in op_config:\n\t\t\tif line.startswith(\"root\"):\n\t\t\t\tline = line.split(\"\\t\")\n\t\t\t\troot = line[1].strip()\n\t\t\t\treturn (root)", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n from pydft import base\n pdescr = \"Numerical DFT code.\"\n parser = argparse.ArgumentParser(parents=[base.bparser], 
description=pdescr)\n for arg, options in script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args # pragma: no cover", "def parse():\n rcParams = configparser.ConfigParser(defaults=defaults())\n rcParams.read([os.path.join(os.getcwd(), 'watershed_workflowrc'),\n os.path.join(os.getcwd(), '.watershed_workflowrc'),\n os.path.join(home(), '.watershed_workflowrc')])\n return rcParams", "def get_options(self):\n return []", "def readopts(self):\n parser = OptionParser()\n parser.add_option(\"--dbname\", action=\"store\", type=\"string\", dest=\"dbname\", default=None)\n\n parser.add_option(\"--user\",\n action=\"store\",\n type=\"string\",\n dest=\"user\",\n default=None)\n\n parser.add_option(\"--password\",\n action=\"store\",\n type=\"string\",\n dest=\"password\",\n default=None)\n\n parser.add_option(\"--host\",\n action=\"store\",\n type=\"string\",\n dest=\"host\",\n default=None)\n\n parser.add_option(\"--port\",\n action=\"store\",\n type=\"string\",\n dest=\"port\",\n default=None)\n\n (options, args) = parser.parse_args()\n\n if options.dbname is None:\n print \"dbname is mandatory\"\n exit(1)\n\n conf = \"dbname=%s\" % options.dbname\n for parm in ['user', 'password', 'host', 'port']:\n if options.__dict__[parm] is not None:\n conf = \"%s %s=%s\" % (conf, parm, options.__dict__[parm])\n return conf", "def options(self):\r\n return self._options", "def _load_file(self, filename: Path) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n with filename.open(\"rb\") as f:\n config = tomli.load(f)\n\n global_options = config.get(\"tool\", {}).get(\"cibuildwheel\", {})\n platform_options = global_options.get(self.platform, {})\n\n return global_options, platform_options", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n import sys\n from matdb import base\n pdescr = \"MATDB Context Finder\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in _script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args", "def select_divvy_config(filepath):\n divcfg = yacman.select_config(\n config_filepath=filepath,\n config_env_vars=COMPUTE_SETTINGS_VARNAME,\n default_config_filepath=DEFAULT_CONFIG_FILEPATH,\n check_exist=True,\n )\n _LOGGER.debug(\"Selected divvy config: {}\".format(divcfg))\n return divcfg", "def get_config(self,config):\n return self.parser.get(\"main\", config)", "def comando_config(self):\r\n if args.tag:\r\n cfile = args.file\r\n\t if args.opcao == 'daruma' and not cfile:\r\n cfile = '/usr/local/lib/daruma.ini'\r\n\t if args.value:\r\n dictags = self.set_param_section_config_ini(cfile, args.loja, args.tag, args.value)\r\n return dictags\r\n # modificar\r\n\t else:\r\n dictag = self.get_param_section_config_ini(cfile, args.loja, args.tag)\r\n return dictag #retorna dicicionario\r", "def read_all_options(self, test_case=None):\n args = self.get_parsed_cmd_args(test_case)\n\n Options.validate_methods(args[\"methods\"])\n\n self.read_config_file(args[\"config_file\"])\n\n for option in self.options:\n if args[option] not in [None, []]:\n self.options[option] = args[option]\n\n if option in self.method_options:\n method, method_option = self.method_options[option]\n Options.available_methods()[method].options[method_option] = args[option]\n\n #remove duplicate\n for option in [\"methods\", 
\"packages\"]:\n self.options[option] = list(set(self.options[option]))\n\n return self.options", "def options(self, section, *args):\n cnt = self._check_args('options', 2, 3, args)\n try:\n return ConfigParser.RawConfigParser.options(self, section)\n except ConfigParser.NoSectionError:\n if cnt == 1:\n return args[0]\n raise", "def get_config(self, view = None):\n return self._get_config(\"config\", view)", "def requested_config_vals():\n return {'transfer_stats_per_file':'opt'}", "def opt_in(self) -> List[str]:\n return self.raw_config.get(\"opt_in\", [])", "def vinet_configs(connection):\n assert connection\n query = \"\"\"select * from configs()\"\"\"\n return [item.strip() for item in sqlio.read_sql_query(query, connection)['name']]", "def parse_config(self):\n # TODO: parse config file\n pass", "def options(argv=[]):\r\n parser = HendrixOptionParser\r\n return vars(parser.parse_args(argv)[0])", "def options(self):\n return self.__options", "def config(self, *args):\n if len(args) == 1 and args[0].find('.') >= 0:\n return self._client.execute('showconfig', args[0]).strip()\n \n out = self._client.execute('showconfig', args)\n result = {}\n\n for line in out.splitlines():\n ks, v = line.split('=', 1)\n ks = ks.split('.')\n d = result\n for k in ks[:-1]:\n d = d.setdefault(k, {})\n d[ks[-1]] = v.strip()\n\n return result", "def get_config():\n\n return json.loads(CONFIG_FILE.read_text())", "def options(self, parser, env):\n pass", "def get_tool_options(tool_dirname: str) -> Dict[str, Any]:\n return load_yaml(os.path.join(PHP_TOOL_PATH, tool_dirname, \"options.yml\"))", "def readConfig(file=\"dispatcher.conf\"):\n\n parser = configparser.ConfigParser()\n parser.read(file)\n machines = parser.items(\"MACHINES\")\n commands = parser.items(\"COMMANDS\")\n\n return machines, commands", "def read_config(self, config_filename):", "def get_options():\n # pass in the access_token via commandline\n parser = OptionParser()\n parser.add_option(\"--data-dir\", default='/tmp',\n action=\"store\", type=\"string\", dest=\"data_dir\",\n help=\"Directory where DBs exist\")\n parser.add_option(\"--malicious\",\n action=\"store_true\", default=False, dest=\"malicious\",\n help=\"Check malicious\")\n parser.add_option(\"--suspicious\",\n action=\"store_true\", default=False, dest=\"suspicious\",\n help=\"Check suspicious\")\n parser.add_option(\"--predicted\",\n action=\"store_true\", default=False, dest=\"predicted\",\n help=\"Check predicted\")\n (options, _) = parser.parse_args()\n if(not options.malicious and\n not options.predicted and\n not options.suspicious):\n parser.error(\"Please specify at least one category\")\n return options", "def get_config_params():\n configParser = configparser.ConfigParser()\n configParser.read(os.path.splitext(sys.argv[0])[0]+'.ini')\n return configParser", "def getConfig(PV, verbose, **kwargs):\n\tif 'hutch' in kwargs and kwargs['hutch']:\n\t\thutch = kwargs['hutch']\n\telse:\n\t\thutch = PV[:3]\n\t\tif hutch.lower() == \"sxr\" or hutch.lower() == \"amo\":\n\t\t\thutch = \"sxd\"\n\tcol, hr, mode = \"\", \"\", \"\"\n\tif 'HR' in kwargs and kwargs['HR']: mode = \"_hr_mode\"\n\telif 'LR' in kwargs and kwargs['LR']: mode = \"_lr_mode\"\n\telse:\n\t\tif \":col:\" in PV.lower(): col = \"_col\"\n\t\telse: col = \"\"\n\t\tif \":hr:\" in PV.lower() : hr = \"_hr\"\n\t\telse: hr = \"\"\n\tcfgName = hutch + hr + col + mode\n\t## Add new parameters below\n\t##\n\textra = \"\"\n\tif 'extra' in kwargs and kwargs['extra']:\n\t\textra = \"_{0}\".format(kwargs['extra'])\n\tcfgName += 
extra\n\tconfig = \"./gigeScripts/configurations/gige_{0}.cfg\".format(cfgName)\n\t##\n\t# Make sure the file exists\n\tif verbose: print \"Checking config file\"\n\tif not os.path.isfile(config):\n\t\tif not os.path.isfile(config[0:2] + config[14:]):\n\t\t\tprint \"Configuration file {0} does not exist!\".format(config)\n\t\t\tconfig = None\n\t\telse: config = config[0:2] + config[14:]\n\treturn config", "def get_conf_options(conf_files, key_lower=True):\r\n # use <xx> <yy> ... to specify a list string for an option.\r\n p_multi = re.compile(\"<([^>]+)>\")\r\n conf_options = dict()\r\n if key_lower:\r\n conf_parser = configparser.ConfigParser()\r\n else:\r\n conf_parser = AppConfig()\r\n try:\r\n conf_parser.read(conf_files)\r\n for section in conf_parser.sections():\r\n t_section = dict()\r\n for option in conf_parser.options(section):\r\n value = conf_parser.get(section, option)\r\n value_list = p_multi.findall(value)\r\n if value_list:\r\n value = [item.strip() for item in value_list]\r\n t_section[option] = value\r\n conf_options[section] = t_section\r\n return 0, conf_options\r\n except Exception as e:\r\n # print((\"Error. Can not parse configuration file(s) %s\" % conf_files))\r\n # print(e)\r\n return 1, \"\"", "def __init__(self, config_file: str = \"config.json\"):\n path_to_config = (Path(sys.modules[self.__module__].__file__).parent\n / config_file)\n with open(path_to_config, \"r\") as f:\n self.options = json.load(f)", "def cb_config(data, option, value):\n option_name = option.split(\".\")[-1]\n if option_name in vimode_settings:\n vimode_settings[option_name] = value\n return weechat.WEECHAT_RC_OK", "def get_conf_options(conf_files, key_lower=True):\r\n # use <xx> <yy> ... to specify a list string for an option.\r\n p_multi = re.compile(\"<([^>]+)>\")\r\n conf_options = dict()\r\n if key_lower:\r\n conf_parser = configparser.ConfigParser()\r\n else:\r\n conf_parser = AppConfig()\r\n try:\r\n conf_parser.read(conf_files)\r\n for section in conf_parser.sections():\r\n t_section = dict()\r\n for option in conf_parser.options(section):\r\n value = conf_parser.get(section, option)\r\n value = win2unix(value, 0)\r\n value_list = p_multi.findall(value)\r\n if value_list:\r\n value = [item.strip() for item in value_list]\r\n t_section[option] = value\r\n conf_options[section] = t_section\r\n return 0, conf_options\r\n except Exception as e:\r\n print((\"Error. 
Can not parse configuration file(s) %s\" % conf_files))\r\n print(e)\r\n return 1, \"\"", "def _options(self):\n return", "def configure(self, options, conf):", "def get_options(options, opt_path):\r\n options_in = open(opt_path, 'r')\r\n # get exceptions\r\n for line_in in options_in:\r\n line = line_in.strip()\r\n if len(line) == 0:\r\n continue\r\n if line.startswith(\"#\"):\r\n continue\r\n if line.startswith(\"[\") and \"pep8\" in line:\r\n continue\r\n option = line\r\n if not line.startswith(\"-\"):\r\n line = \"--\" + line\r\n options.append(line)\r\n\r\n options_in.close()", "def rpc_config_get(self, option_name):\n\t\tif isinstance(option_name, (list, tuple)):\n\t\t\toption_names = option_name\n\t\t\toption_values = {}\n\t\t\tfor option_name in option_names:\n\t\t\t\tif self.config.has_option(option_name):\n\t\t\t\t\toption_values[option_name] = self.config.get(option_name)\n\t\t\treturn option_values\n\t\telif self.config.has_option(option_name):\n\t\t\treturn self.config.get(option_name)\n\t\treturn", "def grab_config():\n parser = argparse.ArgumentParser(description=\"NoisyCLIP\")\n\n parser.add_argument('--config_file')\n parser.add_argument('--ckpt_file')\n\n config = yaml_config_hook(parser.parse_args().config_file)\n for k, v in config.items():\n parser.add_argument(f\"--{k}\", default=v, type=type(v))\n\n args = parser.parse_args()\n\n return args", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def get_options(filepath):\n options = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n function = row[\"Option\"]\n options.setdefault(function, {})\n options[function][\"description\"] = row[\"Description\"]\n return options", "def RetrieveOptions():\n \n # Get options...\n global Options\n Options = docopt(_docoptUsage_)\n \n # Set current working directory to the specified directory...\n WorkingDir = Options[\"--workingdir\"]\n if WorkingDir:\n os.chdir(WorkingDir)\n \n # Handle examples option...\n if \"--examples\" in Options and Options[\"--examples\"]:\n MiscUtil.PrintInfo(MiscUtil.GetExamplesTextFromDocOptText(_docoptUsage_))\n sys.exit(0)", "def get_vcs_settings():\n\n default = [\n {\"name\": \"git\", \"dir\": \".git\", \"cmd\": \"git\"},\n {\"name\": \"svn\", \"dir\": \".svn\", \"cmd\": \"svn\"},\n {\"name\": \"bzr\", \"dir\": \".bzr\", \"cmd\": \"bzr\"},\n {\"name\": \"hg\", \"dir\": \".hg\", \"cmd\": \"hg\"},\n {\"name\": \"tf\", \"dir\": \"$tf\", \"cmd\": \"C:/Program Files (x86)/Microsoft Visual Studio 11.0/Common7/IDE/TF.exe\"}\n ]\n settings = get_settings().get('vcs', default)\n\n # re-format settings array if user has old format of settings\n\n if type(settings[0]) == list:\n settings = [dict(name=name, cmd=cmd, dir='.'+name) for name, cmd in settings]\n\n return settings", "def getOptions() :\n usage = ('usage: python submit_all.py -c CONFIG -d DIR ')\n\n parser = OptionParser(usage=usage) \n parser.add_option(\"-c\", \"--config\", dest=\"config\",\n help=(\"The crab script you want to submit \"),\n metavar=\"CONFIG\")\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\",\n help=(\"The crab directory you want to use \"),\n metavar=\"DIR\")\n parser.add_option(\"-f\", \"--datasets\", dest=\"datasets\",\n help=(\"File listing datasets to run over\"),\n metavar=\"FILE\")\n (options, args) = parser.parse_args()\n\n\n if options.config == None or options.dir == None:\n parser.error(usage)\n \n return options", "def get_config_params(args):\n configParseObj = 
configparser.ConfigParser()\n configParseObj.read(args)\n return configParseObj", "def get_plugin_options(name):\n return get_plugin_loader(name).get_options()", "def _getoptions():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--data-dir\", dest=\"datadir\",\n help=\"Data directory\",\n default=None)\n parser.add_option(\"-u\", \"--url\", dest=\"url\",\n help=\"URL to load to\",\n default='http://localhost:8080/metadataloader')\n return parser.parse_args()[0]", "def connectionoptions(self, tokens):\n\n return self.process_value_pairs(tokens, \"connectionoptions\")", "def _get_config(self, unit, filename):\n file_contents = unit.file_contents(filename)\n config = ConfigParser.ConfigParser()\n config.readfp(io.StringIO(file_contents))\n return config", "def _getoptions():\n parser = OptionParser()\n parser.add_option(\"-f\", \"--dwca_file\", dest=\"dwca_file\",\n help=\"Darwin Core Archive file\",\n default=None)\n return parser.parse_args()[0]", "def list_opts():\n return _make_opt_list([OPTS], 'tvdb')", "def test_get_config_file_value(self):\n parser = GbpOptionParser('cmd4')\n self.assertEqual(parser.get_config_file_value('new_overrides_git_option1'),\n 'new_overrides_git_value1')\n self.assertEqual(parser.get_config_file_value('doesnotexist'), None)", "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def options(self):\n return self._options", "def cmdopt(request):\n return request.config.getoption(\"-c\")", "def read_config():\n parser = OptionParser()\n parser.add_option(\"-c\", \"--config\", dest=\"conf_path\", type=\"string\", 
help=\"config file path\")\n (options, args) = parser.parse_args()\n\n config.readfp(open(options.conf_path)) # \"threadbot.cfg\"\n subreddit = config.get(\"threadbot\", \"subreddit\")\n username = config.get(\"threadbot\", \"username\")\n password = config.get(\"threadbot\", \"password\")\n\n return subreddit, username, password", "def options() -> List:\n return list(c.value for c in Plugin)", "def options(self, parser):\n pass", "def get_config_params(config_file_name=\"config\\import.ini\"):\n\n configParser=configparser.ConfigParser()\n configfile=config_file_name\n configfile_path=os.path.abspath(configfile)\n configParser.read(configfile_path)\n # READING INI FILE\n #db params\n db_params=dict()\n db_params['user']=configParser.get('DB','pangaea_db_user')\n db_params['pwd']=configParser.get('DB','pangaea_db_pwd')\n db_params['db']=configParser.get('DB','pangaea_db_db')\n db_params['host']=configParser.get('DB','pangaea_db_host')\n db_params['port']=configParser.get('DB','pangaea_db_port')\n #terminologies\n terminologies_params=configParser.get('INPUT','terminologies') # parameters for each terminology as JSON str\n terminologies_params_parsed=json.loads(terminologies_params)\n\n return db_params,terminologies_params_parsed", "def __get_options(self):\n for sect in self.file_parser.sections():\n if self.file_parser.has_option(sect, 'implementation'):\n selected_imp = self.file_parser.get(sect, 'implementation')\n imptype = self.file_parser.get(sect, 'optype')\n # pylint: disable = E1103\n enabled = self.file_parser.get(sect, 'enabled').lower()\n # pylint: enable = E1103\n if enabled == 'always':\n stateval = True\n permanent = True\n elif enabled == 'true':\n stateval = True\n permanent = False\n else:\n stateval = False\n permanent = False\n\n if self.file_parser.has_option(sect, 'id'):\n _id = self.file_parser.get(sect, 'id')\n self.opt_dict[sect]['id'] = _id\n\n self.opt_dict[sect]['permanent'] = permanent\n self.opt_dict[sect]['imptype'] = imptype\n if stateval == True:\n imp_unavailable = (selected_imp in self.imp2opt_dict) and (\n self.imp2opt_dict[selected_imp] != 'none' )\n if selected_imp == 'none' or imp_unavailable:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'\n else:\n self.opt_dict[sect]['enabled'] = True\n self.set_imp(sect, selected_imp)\n# dbmsg = 'Add imp2opt_dict[{0}] = {1}'\n# print dbmsg.format(selected_imp, sect)\n else:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'", "def get_all_options(self): \n return self._options.items()", "def test():\n conf = AppConf()\n\n for section in conf.sections():\n print(section)\n for option, value in conf.items(section):\n print(\" {option:15}: {value}\".format(option=option, value=value))", "def _user_options(path):\n if path is None:\n return {}\n else:\n with util.open_file(path) as options_file:\n return util.load_json(options_file)" ]
[ "0.7112448", "0.6667187", "0.66389835", "0.65258443", "0.64888626", "0.64602494", "0.6437445", "0.6428669", "0.6369955", "0.6348633", "0.6293149", "0.6288183", "0.6242046", "0.6238758", "0.62077934", "0.6206483", "0.6179304", "0.61739796", "0.6169169", "0.61607754", "0.60885215", "0.6062007", "0.60474145", "0.6044793", "0.6027037", "0.60268646", "0.6025369", "0.6016114", "0.60134554", "0.59918296", "0.5980385", "0.5980337", "0.5977196", "0.59458876", "0.59430844", "0.5939805", "0.59199333", "0.5905064", "0.589868", "0.58941746", "0.5890653", "0.58884037", "0.5884455", "0.5880557", "0.58695513", "0.58687395", "0.58590937", "0.585859", "0.5853293", "0.5850937", "0.5847983", "0.5827723", "0.5817901", "0.5817646", "0.581399", "0.5805762", "0.5802174", "0.5801724", "0.57940507", "0.5789478", "0.57741183", "0.5771129", "0.576989", "0.5764474", "0.57532156", "0.5743168", "0.57376045", "0.5736508", "0.57313025", "0.5723395", "0.5715941", "0.5700975", "0.5680706", "0.56655306", "0.5665359", "0.5661282", "0.56595755", "0.56576127", "0.5657161", "0.565589", "0.5649333", "0.5637997", "0.56377816", "0.5637502", "0.5636756", "0.5628164", "0.56214666", "0.56214666", "0.56214666", "0.56214666", "0.56214666", "0.5619877", "0.5614281", "0.560362", "0.5602755", "0.56005627", "0.55954266", "0.5574667", "0.55728024", "0.55705744" ]
0.6026467
26
Method to read a well at a certain position
def read_well(self, row, col):
    try:
        col = int(col)
    except ValueError:
        print('Error! COLS have to be integers!')
        exit(1)
    w = self._plate_df.iloc[ROW_TO_INDEX[row]][col]
    return w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read():\n # TODO", "def readentry(self):\n # because of the complexity of the format, have to parse\n # the whole file at once.\n if self.__blocks is None:\n self.__parse()\n if self.__blocks.has_key('data') and self.__blocks['data'].has_key('matrix'):\n if self.__cursor < self.ntax:\n current = self.__blocks['data']['matrix'][self.__cursor]\n self.__cursor = self.__cursor + 1\n return current\n return None", "def get_index_from_well(self, well):\n pass", "def read(self):\n word = self.ser.readline()\n\n # strip carriage return and newline\n word = word[:-2]\n\n if word == b'start':\n self.current_position = 1\n elif word == b'end':\n self.current_position = 0\n elif self.current_position == 1:\n try:\n self.x = int(word)\n except:\n # Bad int\n self.ser.flush()\n self.current_position += 1\n elif self.current_position == 2:\n try:\n self.y = int(word)\n except:\n # Bad int\n self.ser.flush()\n self.current_position += 1\n elif self.current_position == 3:\n try:\n self.z = int(word)\n except:\n # Bad int\n self.ser.flush()\n self.current_position += 1\n elif self.current_position == 4:\n try:\n self.gx = int(word)\n except:\n # Bad int\n self.ser.flush()\n self.current_position += 1\n elif self.current_position == 5:\n try:\n self.gy = int(word)\n except:\n # Bad int\n self.ser.flush()\n self.current_position += 1\n elif self.current_position == 6:\n try:\n self.gz = int(word)\n except:\n # Bad int\n self.ser.flush()\n self.current_position += 1\n else:\n pass", "def _read_next_alignment(self, stream):", "def read(self):", "def gadget_type2_read(f,h,p):\n\n stat = 0\n while stat == 0:\n stat = findBlock(f,h)\n \n if stat == 2:\n print('end of file =/')\n print('scanning did not find block %s' % (h.reading))\n sys.exit()\n \n if h.reading == 'pos' or h.reading == 'vel':\n arr = g.gadget_readposvel(f,h,p)\n elif h.reading == 'pid':\n arr = g.gadget_readpid(f,h,p)\n elif h.reading == 'mass':\n arr = g.gadget_readmass(f,h,p)\n elif h.reading == 'metallicity':\n arr = g.gadget_readmetals(f,h,p,single=0)\n elif h.reading == 'age':\n arr = g.gadget_readage(f,h,p)\n elif h.reading == 'Z':\n arr = gadget_readgasstarprop(f,h,p)\n else:\n arr = gadget_general(f,h,p)\n \n return arr", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def readFirst(self, num):\n\t\t\n\t\treturn self.line[:num]", "def 
readLeft():\n return readAll()[0]", "def get_next_position(self):", "def get_block(self, idx):\n self.input_file.seek(idx * self.blocksize)\n return self.input_file.read(self.blocksize)", "def test_read_different_location(self):\n try:\n self.reader.read(self.books[1], 0, 1)\n self.fail(\"Readed book was not in the library\")\n except AssertionError:\n pass", "def _readTopLine(self):\n (self.NLHEAD, self.FFI) = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 2, int)\n self.NIV = int(self.FFI/1000)\n return (self.NLHEAD, self.FFI)", "def _read(self, location, width):\n return self._connector.read(location=location, width=width)", "def _get_block(self, pos):\n raise NotImplementedError", "def read(self) -> int:", "def read_data(self, loc):\n pass", "def readline(self, size=-1):\n ...", "def read_position(self):\n cord = 0\n if self.__current_player == 1:\n while cord == 0:\n cord = self.__players[0].read_position()\n if self.__fields[0].shoot_at(cord):\n print(\"Good job! You can shoot one more\")\n else:\n self.change_player()\n elif self.__current_player == 2:\n while cord == 0:\n cord = self.__players[1].read_position()\n if self.__fields[1].shoot_at(cord):\n print(\"Good job! You can shoot one more\")\n else:\n self.change_player()", "def get_piece_at_opening(self, x, y):\n self._validate_opening(x, y)\n return self._openings[x][y]", "def get_piece(self, at):\n return self.nodes[at].piece", "def locate(self, pos):\n for obj in self.wrappers:\n if obj.start <= pos < obj.end:\n for sub in getattr(obj, 'attributes', ()):\n if sub.start <= pos < sub.end:\n return sub\n return obj\n else:\n if pos == len(self.input):\n return self.wrappers[-1]\n raise IndexError(\"position %d out of range\" % pos)", "def __getitem__(self, i):\n\t\treturn self._blocks[i]", "def peek (self, where=0) :\r\n if (where<0 or where>=len(self)) :\r\n m = \"Trying to peek beyond the end of the Circ. Buff\"\r\n raise Exception(m)\r\n index = (self.nextGet_+where) % self.capacity()\r\n return self.buff_[index]", "def _read_data(self):", "def offset(self):\r\n return self.buf[0].unib[9:11]", "def read_far_ptr(self, offset):\n p = self.read_ptr(offset)\n segment_start = self.segment_offsets[ptr.far_target(p)] # in bytes\n offset = segment_start + ptr.far_offset(p)*8\n if ptr.far_landing_pad(p) == 0:\n # simple case: the landing pad is a normal pointer, just read it\n p = self.read_ptr(offset)\n return offset, p\n else:\n # complex case. From capnproto specs:\n # If B == 1, then the \"landing pad\" is itself another far\n # pointer that is interpreted differently: This far pointer\n # (which always has B = 0) points to the start of the object's\n # content, located in some other segment. The landing pad is\n # itself immediately followed by a tag word. The tag word\n # looks exactly like an intra-segment pointer to the target\n # object would look, except that the offset is always zero.\n #\n # read the 2nd far pointer and the tag word\n p = self.read_ptr(offset)\n ptag = self.read_ptr(offset+8)\n assert ptr.kind(p) == ptr.FAR\n assert ptr.far_landing_pad(p) == 0\n assert ptr.offset(ptag) == 0\n # compute the absolute offset which the object is located at\n segment_start = self.segment_offsets[ptr.far_target(p)] # in bytes\n offset = segment_start + ptr.far_offset(p)*8\n #\n # ptag is a pointer which perfectly describes the object we want\n # to read. Remember that normally when ptr_offset==0, capnproto\n # expects the object to start at offset+8. 
So here we return\n # offset-8, so that the object will be read at the expected place\n return offset-8, ptag", "def get_story(book):\n start_key = \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\n end_key = \"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nEnd\"\n start_pos = book.find(start_key)\n end_pos = book.find(end_key)\n story = book[start_pos:end_pos]\n return story", "def test_artemis_reader():\n _test_raw_reader(\n read_raw_artemis123,\n input_fname=short_hpi_1kz_fname,\n pos_fname=dig_fname,\n verbose=\"error\",\n )", "def read(self) -> int:\n ...", "def peek(self):", "def read_document(self):\n words = self.word_runner()\n word = \"press space to start\"\n orp_ind = 13\n try:\n while True:\n time.sleep(60 / self.wpm)\n\n if self.is_reading:\n word = next(words)\n orp_ind = int(self.orp_index(word))\n\n yield (word, orp_ind)\n except StopIteration:\n pass\n finally:\n del words", "def __read(self, i: int) -> bytes:\r\n b = self.data[self.idx: self.idx + i]\r\n self.idx += i\r\n if len(b) != i:\r\n raise DecodingError(\r\n \"Incorrect byte length returned between indexes of {0} and {1}. Possible unexpected End of File.\"\r\n .format(str(self.idx), str(self.idx - i)))\r\n return b", "def get_wells(self, fname):\r\n getLoc = False\r\n wells = []\r\n well = {'NAME': None, 'TYPE': None, 'OP_MODE': [], 'CON_TYPE': [], 'CON_VAL': [], 'LOC': None}\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n keyword = item[0].strip('*')\r\n if getLoc:\r\n if item[0][0] == '*' and item[0][1] == '*':\r\n continue\r\n if ':' in item[2]:\r\n K = [int(x)for x in item[2].split(':')]\r\n well['LOC'] = (int(item[0]), int(item[1]), int(K[-1]))\r\n else:\r\n well['LOC'] = (int(item[0]), int(item[1]), int(item[2]))\r\n getLoc = False\r\n elif keyword == 'WELL':\r\n # Add the previous well to the grid\r\n if well['NAME'] is not None:\r\n wells.append(copy.deepcopy(well))\r\n well = {'NAME': None, 'TYPE': None, 'OP_MODE': [], 'CON_TYPE': [], 'CON_VAL': [],\r\n 'LOC': None}\r\n well['NAME'] = item[1].strip(\"'\")\r\n elif keyword == 'INJECTOR':\r\n well['TYPE'] = 'INJ'\r\n elif keyword == 'PRODUCER':\r\n well['TYPE'] = 'PRO'\r\n elif keyword == 'OPERATE':\r\n well['OP_MODE'].append(item[1].strip('*'))\r\n well['CON_TYPE'].append(item[2].strip('*'))\r\n well['CON_VAL'].append(item[3].strip('*'))\r\n elif keyword == 'PERF':\r\n getLoc = True\r\n wells.append(well)\r\n return self.Wells(wells, self.Grid, self.times, self.out_dir)", "def read_position(self):\n # player's chosen coordinates to attack\n pos = self.__players[self.__current_player].read_position()\n # enemy's field\n next_player_field = self.__fields[self.__next_player]\n # attack the enemy's field\n coord = next_player_field.field[pos[0]][pos[1]]\n coord.shoot_at()\n\n # if the enemy's field doesn't have alive ships — current player wins\n if next_player_field.alive == 0:\n print(f'☼ ☼ ☼ {self.__players[self.__current_player].show_name} is a winner! 
☼ ☼ ☼')\n quit()\n\n # the next move of the enemy\n self.__current_player, self.__next_player = self.__next_player, self.__current_player", "def read(self , **kargs):\n return self.read_segment(**kargs)", "def _advance(self):\n if self._is_at_end():\n return None\n self.current += 1\n return self.source[self.current - 1]", "def parse_hand(self):\n self.parse_part()\n self.parse_header()\n self.parse_setup()\n self.parse_preflop()\n self.parse_flop()\n self.parse_turn()\n self.parse_river()\n self.parse_showdown()\n self.conclude_hand()", "def read(self, size=-1):\n ...", "def advance(self):\n in_bytes = self._pre_pos\n for tag in self._reader:\n if isinstance(tag, Tag):\n # skip the Metadata in flv stream.\n if not self.handle_magic_head(tag):\n if tag.type == VIDEO_TAG and tag.is_keyframe:\n self.append_keyframe(tag)\n self._pre_pos = self.position()\n in_bytes = self._pre_pos - in_bytes\n if in_bytes > 0:\n self.active()\n else:\n self.inactive()", "def _read(self, valid):\n start = self.pos\n while valid(self.char) and self.pos < self.length:\n self._read_char()\n\n return self.data[start : self.pos]", "def import_from_pos(fh):\n elem = None\n while True:\n l = fh.readline()\n if not l: break\n if 'nwfc1' in l and 'nwfc2' in l:\n w = l.split()\n nwfc1, nwfc2 = int(w[0]), int(w[1])\n # nwfc2 is assumed to be one - only one l value\n if 'lwfc1' in l:\n w = l.split('!')[0].split()\n lwfc1 = [int(_) for _ in w]\n if 'lwfc2' in l:\n lwfc2 = int(l.split()[0])\n if 'nonzero elements' in l:\n n = int(l.split()[0])\n elem = []\n l = fh.readline()\n c = 0\n while l and c < n:\n w = l.split()\n if len(w) in {5, 10}: # 5-col is for old pos format and 10-col is the enriched format by yfliang\n # (l,m) in lwfc1, m in lwfc2 (only one), i = (x=1,y=2,z=3)\n # m ranges from -l to l\n # elem = < h_c | r_i | beta_lm > (Core-level wavefunctions always proceed. 
)\n elem.append([int(_) for _ in w[ : 3]] + [float(w[3]) + 1j * float(w[4])]) \n l = fh.readline()\n c += 1\n return lwfc1, lwfc2, elem", "def __getitem__(self, index):\n return self.parses[index]", "def readRight():\n return readAll()[1]", "def read(self):\n return next(iter(self))", "def __getitem__(self, key):\n indices = key.rstrip(\"/\").split(\"/\")\n\n if len(indices) < 1:\n raise KeyError(\"Invalid slice of BasH5Collection\")\n\n if len(indices) >= 1:\n result = self.readers[indices[0]]\n if len(indices) >= 2:\n result = result[int(indices[1])]\n if len(indices) >= 3:\n if indices[2] == \"ccs\":\n result = result.ccsRead\n else:\n start, end = map(int, indices[2].split(\"_\"))\n result = result.read(start, end)\n return result", "def read(self, paragraph_idx=None):\n if paragraph_idx:\n self.paragraphs[paragraph_idx].read()\n else:\n for paragraph in self.paragraphs:\n paragraph.read()", "def read_output(self, fname, keys, subkeys):\r\n print('Reading well outputs')\r\n k = 0\r\n sk = 0\r\n # tID = 0\r\n build_keys = False\r\n build_subkeys = False\r\n cur_key = None\r\n cur_block = 1\r\n n_blocks = 0\r\n order = self._out_order(fname)\r\n # Initialize output dictionary\r\n well_out = {}\r\n for i,key in enumerate(keys):\r\n well_out[key] = {}\r\n for subkey in subkeys[i]:\r\n well_out[key][subkey] = {}\r\n # for t in range(len(self.times) - 1):\r\n # well_out[key][subkey][t+1] = []\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n # Find current time step\r\n if item[0] == 'TIME:':\r\n head = ''.join(item[2:])\r\n if 'GEMFIELDSUMMARY' in head:\r\n build_keys = True\r\n # tID += 1\r\n tID = item[1]\r\n n_wells = len(order[tID])\r\n n_blocks = math.ceil(n_wells / 4)\r\n continue\r\n # Assume that keywords are ordered as they appear in .out\r\n if build_keys:\r\n # Current block has been read, move to next one\r\n if k == len(keys):\r\n cur_block += 1\r\n k = 0\r\n if keys[k] in line:\r\n cur_key = keys[k]\r\n build_keys = False\r\n build_subkeys = True\r\n continue\r\n elif build_subkeys:\r\n if sk == len(subkeys[k]):\r\n build_subkeys = False\r\n build_keys = True\r\n k += 1\r\n sk = 0\r\n continue\r\n for subkey in subkeys[k]:\r\n if subkey in line:\r\n if cur_block == n_blocks:\r\n line = line.split('++')[0]\r\n item = list(map(str.strip, line.split('+')))\r\n item = list(filter(None, item))\r\n if tID not in well_out[cur_key][subkey]:\r\n well_out[cur_key][subkey][tID] = item[1:]\r\n else:\r\n well_out[cur_key][subkey][tID].extend(item[1:])\r\n sk += 1\r\n # Attach well names to well outputs\r\n for key in well_out:\r\n for subkey in well_out[key]:\r\n # for t in range(1, len(self.times)):\r\n for t in well_out[key][subkey]:\r\n well_out[key][subkey][t] = {k:v for k,v in zip(order[t], well_out[key][subkey][t])}\r\n return well_out", "def read_lookahead(self) -> SmartSsdReadLookahead:\n return self._read_lookahead", "def read(self):\n pass", "def _parse_input_loc(block, profile, **kwargs):\n layer, wave_field = parse_fixed_width(\n 2 * [(5, int)], block\n )\n\n return profile.location(\n motion.WaveField[wave_field],\n index=(layer - 1),\n )", "def read(self, block_no):\n with open(self.file_path, 'r+') as f:\n f.seek(block_no * config.block_size)\n return f.read(config.block_size)", "def __getitem__(self, index):\n return self.seq[index]", "def read(self):\n raise NotImplementedError", "def get_position(self, position):", "def read_until(steg_bytes: bytes, offset: int, ending: str):\r\n # Create a variable to hold the bytes 
read\r\n bytes_read = b\"\"\r\n\r\n # Loop through the steg_bytes\r\n while offset < len(steg_bytes):\r\n # Check if the current byte is the ending byte sequence\r\n if steg_bytes[offset:offset + len(ending)] == ending.encode():\r\n # Return the bytes read and the offset of the ending byte sequence\r\n return bytes_read, offset\r\n # Read the next byte\r\n bytes_read += steg_bytes[offset:offset + 1]\r\n offset += 1", "def read(self,getindex):\n if getindex<0:\n #print(\"Indicies are non-negative\")\n return None\n try:\n bufinx = len(self.buffer)+(getindex - self.index.value)\n if bufinx<0:\n #print(\"This item has been deleted, try increasing the queue size\")\n return None\n return self.buffer[bufinx]\n except IndexError:\n #print(\"This item doesn't exist yet\")\n return None", "def read_example(self, index):\n if (index < 0 or index >= len(self._data)):\n raise ValueError(\"Index must be from 0 (inclusive) to number of lines (exclusive).\")\n \n t = self._data[index][1]\n (X, header) = self._read_timeseries(self._data[index][0], t)\n y = self._data[index][2]\n\n return (X, t, y, header)", "def simulate_read(self):\n\n fastafile = ps.FastaFile(self.genome_fa)\n # left split read\n\n insert = int(np.random.normal(self.insert_size, (self.insert_size / 12), 1))\n start = int(np.random.randint(self.chr_pos_start, (self.chr_pos_end + 1)))\n left_end = start + self.read_length\n total_end = start + int(np.round(insert))\n right_start = total_end - self.read_length\n if total_end > self.chr_pos_end:\n # split read scenario or insert spanning split read scenario\n if left_end > self.chr_pos_end:\n # left read spanning split read scenario\n # left_read\n left_dntps = self.chr_pos_end - start\n right_dntps = self.read_length - left_dntps\n\n # the error could be here\n left_split_read = fastafile.fetch(self.chr, start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n left_read = left_split_read + right_split_read\n\n # right_read\n right_start = self.chr_pos_start + int(round(self.insert_size - left_dntps - self.read_length))\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n\n # assertion to check the error here\n\n common_id = \"%s|%s|%s:%s-%s:%s|%s:%s|1|%s\" % (\n self.read_number,\n self.chr,\n start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start + right_dntps),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n else:\n if right_start > self.chr_pos_end:\n # insert spanning split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n right_start = self.chr_pos_start + (right_start - self.chr_pos_end)\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|3|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n else:\n # right split read scenario\n assert right_start <= self.chr_pos_end\n assert (right_start + self.read_length) > self.chr_pos_end\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n\n # compute right dntps\n left_dntps = self.chr_pos_end - right_start\n right_dntps = self.read_length - left_dntps\n left_split_read = fastafile.fetch(self.chr, right_start, self.chr_pos_end)\n right_split_read = fastafile.fetch(self.chr, self.chr_pos_start, (self.chr_pos_start + right_dntps))\n right_read = 
left_split_read + right_split_read\n common_id = \"%s|%s|%s:%s|%s:%s-%s:%s|2|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n self.chr_pos_end,\n self.chr_pos_start,\n (self.chr_pos_start, right_dntps),\n self.circle_id,\n )\n\n else:\n # non split read scenario\n left_read = fastafile.fetch(self.chr, start, (start + self.read_length))\n # correct right read start\n right_read = fastafile.fetch(self.chr, right_start, (right_start + self.read_length))\n common_id = \"%s|%s|%s:%s|%s:%s|0|%s\" % (\n self.read_number,\n self.chr,\n start,\n (start + self.read_length),\n right_start,\n (right_start + self.read_length),\n self.circle_id,\n )\n\n return (right_read, left_read, common_id)", "def readinto(self, buf: AnyWritableBuf, /) -> int | None:", "def readinto(self, buf: AnyWritableBuf, /) -> int | None:", "def _load_frame(self, i):\n\n eof = False \n try:\n self.im.seek(i)\n except EOFError:\n eof = True\n\n return eof", "def _read(self, openf=None, stepfilter=None):\n itemstack = []\n current = None\n result = {}\n xkeys = None\n timeskip = False\n laststep = False\n\n if openf is None:\n f = open(self.filepath)\n else:\n f = openf\n\n line = 'start'\n while line != '':\n lastpos = f.tell()\n line = f.readline()\n if line == '':\n continue\n \n if itemstack is not None and len(itemstack) > 0: \n cast = itemstack.pop()\n raw = line.split()\n values = [t(r) for t, r in zip(cast, raw)]\n if len(values) == 1:\n values = values[0]\n\n if current == \"time\":\n if stepfilter is not None and values not in stepfilter:\n timeskip = True\n elif (self.index is not None and values != self.index):\n if values > self.index:\n if openf is None:\n return {}\n else:\n timeskip = True\n laststep = True\n else:\n timeskip = True\n elif self.index is None:\n self.index = values\n else:\n timeskip = False\n \n if len(itemstack) == 0 and current not in result:\n result[current] = values\n else:\n if current not in result:\n result[current] = []\n result[current].append(values)\n continue\n elif itemstack is None and current == \"atoms\":\n if \"ITEM\" in line:\n current = None\n if openf is not None:\n f.seek(lastpos)\n break\n else:\n #E.g. 
line: 1 4 -65.9625 1.54915 1.46824 5 30.976 \n vals = line.split()\n sid, atype = tuple(map(int, vals[0:2]))\n result[\"type\"].append(atype)\n result[\"id\"].append(sid)\n x, y, z = tuple(map(float, vals[2:5]))\n result[\"xyz\"].append((x, y, z))\n if len(vals) > 5 and xkeys is not None:\n for ikey, v in enumerate(vals[5:]):\n result[xkeys[ikey]].append(eval(v))\n continue # pragma: no cover\n \n if \"ITEM: TIMESTEP\" in line:\n if laststep:\n f.seek(lastpos)\n break\n itemstack.append((int,))\n current = \"time\"\n timeskip = False\n elif not timeskip:\n if \"ITEM: NUMBER OF ATOMS\" in line:\n itemstack.append((int,))\n current = \"natoms\"\n elif \"ITEM: BOX BOUNDS\" in line:\n period = line.strip().split(\"BOX BOUNDS\")\n if len(period) == 2 and period[1] != '':\n result[\"periodic\"] = period[1].strip().split()\n else:\n result[\"periodic\"] = (\"ss\", \"ss\" ,\"ss\")\n \n\t\t # Changes by JPRIEDS to accommodate triclinic boxes\n\t\t # Written 170719\n\t\t if len(result[\"periodic\"]) == 6:\n\t\t\titemstack.extend([(float, float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t\tresult[\"periodic\"] = result[\"periodic\"][3:]\n\t\t elif len(result[\"periodic\"]) == 3:\n\t\t\titemstack.extend([(float, float)]*3)\n\t\t\tcurrent = \"box\"\n\t\t else:\n emsg = \"Could not classify periodic bounds: {}\"\n raise ValueError(emsg.format(result[\"periodic\"]))\n elif \"ITEM: ATOMS\" in line:\n itemstack = None\n current = \"atoms\"\n result[\"type\"] = []\n result[\"id\"] = []\n result[\"xyz\"] = []\n \n #The first two headings in the line have \"ITEM: ATOMS\", the\n #rest are usuall id, type, x, y, z, rest...\n headings = line.split()\n extras = len(headings) > 7\n if extras:\n xkeys = []\n xheadings = headings[7:]\n for xhead in xheadings:\n key = \"atom:{}\".format(xhead)\n result[key] = []\n xkeys.append(key)\n \n if openf is None:\n #Close the file since we opened it.\n f.close()\n \n return result", "def open_position(self, position: int):\n # get row, column, and path to the well\n row_name = self.positions[position]['row']\n col_name = self.positions[position]['col']\n well_path = os.path.join(os.path.join(self.root_path, row_name), col_name)\n\n # check to see if this well exists (row/column)\n if os.path.exists(well_path):\n pos_name = self.positions[position]['name']\n pos_path = os.path.join(well_path, pos_name)\n\n # check to see if the position exists\n if os.path.exists(pos_path):\n\n if self.verbose: print(f'Opening subgroup {row_name}/{col_name}/{pos_name}')\n\n # update trackers to note the current status of the writer\n self.current_pos_group = self.store[row_name][col_name][pos_name]\n self.current_well_group = self.store[row_name][col_name]\n self.current_position = position\n\n else:\n raise FileNotFoundError(f'Could not find zarr position subgroup at {row_name}/{col_name}/{pos_name}\\\n Check spelling or create position subgroup with create_position')\n else:\n raise FileNotFoundError(f'Could not find zarr position subgroup at {row_name}/{col_name}/\\\n Check spelling or create column/position subgroup with create_position')", "def __getitem__(self, idx: int):\n return self.deck[idx]", "def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def read(self, sacc_data: sacc.Sacc) -> None:", "def read(self, sacc_data: sacc.Sacc) -> None:", "def read(self, sacc_data: sacc.Sacc) -> None:", "def test_next_reads_all_thoughts(user_info, thought):\n parser = ProtobufSampleParser()\n b = BytesIO(b\"\".join(_build_message_buffer(x) for x in (user_info, thought, 
thought)))\n out = parser.next(b)\n assert parser.user == user_info\n while out:\n assert out.snapshot == thought\n out = parser.next(b)", "def read(self, index_delta):\n assert(not self.is_almost_finished(index_delta))\n assert(index_delta > 0)\n\n # Conduct the sensor read\n self.currIndex += index_delta\n self.currValue = self.data[self.currIndex]", "def Offset(self) -> int:", "def Offset(self) -> int:", "def Offset(self) -> int:", "def next(self):\n lines = []\n query = False\n while 1:\n line = self._uhandle.readline()\n if not line:\n break\n # If I've reached the next one, then put the line back and stop.\n if lines and (line.startswith('BLAST')\n or line.startswith('BLAST', 1)\n or line.startswith('<?xml ')):\n self._uhandle.saveline(line)\n break\n # New style files ommit the BLAST line to mark a new query:\n if line.startswith(\"Query=\"):\n if not query:\n if not self._header:\n self._header = lines[:]\n query = True\n else:\n #Start of another record\n self._uhandle.saveline(line)\n break\n lines.append(line)\n\n if query and \"BLAST\" not in lines[0]:\n #Cheat and re-insert the header\n #print \"-\"*50\n #print \"\".join(self._header)\n #print \"-\"*50\n #print \"\".join(lines)\n #print \"-\"*50\n lines = self._header + lines\n \n if not lines:\n return None\n \n data = ''.join(lines)\n if self._parser is not None:\n return self._parser.parse(File.StringHandle(data))\n return data", "def read(self,numElems):\n self._ensureNumElems(numElems)\n # Take either numElems, or the entire head if not enough elements.\n last = min(self.head.shape[0], numElems)\n res = self.head[:last]\n self.head = self.head[last:]\n return res", "def __getitem__( self, line ):\n\n # check if the line is a string and if yes convert it to an index\n if isinstance( line, str ):\n line = find_line( self._line_info, line )\n if line < 0:\n raise ValueError(\"The desired spectral window could not be found!\")\n \n return self._sji_data[line]", "def test_read_EOF2(demo_data):\n\n openeeg = openEDF(demo_data)\n #read 200 samples starting from 100 samples before EOF\n start = max(openeeg.header.samples) - 100\n arr = openeeg.read(start, start + 200)\n assert arr.shape[-1] == 100\n\n openeeg.close()", "def readNextObject(offset):\n in_file.seek(offset)\n token = in_file.read(1)\n token_h, token_l = ord(token) & 0xF0, ord(token) & 0x0F #high and low parts \n if token == '\\x00':\n return None\n elif token == '\\x08':\n return False\n elif token == '\\x09':\n return True\n elif token == '\\x0f':\n return ''\n elif token_h == 0x10: #int\n result = 0\n for k in xrange((2 << token_l) - 1):\n result = (result << 8) + ord(in_file.read(1))\n return result\n elif token_h == 0x20: #real\n if token_l == 2:\n return struct.unpack('>f', in_file.read(4))[0]\n elif token_l == 3:\n return struct.unpack('>d', in_file.read(8))[0]\n elif token_h == 0x30: #date\n f = struct.unpack('>d', in_file.read(8))[0]\n return datetime.datetime.utcfromtimestamp(f + MAC_OS_X_TIME_OFFSET)\n elif token_h == 0x80: #data\n s = getSize(token_l)\n return in_file.read(s)\n elif token_h == 0x50: #ascii string\n s = getSize(token_l)\n return in_file.read(s)\n elif token_h == 0x60: #unicode string\n s = getSize(token_l)\n return in_file.read(s * 2).decode('utf-16be')\n elif token_h == 0x80: #uid\n return in_file.read(token_l + 1)\n elif token_h == 0xA0: #array\n s = getSize(token_l)\n obj_refs = struct.unpack('>' + ref_format * s, in_file.read(s * ref_size))\n return map(lambda x: readNextObject(object_offsets[x]), obj_refs)\n elif token_h == 0xC0: 
#set\n s = getSize(token_l)\n obj_refs = struct.unpack('>' + ref_format * s, in_file.read(s * ref_size))\n return set(map(lambda x: readNextObject(object_offsets[x]), obj_refs))\n elif token_h == 0xD0: #dict\n result = {}\n s = getSize(token_l)\n key_refs = struct.unpack('>' + ref_format * s, in_file.read(s * ref_size))\n obj_refs = struct.unpack('>' + ref_format * s, in_file.read(s * ref_size))\n for k, o in zip(key_refs, obj_refs):\n key = readNextObject(object_offsets[k])\n obj = readNextObject(object_offsets[o])\n result[key] = obj\n return result\n raise InvalidFileException()", "def wellBoreDataRead(fileName = None, onePhase = None):\n materialIndex = 1\n wellBoreDataDict = {}\n if onePhase == None:\n onePhase = True\n pass\n if fileName == None:\n if onePhase == True:\n fileName = environ[\"PWD\"]+\"/Data/wellbore.dat\"\n pass\n else:\n fileName = environ[\"PWD\"]+\"/Data/twophasewellbore.dat\"\n pass\n try:\n dataFile = open(fileName,\"r\")\n except:\n input(\" Necessary data have been read from\\n\"+\\\n \" the generic wellbore data file: $WRAPPER/Data/Wellbore/wellbore.dat\\n\"+\\\n \" A copy of that file is now in your reference directory as $PWD/Data/wellbore.dat.\\n\"+\n \" Modify it now to your own specific wellbore data.\\n\"+\\\n \" Now, enter any ascii key to continue the simulation\")\n if (onePhase == True):\n system(\"mkdir -p $PWD/Data;cp -p $WRAPPER/Data/Wellbore/wellbore.dat ./Data;chmod u+w $PWD/Data/wellbore.dat\")\n pass\n else:\n system(\"mkdir -p $PWD/Data;cp -p $WRAPPER/Data/Wellbore/twophasewellbore.dat ./Data;chmod u+w $PWD/Data/twophasewellbore.dat\")\n pass\n dataFile = open(fileName,\"r\")\n line = dataFile.readline()\n #print(\"first line\", line)\n \n while \"Transient terms\" not in line:\n line = dataFile.readline()\n if \"material\" in line.lower():\n wellBoreDataDict[\"Material\"+str(materialIndex)] = {}\n wellBoreDataDict1 = wellBoreDataDict[\"Material\"+str(materialIndex)]\n wellBoreDataDict[line.lower().replace(\"material\",\"\").replace(\"!\",\"\").replace(\"\\n\",\"\").replace(\" \",\"\")] = \"Material\"+str(materialIndex)\n materialIndex+=1\n elif (\"=\" in line) and (\"True\" in line):\n var, varType, varValue = wellBoreDataLineAnalysis(line)\n wellBoreDataDict1[var] = {varType:[varValue]}\n elif \"=\" in line:\n #print \"debug1 \",line\n pythonString, var, varValue, varType, unit = wellBoreDataLineAnalysis(line)\n if varType == \"Real\" or varType == \"Int\":\n wellBoreDataDict1[var] = {varType:[varValue,unit]}\n else:\n line = dataFile.readline().replace(\"\\n\",\"\")\n wellBoreDataDict1[var] = {varType:[line,unit]} # the type is implicitely variable due to the fact we have\n # to deal with a formula,\n # the variation is over the coordinate\n wellBoreDataDict1[var] = {varType:[line,unit]}\n #print(wellBoreDataDict); raw_input(\"wellBoreDataDict:\"+\"Material\"+str(materialIndex))\n return wellBoreDataDict", "def Read(self, offset):\r\n addr = offset * 4\r\n return self.read(addr)", "def read_for_box(box, line):\n if 'xlo' in box or 'xhi' in box:\n if 'ylo' in box or 'yhi' in box:\n read_box_line(line, box, 'z')\n else:\n read_box_line(line, box, 'y')\n else:\n read_box_line(line, box, 'x')", "def __read_block(self, buffer, startchr, endchr):\n\t\ttoken = buffer.read(1)\n\t\twhile token != startchr:\n\t\t\ttoken = buffer.read(1)\n\t\t\tif not token:\n\t\t\t\traise ValueError(\"read_block could not find beginning of block\")\n\t\t\n\t\tret = []\n\t\tcount = 1\n\t\twhile count:\n\t\t\ttoken = buffer.read(1)\n\t\t\tif token == 
startchr:\n\t\t\t\tcount += 1\n\t\t\telif token == endchr:\n\t\t\t\tcount -= 1\n\t\t\tif count:\n\t\t\t\tret.append(token)\n\t\t\tif not token:\n\t\t\t\tbreak\n\t\t\n\t\treturn \"\".join(ret)", "def read_lexeme(self) -> Lexeme:\r\n\r\n self._lexeme_buffer = self.lexeme_list[self.index]\r\n if self.index < len(self.lexeme_list) - 1:\r\n self.index += 1\r\n return self._lexeme_buffer", "def at(self):\n return self.data[self.end]", "def read_skel(self, fid):\r\n lin = self.read_line(fid)\r\n while lin:\r\n if lin[0]==':':\r\n if lin[1:]== 'name':\r\n lin = self.read_line(fid)\r\n self.name = lin\r\n elif lin[1:]=='units':\r\n lin = self.read_units(fid)\r\n elif lin[1:]=='documentation':\r\n lin = self.read_documentation(fid)\r\n elif lin[1:]=='root':\r\n lin = self.read_root(fid)\r\n elif lin[1:]=='bonedata':\r\n lin = self.read_bonedata(fid)\r\n elif lin[1:]=='hierarchy':\r\n lin = self.read_hierarchy(fid)\r\n elif lin[1:8]=='version':\r\n lin = self.read_line(fid)\r\n continue\r\n else: \r\n if not lin:\r\n self.finalize()\r\n return\r\n lin = self.read_line(fid)\r\n else:\r\n raise ValueError('Unrecognised file format')\r\n self.finalize()", "def reading(self, index: str = 'cm') -> int:\n if index == 'cm':\n index = Sonar.DISTANCE_CM\n self.board.sonar_read(self.trig)\n return self.data[index]", "def next(self):\n return self.filenum(), self.linenum(), self.tos().next()", "def __getitem__(self, index):\n return self.position[index]", "def read(self, w):\n pass", "def find(self, read, aa=None):\n aa = aa or ['C']\n\n for i, base in enumerate(read.sequence):\n if base in aa:\n yield Landmark(self.NAME, self.SYMBOL, i, 1)", "def get(self, offset: int) -> Position:\n line = bisect_right(self.line_starts, offset) - 1\n character = offset - self.line_starts[line]\n return Position(line=line, character=character)", "def read_sprite_info(rom, column_count_offset, sprite_data_offset):\n rom.seek(column_count_offset)\n sprites = []\n while True:\n sprite_info = {\n 'column_count': rom.read_ushort(),\n }\n offset = bytearray([0, 0, 0, 0])\n offset_index = rom.read_ubyte() - 1\n jump_amount = 0\n if offset_index < len(offset):\n while True:\n byte = rom.read_ubyte()\n if byte == 0:\n break\n offset[offset_index] = byte\n offset_index += 1\n jump_amount = rom.read_ubyte()\n sprite_info['offset'] = sprite_data_offset + struct.unpack('<I', offset)[0]\n sprites.append(sprite_info)\n # Test if this bit is set and if so, stop.\n # Probably not the way it worked, but it works for all roms.\n if jump_amount & 0x80:\n break\n return sprites", "def readAMBERTop(self, phys, filename):\r\n\r\n def skipLine(data):\r\n nl = data.index('\\n')\r\n return data[nl+1:len(data)]\r\n\r\n def jumpTo(data, target):\r\n fp = data.index(target)\r\n return data[fp:len(data)]\r\n\r\n def readRemove(data, size):\r\n retval = data[0:size-1]\r\n return data[size:len(data)]\r\n\r\n def getInteger(data):\r\n pos = 0\r\n retval = \"\"\r\n while (not data[pos].isdigit()):\r\n pos = pos + 1\r\n while (data[pos].isdigit()):\r\n retval = retval + data[pos]\r\n pos = pos + 1\r\n data = data[pos:len(data)]\r\n return int(retval), data\r\n\r\n def parse(data, arr, str, count, dtype, tupsize=1):\r\n data = jumpTo(data, \"%FLAG \"+str)\r\n data = jumpTo(data, \"%FORMAT\")\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data) \r\n \r\n arr2 = []\r\n numread = 0\r\n for j in range(0, (tupsize*count-1) / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n if (tupsize == 1):\r\n 
arr.append(dtype(data[0:fieldsize].strip()))\r\n else:\r\n arr2.append(dtype(data[0:fieldsize].strip()))\r\n if (len(arr2) == tupsize):\r\n arr.append(arr2)\r\n arr2 = []\r\n numread += 1\r\n data = data[fieldsize:len(data)]\r\n if (numread == tupsize*count):\r\n break\r\n data = skipLine(data) \r\n return data\r\n\r\n def scan(data, str):\r\n return (data.count(str) != 0)\r\n\r\n\r\n f = open(filename, 'r')\r\n data = f.read()\r\n\r\n # First Line: VERSION ...\r\n data = skipLine(data)\r\n\r\n # Go To: %FLAG POINTERS\r\n data = jumpTo(data, '%FLAG POINTERS')\r\n\r\n data = jumpTo(data, '%FORMAT')\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data)\r\n \r\n temp = []\r\n numread = 0\r\n for j in range(0, 31 / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n temp.append(int(data[0:8]))\r\n data = data[8:len(data)]\r\n numread += 1\r\n if (numread == 31):\r\n break\r\n data = skipLine(data)\r\n \r\n [natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n\r\n\r\n #################################################\r\n # Read AtomTypes\r\n atomnames = []\r\n charges = []\r\n masses = []\r\n atindex = []\r\n exclusions = []\r\n nparams = []\r\n reslabels = []\r\n respointers = []\r\n forceconstants = [[], [], []] # bond, angle, dihedral\r\n equilvals = [[], [], [[], []]] # bond, angle, dihedral\r\n scee_scales = []\r\n scnb_scales = []\r\n solty = []\r\n lj_acoef = []\r\n lj_bcoef = []\r\n\r\n data = parse(data, atomnames, \"ATOM_NAME\", natoms, str) \r\n data = parse(data, charges, \"CHARGE\", natoms, float)\r\n data = parse(data, masses, \"MASS\", natoms, float)\r\n data = parse(data, atindex, \"ATOM_TYPE_INDEX\", natoms, int)\r\n data = parse(data, exclusions, \"NUMBER_EXCLUDED_ATOMS\", natoms, int)\r\n data = parse(data, nparams, \"NONBONDED_PARM_INDEX\", ntypes*ntypes, int)\r\n data = parse(data, reslabels, \"RESIDUE_LABEL\", nres, str)\r\n data = parse(data, respointers, \"RESIDUE_POINTER\", nres, int)\r\n data = parse(data, forceconstants[0], \"BOND_FORCE_CONSTANT\", numbnd, float)\r\n data = parse(data, equilvals[0], \"BOND_EQUIL_VALUE\", numbnd, float)\r\n data = parse(data, forceconstants[1], \"ANGLE_FORCE_CONSTANT\", numang, float)\r\n data = parse(data, equilvals[1], \"ANGLE_EQUIL_VALUE\", numang, float)\r\n data = parse(data, forceconstants[2], \"DIHEDRAL_FORCE_CONSTANT\", nptra, float)\r\n data = parse(data, equilvals[2][0], \"DIHEDRAL_PERIODICITY\", nptra, float)\r\n data = parse(data, equilvals[2][1], \"DIHEDRAL_PHASE\", nptra, float)\r\n if (scan(data, \"SCEE_SCALE_FACTOR\")):\r\n data = parse(data, scee_scales, \"SCEE_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scee_scales.append(1.2) # Default \r\n if (scan(data, \"SCNB_SCALE_FACTOR\")):\r\n data = parse(data, scnb_scales, \"SCNB_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scnb_scales.append(2.0) # Default \r\n\r\n data = parse(data, solty, \"SOLTY\", natyp, float)\r\n data = parse(data, lj_acoef, \"LENNARD_JONES_ACOEF\", ntypes*(ntypes+1)/2, float)\r\n data = parse(data, lj_bcoef, \"LENNARD_JONES_BCOEF\", ntypes*(ntypes+1)/2, float)\r\n\r\n\r\n ##########################################################\r\n # STRUCTURE\r\n\r\n bonds = [[], []] # With H, Without H\r\n angles = [[], []] # With H, Without H\r\n dihedrals = [[], []] 
# With H, Without H\r\n impropers = [[], []] # With H, Without H\r\n excluded_atoms = [] \r\n hbond_acoef = []\r\n hbond_bcoef = []\r\n hbcut = []\r\n amber_atom_types = []\r\n tree_chain = []\r\n join_array = []\r\n irotat = []\r\n radii = []\r\n screen = []\r\n\r\n data = parse(data, bonds[0], \"BONDS_INC_HYDROGEN\", nbonh, int, 3)\r\n data = parse(data, bonds[1], \"BONDS_WITHOUT_HYDROGEN\", nbona, int, 3)\r\n data = parse(data, angles[0], \"ANGLES_INC_HYDROGEN\", ntheth, int, 4)\r\n data = parse(data, angles[1], \"ANGLES_WITHOUT_HYDROGEN\", ntheta, int, 4)\r\n data = parse(data, dihedrals[0], \"DIHEDRALS_INC_HYDROGEN\", nphih, int, 5)\r\n data = parse(data, dihedrals[1], \"DIHEDRALS_WITHOUT_HYDROGEN\", nphia, int, 5)\r\n \r\n # MERGE ARRAYS - PM HANDLES THE H+\r\n final_bonds = bonds[0] + bonds[1]\r\n final_angles = angles[0] + angles[1]\r\n final_dihedrals = dihedrals[0] + dihedrals[1]\r\n final_impropers = []\r\n \r\n # CLEAN UP THE TRASH\r\n del(bonds)\r\n del(angles)\r\n del(dihedrals)\r\n \r\n\r\n # Move impropers into their own array\r\n i = 0\r\n while (i < len(final_dihedrals)):\r\n if (final_dihedrals[i][2] < 0): # 1-4 exclusions are handled by our back end\r\n final_dihedrals[i][2] *= -1\r\n if (final_dihedrals[i][3] < 0):\r\n final_dihedrals[i][3] *= -1 # Make + again\r\n final_impropers.append(final_dihedrals[i])\r\n final_dihedrals.remove(final_dihedrals[i])\r\n i -= 1\r\n i += 1\r\n\r\n # Convert charge units\r\n for i in range(0, len(charges)):\r\n charges[i] /= 18.223\r\n\r\n\r\n data = parse(data, excluded_atoms, \"EXCLUDED_ATOMS_LIST\", nnb, int)\r\n data = parse(data, hbond_acoef, \"HBOND_ACOEF\", nphb, float)\r\n data = parse(data, hbond_bcoef, \"HBOND_BCOEF\", nphb, float)\r\n data = parse(data, hbcut, \"HBCUT\", nphb, float)\r\n data = parse(data, amber_atom_types, \"AMBER_ATOM_TYPE\", natoms, str)\r\n data = parse(data, tree_chain, \"TREE_CHAIN_CLASSIFICATION\", natoms, str)\r\n data = parse(data, join_array, \"JOIN_ARRAY\", natoms, int)\r\n data = parse(data, irotat, \"IROTAT\", natoms, int)\r\n data = parse(data, radii, \"RADII\", natoms, float)\r\n data = parse(data, screen, \"SCREEN\", natoms, float)\r\n\r\n # Further process dihedrals and impropers\r\n # Deal with multiplicity\r\n # A bit ugly, but the fastest for now\r\n # forceconstants[2][dihedrals[0][i][4]-1], int(equilvals[2][0][dihedrals[0][i][4]-1]), equilvals[2][1][dihedrals[0][i][4]-1]\r\n\r\n mult_di = dict()\r\n mult_im = dict()\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n if (not mult_di.has_key(di_id)):\r\n mult_di[di_id] = [1, False, [forceconstants[2][final_dihedrals[i][4]-1]], [int(equilvals[2][0][final_dihedrals[i][4]-1])], [equilvals[2][1][final_dihedrals[i][4]-1]]]\r\n else:\r\n mult_di[di_id][0] += 1\r\n mult_di[di_id][2].append(forceconstants[2][final_dihedrals[i][4]-1])\r\n mult_di[di_id][3].append(int(equilvals[2][0][final_dihedrals[i][4]-1]))\r\n mult_di[di_id][4].append(equilvals[2][1][final_dihedrals[i][4]-1])\r\n \r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n if (not mult_im.has_key(di_id)):\r\n mult_im[im_id] = [1, False, [forceconstants[2][final_impropers[i][4]-1]], [int(equilvals[2][0][final_impropers[i][4]-1])], [equilvals[2][1][final_impropers[i][4]-1]]]\r\n else:\r\n mult_im[im_id][0] += 1\r\n 
mult_im[im_id][2].append(forceconstants[2][final_impropers[i][4]-1])\r\n mult_im[im_id][3].append(int(equilvals[2][0][final_impropers[i][4]-1]))\r\n mult_im[im_id][4].append(equilvals[2][1][final_impropers[i][4]-1])\r\n\r\n\r\n\r\n \r\n #[natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n #phys.myPSF.createAll(natoms, nbonh+mbona, ntheth+mtheta,\r\n # len(dihedrals[0])+len(dihedrals[1]),\r\n # len(impropers[0])+len(impropers[1]),\r\n # 0, 0, 0, 0)\r\n \r\n # Add atoms\r\n curres = 1\r\n for i in range(0, natoms):\r\n phys.myPSF.addAtom(i, 'SIM', curres, reslabels[curres-1],\r\n atomnames[i], atomnames[i], charges[i],\r\n masses[i]) \r\n if (curres != nres and i >= respointers[curres]):\r\n curres += 1\r\n\r\n # Add bonds\r\n for i in range(0, nbonh+nbona):\r\n phys.myPSF.addBond(i+1, final_bonds[i][0]/3+1, final_bonds[i][1]/3+1)\r\n phys.myPAR.addBond(i+1, atomnames[final_bonds[i][0]/3], atomnames[final_bonds[i][1]/3], forceconstants[0][final_bonds[i][2]/3], equilvals[0][final_bonds[i][2]/3])\r\n \r\n # Add angles\r\n for i in range(0, ntheth+ntheta):\r\n phys.myPSF.addAngle(i+1, final_angles[i][0]/3+1, final_angles[i][1]/3+1, final_angles[i][2]/3+1)\r\n phys.myPAR.addAngle(i+1, atomnames[final_angles[i][0]/3], atomnames[final_angles[i][1]/3], atomnames[final_angles[i][2]/3], forceconstants[1][final_angles[i][3]/3], equilvals[1][final_angles[i][3]/3])\r\n \r\n # Add dihedrals\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n mult = mult_di[di_id][0]\r\n checked = mult_di[di_id][1]\r\n print di_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], forceconstants[2][final_dihedrals[i][4]-1], int(equilvals[2][0][final_dihedrals[i][4]-1]), equilvals[2][1][final_dihedrals[i][4]-1])\r\n else:\r\n mult_di[di_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_di[di_id][2])):\r\n fcvec.push_back(mult_di[di_id][2][j])\r\n periodvec.push_back(mult_di[di_id][3][j])\r\n phasevec.push_back(mult_di[di_id][4][j])\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n \r\n\r\n\r\n\r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n mult = mult_im[im_id][0]\r\n checked = mult_im[im_id][1]\r\n print im_id, \" \", mult\r\n if (not checked):\r\n if 
(mult == 1):\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], forceconstants[2][final_impropers[i][4]-1], int(equilvals[2][0][final_impropers[i][4]-1]), equilvals[2][1][final_impropers[i][4]-1])\r\n else:\r\n mult_im[im_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_im[im_id][2])):\r\n fcvec.push_back(mult_im[im_id][2][j])\r\n periodvec.push_back(mult_im[im_id][3][j])\r\n phasevec.push_back(mult_im[im_id][4][j])\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n\r\n \r\n # Need to add garbage nonbonded stuff for now\r\n for i in range(0, natoms):\r\n phys.myPAR.addNonbonded(i, atomnames[i], 1, 1, 1, 1, 1, 1)\r\n\r\n # Add VDW parameters\r\n # AMBER has the Aij and Bij already in the parameter file\r\n # This actually makes life easier.\r\n # CHARMM does not, they simply have the original sigma and epsilon.\r\n # To compensate for this, for now we will leave the nonbondeds empty in phys.myPAR\r\n # We will then access the LennardJones parameter table in Topology directly\r\n k = 0\r\n phys.myTop.resizeLennardJonesParameters(ntypes)\r\n for i in range(0, ntypes):\r\n for j in range(i, ntypes):\r\n params = GenericTopology.LennardJonesParameters(lj_acoef[k], lj_bcoef[k])\r\n k += 1\r\n phys.myTop.setLennardJonesParameters(i, j, params)\r\n \r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def getwellid(infile, wellinfo):\r\n m = re.search(\"\\d\", getfilename(infile))\r\n s = re.search(\"\\s\", getfilename(infile))\r\n if m.start() > 3:\r\n wellname = getfilename(infile)[0:m.start()].strip().lower()\r\n else:\r\n wellname = getfilename(infile)[0:s.start()].strip().lower()\r\n wellid = wellinfo[wellinfo['Well'] == wellname]['WellID'].values[0]\r\n return wellname, wellid", "def _read(fp, offset, size):\n fp.seek(offset)\n return fp.read(size)", "def read(self, readStart=None, readEnd=None):\n if not self.baxH5.hasRawBasecalls:\n raise ValueError, \"No raw reads in this file\"\n hqStart, hqEnd = self.hqRegion\n readStart = hqStart if readStart is None else readStart\n readEnd = hqEnd if readEnd is None else readEnd\n return ZmwRead(self.baxH5, self.holeNumber, readStart, readEnd)", "def readPAR(self,phys,parname):\r\n PARReader.PARReader(self.checkPath(parname),0).read(phys.myPAR)\r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def peek(self, index):\n x = -1\n if (self.top - index + 1) < 0:\n print(\"Invalid Index\")\n else:\n x = self.arr[self.top - index + 1]\n return x" ]
[ "0.587106", "0.57935244", "0.57673585", "0.5558961", "0.5488502", "0.5469904", "0.54450524", "0.5361794", "0.53559667", "0.5308596", "0.52896506", "0.52439475", "0.52334213", "0.52206546", "0.51920587", "0.5184739", "0.5168924", "0.51680005", "0.5153771", "0.51435596", "0.51345706", "0.51194936", "0.51074755", "0.50878316", "0.508237", "0.5077448", "0.50615114", "0.5060005", "0.50576645", "0.50513905", "0.5041607", "0.50181746", "0.50047696", "0.49988052", "0.4973555", "0.49621743", "0.49542284", "0.49448037", "0.49399364", "0.4939576", "0.49124205", "0.49105552", "0.490158", "0.48997658", "0.48875555", "0.4874264", "0.4869487", "0.48683107", "0.4846811", "0.48462635", "0.4844299", "0.4841739", "0.48394623", "0.48358402", "0.48298025", "0.48269138", "0.48268777", "0.48232886", "0.4823141", "0.48223662", "0.4808956", "0.4808956", "0.48086736", "0.47931305", "0.47824726", "0.47809842", "0.47783893", "0.47773182", "0.47773182", "0.47773182", "0.47762004", "0.47641498", "0.47633338", "0.47633338", "0.47633338", "0.47623917", "0.4760768", "0.4757916", "0.47561163", "0.47526696", "0.4751786", "0.47445902", "0.4739205", "0.47370297", "0.47348225", "0.47345832", "0.47293693", "0.47270653", "0.4724139", "0.4721767", "0.47105387", "0.4708956", "0.4705581", "0.47001034", "0.46940774", "0.46934706", "0.46878347", "0.4683591", "0.46799755", "0.46798486" ]
0.5471764
5
It should create color shapes in the given data directory.
def test_create_shapes(data_dir):
    dataset.create_shapes(10, 10, 1, data_dir=data_dir)
    img_path = os.path.join(data_dir, "ellipse/0.png")
    assert os.path.exists(img_path)
    img = imageio.imread(img_path)
    assert img.shape == (10, 10, 4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_shapes_grayscale(data_dir):\n dataset.create_shapes(10, 10, 1, channels=1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10)", "def load_shapes(self,count,img_folder,mask_folder,imglist,dataset_root_path):\n self.add_class(\"shapes\",1,\"red_s\")\n self.add_class(\"shapes\",2,\"red_m\")\n self.add_class(\"shapes\",3,\"red_l\")\n self.add_class(\"shapes\",4,\"yellow_s\")\n self.add_class(\"shapes\",5,\"yellow_m\")\n self.add_class(\"shapes\",6,\"yellow_l\")\n self.add_class(\"shapes\",7,\"green_s\")\n self.add_class(\"shapes\",8,\"green_m\")\n self.add_class(\"shapes\",9,\"green_l\")\n self.add_class(\"shapes\",10,\"blue_s\")\n self.add_class(\"shapes\",11,\"blue_m\")\n self.add_class(\"shapes\",12,\"blue_l\")\n self.add_class(\"shapes\",13,\"orange_s\")\n self.add_class(\"shapes\",14,\"orange_m\")\n self.add_class(\"shapes\",15,\"orange_l\")\n\n for i in range(count):\n filestr = imglist[i].split(\".\")[0]\n package_id = (int(filestr)-1)//30 + 1\n package_path = \"package%s\" % package_id\n # print(filestr)\n if mask_folder == 'mask/training_data/':\n mask_path = mask_folder+package_path +\"/image%s\" % filestr\n # print('====>',mask_path)\n csv_path_str = \"training_data/\"+package_path\n path_to_img = img_folder+'/'+package_path+ \"/%s.png\" % filestr\n else:\n mask_path = mask_folder + \"/image%s\" % filestr\n csv_path_str = img_folder\n path_to_img = img_folder+ \"/%s.png\" % filestr\n label_index = filestr\n # path_to_img = img_folder+ \"/%s.png\" % filestr\n # print(path_to_img)\n cv_img = cv2.imread(path_to_img)\n # print(cv_img)\n # resize_img = cv2.resize(cv_img,(384,384),interpolation = cv2.INTER_AREA)\n self.add_image(\"shapes\",image_id=i, path=path_to_img, csv_path=csv_path_str, width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, label_index=label_index)", "def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - 
radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None", "def make_props_files(labels, label_list, dir_path, data,\r\n background_color, label_color, prefs):\r\n cat_connected_num = 0\r\n mapping = data['map']\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n for params in groups_and_colors:\r\n l = params[0]\r\n if l == \"SampleID\" or l == \"Description\":\r\n continue\r\n m = params[2]\r\n c = params[3]\r\n output = open(os.path.join(dir_path, \"props/custom.%s.props\" % l), 'w')\r\n props_str_list = [l] * 5\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 22)\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 16)\r\n props_str_list.append(props_edge % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_edge_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 109)\r\n props_str_list.append(props_node % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_node_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 48)\r\n props_str_list[98] = ','.join(map(str, background_color.toRGB()))\r\n props_str_list[109] = ','.join(map(str, label_color.toRGB()))\r\n props_str_list[132] = ','.join(map(str, label_color.toRGB()))\r\n output.write(props_file_str % tuple(props_str_list))\r\n output.close()", "def create_data(data_size,heme, nucleotide, control, steroid,data_total,path_to_data):\n\n os.chdir(path_to_data)\n\n x_array = np.zeros(shape = (data_size,14,32,32,32))\n\n y_array = np.zeros(shape = data_size)\n\n print(\"data size = \", data_size)\n\n #training set :\n\n file_count = 0\n\n for file in data_total:\n\n y_array[file_count]= find_class(str(file), heme, nucleotide, control, steroid)\n\n x_array[file_count] = np.load(str(file+\".npy\"))\n\n 
file_count+=1\n\n\n return (x_array, y_array)", "def make_all_charts(data, dir_path, filename, num_categories, colorby, args,\r\n color_data, prefs, background_color, label_color,\r\n chart_type, generate_image_type, plot_width, plot_height,\r\n bar_width, dpi, resize_nth_label, label_type,\r\n include_html_legend, include_html_counts):\r\n\r\n # iterate over the preferences and assign colors according to taxonomy\r\n img_data = []\r\n for label, f_name in data:\r\n raw_fpath = os.path.join(\r\n dir_path,\r\n 'raw_data',\r\n os.path.split(f_name)[-1])\r\n # move raw file to output directory\r\n shutil.copyfile(f_name, raw_fpath)\r\n\r\n f = color_data['counts'][f_name]\r\n level = max([len(t.split(';')) - 1 for t in f[1]])\r\n\r\n for key in prefs.keys():\r\n if prefs[key]['column'] != str(level):\r\n continue\r\n col_name = 'Taxon'\r\n mapping = [['Taxon']]\r\n mapping.extend([[m] for m in f[1]])\r\n if 'colors' in prefs[key]:\r\n if isinstance(prefs[key]['colors'], dict):\r\n pref_colors = prefs[key]['colors'].copy()\r\n # copy so we can mutate\r\n else:\r\n pref_colors = prefs[key]['colors'][:]\r\n else:\r\n pref_colors = {}\r\n labelname = prefs[key]['column']\r\n\r\n # Define groups and associate appropriate colors to each group\r\n groups = group_by_field(mapping, col_name)\r\n pref_colors, data_colors, data_color_order = \\\r\n get_group_colors(groups, pref_colors)\r\n\r\n updated_pref_colors = {}\r\n\r\n if chart_type == 'area' and len(f[0]) == 1:\r\n raise ValueError(\r\n 'When generating area charts, the number of samples (or category values) must be greater than 1. However, you can still produce a pie chart or bar chart with only 1 sample (or category value), but you must remove the area chart value from the input arguments.')\r\n\r\n for key in pref_colors:\r\n updated_pref_colors[key.replace('\"', '')] = pref_colors[key]\r\n\r\n for i, val in enumerate(f[1]):\r\n f[1][i] = val.replace('\"', '')\r\n\r\n # parse the counts and continue processing\r\n img_data.extend(get_counts(label.strip(), colorby, num_categories,\r\n dir_path, level, f, prefs, updated_pref_colors,\r\n background_color,\r\n label_color, chart_type, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, raw_fpath,\r\n resize_nth_label, label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n # generate html filepath\r\n outpath = os.path.join(dir_path, '%s_charts.html' % chart_type)\r\n out_table = ''.join(img_data)\r\n # write out html file\r\n write_html_file(out_table, outpath)", "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # 
persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def pyplot_colourise(self, folder_name):\n colourised_folder_name = folder_name + '_colourised'\n\n try:\n print(\"Making dir \" + str(colourised_folder_name) + \" for colourisation\")\n os.mkdir(colourised_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this colourisation??\")\n return\n\n photo_list = self.get_photo_list(folder_name)\n\n for i, name in enumerate(photo_list):\n fig, ax = plt.subplots(figsize=(32, 16))\n file_name = folder_name + '/' + name\n colourised_image_name = colourised_folder_name + '/' + name\n image = cv2.imread(file_name, cv2.IMREAD_ANYDEPTH).astype(np.float32)\n go = ax.imshow(image, cmap='jet')\n fig.colorbar(go)\n fig.savefig(colourised_image_name)\n plt.close()", "def load_shapes(self, count, img_floder, mask_floder, imglist, creatnpzfile:bool=True):\n # Add classes\n \n self.add_class(\"shapes\", 1, \"grasper\")\n self.add_class(\"shapes\", 2, \"grasper2\")\n self.add_class(\"shapes\", 3, \"grasper3\")\n self.add_class(\"shapes\", 4, \"irrigator\")\n self.add_class(\"shapes\", 5, \"hook\")\n self.add_class(\"shapes\", 6, \"clipper\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. Images are generated on the fly in load_image().\n for i in range(count):\n img = imglist[i]\n if img.endswith(\".jpg\"):\n img_name = img.split(\".\")[0]\n img_path = os.path.join(img_floder,img)\n mask_path = os.path.join(mask_floder,img_name+\".png\")\n #save the mask infomation with numpy\n mask_info = None\n \n if not os.path.exists(os.path.join(mask_infofloder,\"{}.npz\".format(img_name))):\n mask_info = self.load_mask_pre(i,mask_path)\n np.savez(os.path.join(mask_infofloder,img_name),mask_ = mask_info[0], id_=mask_info[1])\n else:\n data = np.load(os.path.join(mask_infofloder,\"{}.npz\".format(img_name)))\n mask_info = data['mask_'],data['id_']\n\n self.add_image(\"shapes\", image_id=i, path=img_path, name=img_name, mask_path=mask_path, mask_info=mask_info)\n sys.stdout.write('-------creating the np file:--%s-------------pross:--%.4f%%--'%(os.path.join(mask_infofloder,\"{}.npz\".format(img_name)),\n (i+1)/float(count)*100))\n sys.stdout.write('\\r')\n sys.stdout.flush()", "def test_gen_colors(self):\n result = magic.gen_colors(\"tests/test_files/test.jpg\")\n self.assertEqual(result[0], \"#0F191A\")", "def prepare_data(data_path, val_data_path, patch_size,stride,scales = [1, 0.9, 0.8, 0.7],\n max_num_patches=None, aug_times=1,random_aug=False, gray_mode=False):\n # training database\n print('> Training database')\n types = ('*.bmp', '*.png')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(data_path, tp)))\n files.sort()\n\n if gray_mode:\n traindbf = './data/set400_p64.h5'\n valdbf = './data/set12.h5'\n else:\n traindbf = './data/train_rgb.h5'\n valdbf = './data/val_rgb.h5'\n\n if max_num_patches is None:\n max_num_patches = 5000000\n print(\"\\tMaximum number of patches not set\")\n else:\n print(\"\\tMaximum number of patches set to {}\".format(max_num_patches))\n train_num = 0\n i = 0\n with h5py.File(traindbf, 'w') as h5f:\n while i < len(files) and train_num < max_num_patches:\n imgor = cv2.imread(files[i])\n # h, w, c = img.shape\n for sca in scales:\n img = 
cv2.resize(imgor, (0, 0), fx=sca, fy=sca, \\\n interpolation=cv2.INTER_CUBIC)\n if not gray_mode:\n # CxHxW RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # CxHxW grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n img = normalize(img)\n patches = img_to_patches(img, win=patch_size, stride=stride)\n print(\"\\tfile: %s scale %.1f # samples: %d\" % \\\n (files[i], sca, patches.shape[3] * 8))\n for nx in range(patches.shape[3]):\n if random_aug == False:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), j)\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n else:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), random.randint(0, 7))\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n i += 1\n # validation database\n print('\\n> Validation database')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(val_data_path, tp)))\n files.sort()\n h5f = h5py.File(valdbf, 'w')\n val_num = 0\n for i, item in enumerate(files):\n print(\"\\tfile: %s\" % item)\n img = cv2.imread(item)\n if not gray_mode:\n # C. H. W, RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # C, H, W grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n\n C,H,W=img.shape\n\n # if H % 2 == 1:\n # \timg = img[:, :-1, :]\n # if W % 2 == 1:\n # \timg = img[:, :, :-1]\n\n img = normalize(img)\n h5f.create_dataset(str(val_num), data=img)\n val_num += 1\n h5f.close()\n\n print('\\n> Total')\n print('\\ttraining set, # samples %d' % train_num)\n print('\\tvalidation set, # samples %d\\n' % val_num)", "def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')", "def init_color_space(color_path):\n # type: (str) -> None\n color_space = np.zeros((256, 256, 256), dtype=np.uint8)\n if color_path.endswith('.yaml'):\n with open(color_path, 'r') as stream:\n try:\n color_values = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n # TODO: what now??? 
Handle the error?\n pass\n # pickle-file is stored as '.txt'\n elif color_path.endswith('.txt'):\n try:\n with open(color_path, 'rb') as f:\n color_values = pickle.load(f)\n except pickle.PickleError as exc:\n pass\n \n # compatibility with colorpicker\n if 'color_values' in color_values.keys():\n color_values = color_values['color_values']['greenField']\n length = len(color_values['red'])\n if length == len(color_values['green']) and \\\n length == len(color_values['blue']):\n # setting colors from yaml file to True in color space\n for x in range(length):\n color_space[color_values['blue'][x],\n color_values['green'][x],\n color_values['red'][x]] = 1\n print(\"Imported color space\")\n return color_space", "def setUp(self):\r\n\r\n self.data = {}\r\n self.data['xaxis'] = [10.0]\r\n self.sample_dict = {'Sample1': {10.00: [1.3276140000000001]}}\r\n self.data['yvals'] = {'Sample1': [1.3276140000000001]}\r\n self.data['err'] = {'Sample1': [.1]}\r\n self.xmax = 140\r\n self.ymax = 20\r\n self.std_type = 'stddev'\r\n self.ops = ['Sample1']\r\n self.mapping_category = 'SampleID'\r\n self.imagetype = 'png'\r\n self.resolution = 70\r\n self.mapping_lookup = {'SampleID-Sample1': 'col_0_row_0'}\r\n self.data['map'] = [['SampleID', 'Day'], ['Sample1', 'Day1']]\r\n self.color_prefs = {'SampleID': {'column': 'SampleID', 'color':\r\n {'Sample1': '#ff0000'}}}\r\n self.groups = {'Sample1': ['Sample1']}\r\n self.background_color = 'black'\r\n self.label_color = 'white'\r\n self.labelname = 'SampleID'\r\n self.rare_data = {'color': {'Sample1': '#ff0000'},\r\n 'series': {'Sample1': [2.0515300000000001], },\r\n 'headers': ['test.txt', 'SampleID'], 'xaxis': [10.0],\r\n 'error': {'Sample1': [0.0]}, 'options': ['Sample1']}\r\n self.fpath = '/tmp/'\r\n self.output_dir = '/tmp/'\r\n self.metric_name = 'test'\r\n self._paths_to_clean_up = []\r\n self._folders_to_cleanup = []\r\n self.rarefaction_file_data = [[10.0, 0.0, 1.0], [10.0, 1.0, 3.0]]\r\n d = {'redtowhite3_0': '#7fff00', 'redtowhite3_1': '#7fff00'}\r\n self.data_colors = color_dict_to_objects(d)\r\n self.colors = {'Sample1': 'redtowhite3_0', 'Sample2': 'redtowhite3_1'}\r\n self.colors2 = {'Sample1': 'redtowhite3_0'}\r\n self.mappingfile = ['#SampleID\\tSex\\tAge',\r\n '123\\tF\\t32',\r\n '234\\tM\\t30',\r\n '345\\tM\\t32']\r\n # self.p_mappingfile = parse_mapping_file(self.mappingfile,\\\r\n # strip_quotes=True)\r\n self.rarefactionfile = [\r\n '\\tsequences per sample\\titeration\\t123\\t234\\t345',\r\n 'rare10.txt\\t10\\t0\\t1.99181\\t0.42877\\t2.13996',\r\n 'rare10.txt\\t10\\t1\\t2.07163\\t0.42877\\t2.37055',\r\n 'rare310.txt\\t310\\t0\\t8.83115\\t0.42877\\t11.00725',\r\n 'rare310.txt\\t310\\t1\\t10.05242\\t0.42877\\t8.24474',\r\n 'rare610.txt\\t610\\t0\\t12.03067\\t0.42877\\t11.58928',\r\n 'rare610.txt\\t610\\t1\\t12.9862\\t0.42877\\t11.58642']\r\n\r\n self.rares = {'test.txt': (['', 'sequences per sample', 'iteration',\r\n 'Sample1'], [], ['rare1.txt', 'rare2.txt'],\r\n [[10.0, 2.0, 7.0, 7.0, 9.0], [10.0, 2.0, 7.0, 7.0, 9.0]])}\r\n self.col_headers, self.comments, self.rarefaction_fns, \\\r\n self.rarefaction_data = parse_rarefaction(self.rarefactionfile)\r\n self.matrix, self.seqs_per_samp, self.sampleIDs = \\\r\n get_rarefaction_data(self.rarefaction_data, self.col_headers)\r\n self.ave_seqs_per_sample1 = {'Sample1': [2.03172, 9.4417849999999994,\r\n 12.508435]}\r\n self.ave_seqs_per_sample = {'123': [2.03172, 9.4417849999999994,\r\n 12.508435], '234': [0.42876999999999998, 0.42876999999999998,\r\n 0.42876999999999998], '345': [2.255255, 9.625995, 
11.58785]}\r\n self.collapsed_ser_sex = {'M': [1.3420125000000001, 5.0273824999999999,\r\n 6.0083099999999998], 'F': [2.03172, 9.4417849999999994, 12.508435]}\r\n self.err_ser_sex = {'M': [0.91324250000000007, 4.5986124999999998,\r\n 5.5795399999999997], 'F': [0.0, 0.0, 0.0]}\r\n self.rarefaction_legend_mat_init = {'test': {'SampleID': {}}}\r\n self.col_headers2 = [\r\n '', 'sequences per sample', 'iteration', 'Sample1',\r\n 'Sample2']\r\n\r\n self.rarefaction_data_mat = {\r\n 'SampleID': {'Sample1': {'test': {'ave': [' 7.000'], 'err': [' nan']}}}}\r\n\r\n self.rarefaction_legend_mat = {\r\n 'test': {\r\n 'samples': {\r\n 'Sample1': {\r\n 'color': '#ff0000',\r\n 'link': 'html_plots/testcol_0_row_0.png'}},\r\n 'groups': {\r\n 'SampleID': {\r\n 'Sample1': {\r\n 'groupcolor': '#ff0000',\r\n 'groupsamples': [\r\n 'Sample1']}}}}}\r\n self.exp_err_series_ave = {'M':\r\n [1.571915, 6.49885, 8.1750183333333339]}", "def colourise_image(self, folder_name):\n colourised_folder_name = folder_name + '_colourised'\n\n try:\n print(\"Making dir \" + str(colourised_folder_name) + \" for colourisation\")\n os.mkdir(colourised_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this colourisation??\")\n return\n\n print(\"Writing to folder +\" + str(colourised_folder_name))\n photo_list = self.get_photo_list(folder_name)\n for i, name in enumerate(photo_list):\n file_name = folder_name + '/' + name\n colourised_image_name = colourised_folder_name + '/' + name\n image = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)\n image_8bit = image.astype(np.uint8)\n colour_image = cv2.applyColorMap(image_8bit, cv2.COLORMAP_JET)\n cv2.imwrite(colourised_image_name, colour_image)", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = 
path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))", "def _generate_dataset(self):\n # create train images\n train_path = os.path.join(self.root_dir, \"shapes\", \"train\", \"good\")\n os.makedirs(train_path, exist_ok=True)\n for i in range(self.num_train):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n image = result[\"image\"]\n imsave(os.path.join(train_path, f\"{i:03}.png\"), image, check_contrast=False)\n\n # create test images\n for test_category in self.test_shapes:\n test_path = os.path.join(self.root_dir, \"shapes\", \"test\", test_category)\n mask_path = os.path.join(self.root_dir, \"shapes\", \"ground_truth\", test_category)\n os.makedirs(test_path, exist_ok=True)\n os.makedirs(mask_path, exist_ok=True)\n # anomaly and masks. The idea is to superimpose anomalous shapes on top of correct ones\n for i in range(self.num_test):\n correct_shapes = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=[test_category],\n generate_mask=True,\n )\n correct_shapes = correct_shapes[\"image\"]\n image, mask = result[\"image\"], result[\"mask\"]\n image = np.minimum(image, correct_shapes) # since 255 is white\n imsave(os.path.join(test_path, f\"{i:03}.png\"), image, check_contrast=False)\n imsave(os.path.join(mask_path, f\"{i:03}_mask.png\"), mask, check_contrast=False)\n # good test\n test_good = os.path.join(self.root_dir, \"shapes\", \"test\", \"good\")\n os.makedirs(test_good, exist_ok=True)\n for i in range(self.num_test):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n )\n image = result[\"image\"]\n imsave(os.path.join(test_good, f\"{i:03}.png\"), image, check_contrast=False)", "def create_data_lists(voc07_path, voc08_path, voc09_path, voc10_path, voc12_path, output_folder):\n voc07_path = os.path.abspath(voc07_path)\n voc08_path = os.path.abspath(voc08_path)\n voc09_path = os.path.abspath(voc09_path)\n voc10_path = os.path.abspath(voc10_path)\n voc12_path = os.path.abspath(voc12_path)\n \n voc_labels = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',\n 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')\n label_map = {k: v + 1 for v, k in enumerate(voc_labels)}\n label_map['background'] = 0\n rev_label_map = {v: k for k, v in label_map.items()} # Inverse mapping\n\n # Color map for bounding boxes of detected objects from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\n distinct_colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4', '#46f0f0', '#f032e6',\n '#d2f53c', '#fabebe', '#008080', '#000080', '#aa6e28', '#fffac8', '#800000', '#aaffc3', '#808000',\n '#ffd8b1', '#e6beff', '#808080', '#FFFFFF']\n label_color_map = {k: distinct_colors[i] for i, k in enumerate(label_map.keys())}\n \n train_images = list()\n train_objects = list()\n n_objects = 0\n \n path_list = [voc07_path, \n# voc08_path, \n# voc09_path, \n# voc10_path, \n voc12_path\n ]\n # Training data\n for path in path_list:\n\n # Find IDs of images in training data\n with 
open(os.path.join(path, 'ImageSets/Main/trainval.txt')) as f:\n ids = f.read().splitlines()\n \n for id in ids:\n # Parse annotation's XML file\n objects, size = parse_annotation(os.path.join(path, 'Annotations', id + '.xml'))\n # \n if len(objects) == 0:\n continue\n if size[1] ==500:\n n_objects += len(objects)\n train_objects.append(objects)\n train_images.append(os.path.join(path, 'JPEGImages', id + '.jpg'))\n\n assert len(train_objects) == len(train_images)\n\n # Save to file\n with open(os.path.join(output_folder, 'TRAIN_images.json'), 'w') as j:\n json.dump(train_images, j)\n with open(os.path.join(output_folder, 'TRAIN_objects.json'), 'w') as j:\n json.dump(train_objects, j)\n with open(os.path.join(output_folder, 'label_map.json'), 'w') as j:\n json.dump(label_map, j) # save label map too\n\n print('\\nThere are %d training images containing a total of %d objects. Files have been saved to %s.' % (\n len(train_images), n_objects, os.path.abspath(output_folder)))\n\n # Validation data\n test_images = list()\n test_objects = list()\n n_objects = 0\n\n # Find IDs of images in validation data\n with open(os.path.join(voc07_path, 'ImageSets/Main/val.txt')) as f: # test\n ids = f.read().splitlines()\n\n for i, id in enumerate(ids):\n# #TEST CODE\n# if i>=4:\n# break\n # Parse annotation's XML file\n objects, size = parse_annotation(os.path.join(voc07_path, 'Annotations', id + '.xml'))\n if len(objects) == 0:\n continue\n if size[1] ==500:\n test_objects.append(objects)\n n_objects += len(objects)\n test_images.append(os.path.join(voc07_path, 'JPEGImages', id + '.jpg'))\n\n assert len(test_objects) == len(test_images)\n\n # Save to file\n with open(os.path.join(output_folder, 'TEST_images.json'), 'w') as j:\n json.dump(test_images, j)\n with open(os.path.join(output_folder, 'TEST_objects.json'), 'w') as j:\n json.dump(test_objects, j)\n\n print('\\nThere are %d validation images containing a total of %d objects. Files have been saved to %s.' 
% (\n len(test_images), n_objects, os.path.abspath(output_folder)))", "def generate_dat_files(rspecs, datroot, bands, labels):\n d = ds9.ds9()\n d.set('rgb')\n d.set('rgb red')\n\n # Save plaintext projection data\n # Idea: minimize file (band) loading operations\n for fname, flab in zip(bands, labels):\n d.set('file ' + fname) # Load a band\n for i in xrange(len(rspecs)):\n d.set('regions', rspecs[i]) # Load a region\n d.set('rgb red') # Plot projection data\n dat_fname = '{0}_{1:02d}_band_{2}.dat'.format(datroot, i+1, flab)\n d.set('plot {0} save {1}'.format(d.get('plot'), dat_fname))\n d.set('regions delete all')\n d.set('exit')", "def create_folder():\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")", "def generate_2d_plots(prefs, data, html_dir_path, data_dir_path, filename,\r\n background_color, label_color, generate_scree):\r\n coord_tups = [(\"1\", \"2\"), (\"3\", \"2\"), (\"1\", \"3\")]\r\n mapping = data['map']\r\n out_table = ''\r\n # Iterate through prefs and generate html files for each colorby option\r\n # Sort by the column name first\r\n sample_location = {}\r\n\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n groups_and_colors = list(groups_and_colors)\r\n\r\n for i in range(len(groups_and_colors)):\r\n labelname = groups_and_colors[i][0]\r\n groups = groups_and_colors[i][1]\r\n colors = groups_and_colors[i][2]\r\n data_colors = groups_and_colors[i][3]\r\n data_color_order = groups_and_colors[i][4]\r\n\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join('.', os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n new_col_name = labelname\r\n img_data = {}\r\n plot_label = labelname\r\n\r\n if 'support_pcoas' in data:\r\n matrix_average, matrix_low, matrix_high, eigval_average, m_names = \\\r\n summarize_pcoas(data['coord'], data['support_pcoas'],\r\n method=data['ellipsoid_method'])\r\n data['coord'] = \\\r\n (m_names, matrix_average, data['coord'][2], data['coord'][3])\r\n for i in range(len(m_names)):\r\n sample_location[m_names[i]] = i\r\n else:\r\n matrix_average = None\r\n matrix_low = None\r\n matrix_high = None\r\n eigval_average = None\r\n m_names = None\r\n iterator = 0\r\n\r\n for coord_tup in coord_tups:\r\n if isarray(matrix_low) and isarray(matrix_high) and \\\r\n isarray(matrix_average):\r\n coord_1r = asarray(matrix_low)\r\n coord_2r = asarray(matrix_high)\r\n mat_ave = asarray(matrix_average)\r\n else:\r\n coord_1r = None\r\n coord_2r = None\r\n mat_ave = None\r\n sample_location = None\r\n\r\n coord_1, coord_2 = coord_tup\r\n img_data[coord_tup] = draw_pcoa_graph(\r\n plot_label, data_file_dir_path,\r\n data_file_link, coord_1, coord_2,\r\n coord_1r, coord_2r, mat_ave,\r\n sample_location,\r\n data, prefs, groups, colors,\r\n background_color, label_color,\r\n data_colors, data_color_order,\r\n generate_eps=True)\r\n\r\n out_table += TABLE_HTML % (labelname,\r\n \"<br>\".join(img_data[(\"1\", \"2\")]),\r\n \"<br>\".join(img_data[(\"3\", \"2\")]),\r\n \"<br>\".join(img_data[(\"1\", \"3\")]))\r\n\r\n if generate_scree:\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join(\r\n '.',\r\n os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n img_src, download_link = draw_scree_graph(\r\n data_file_dir_path, data_file_link, background_color,\r\n 
label_color, generate_eps=True, data=data)\r\n\r\n out_table += SCREE_TABLE_HTML % (\"<br>\".join((img_src, download_link)))\r\n\r\n outfile = create_html_filename(filename, '.html')\r\n outfile = os.path.join(html_dir_path, outfile)\r\n\r\n write_html_file(out_table, outfile)", "def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')", "def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True", "def extract_shapefuns(self, parent_folder):\n shapelist = []\n cwd = os.getcwd()\n print('Extracting shapes')\n has_shapefun_file = False\n found_shapes = False #to see if KKRnano produced some shapes as output (better than using shapefun)\n parent_folder_listdir = parent_folder.listdir() #quicker this way as based on SSH tunneling\n for filename in parent_folder_listdir:\n if filename.find('shape.') >= 0:\n if has_shapefun_file:\n shapelist = []\n abs_path = f'{cwd}/{filename}'\n parent_folder.getfile(filename, f'{cwd}/{filename}')\n self.put_object_from_file(abs_path, filename)\n self.set_attribute(filename.replace('.', ''), filename)\n os.remove(filename)\n with self.open(filename, 'r') as _f:\n shapelist.append(SinglefileData(_f.name))\n print('Found shape in repsitory:')\n print(_f.name)\n has_shapefun_file = False\n found_shapes = True\n if 'shapefun' in parent_folder_listdir and not found_shapes:\n filename = 'shapefun'\n print('Shapefun in dir, this part of the program might need more testing')\n abs_path = f'{cwd}/{filename}'\n parent_folder.getfile(filename, f'{cwd}/{filename}')\n\n with open(filename, 'r') as reader:\n shapes = reader.readlines()\n\n\n# print(os.path.realpath(reader.name))\n lines = []\n for line in range(len(shapes)):\n if shapes[line].find('Shape') > 0:\n lines.append(line)\n lines.append(len(shapes))\n for j in range(len(lines) - 1):\n shape_string = ''\n for k in range(lines[j], lines[j + 1]):\n shape_string += shapes[k]\n\n shape_no_filename = 'shape.' + str(j + 1).zfill(7)\n with open(shape_no_filename, 'w') as file:\n file.write(shape_string)\n path = os.path.realpath(file.name)\n print(path)\n abs_path = f'{cwd}/{shape_no_filename}'\n self.put_object_from_file(abs_path, shape_no_filename) #Problem has to be called via instance\n self.set_attribute(shape_no_filename.replace('.', ''), shape_no_filename)\n with self.open(shape_no_filename, 'r') as _f:\n shapelist.append(SinglefileData(_f.name))\n os.remove(shape_no_filename)\n has_shapefun_file = True\n if has_shapefun_file:\n print(\n 'WARNING: Only a shapefun from some Voronoi input was found, it is possible that the potential does not match the shapefun parameters, unless they are set this way explicitly in the respective input file! 
It is advisable to use the `write_shapes=1` command in input.conf'\n )\n print('Found shapelist:')\n print(shapelist)\n return shapelist", "def generate_colour_data(width, height, imagiry_data, pixel2coord):\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append(\n [\n pixel2coord(j, i)[0],\n pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1],\n \n ]\n )", "def makeLocationPtShapefile(config,locationData):\n\n\n # set up the shapefile driver\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n num_years = config.EndYear\n \n for iteration in range(config.MinimumIteration, config.MaximumIteration + 1):\n for year in range(1, num_years + 1):\n\n shapeFilename = config.getOutputFilePath(cc.COLLAR_VALUES_SHAPEFILE_FILENAME.format(iteration,year))\n\n # delete the shapefile if it already exists\n if os.path.exists(shapeFilename):\n driver.DeleteDataSource(shapeFilename)\n if os.path.exists(shapeFilename):\n sys.exit(\"Unable to delete existing Shapefile '{0}'\".format(shapeFilename))\n\n # create the data source\n data_source = driver.CreateDataSource(shapeFilename)\n\n # create the spatial reference, WGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # create the layer\n layer = data_source.CreateLayer(\"location\", srs, ogr.wkbPoint)\n\n # Add the fields we're interested in\n # ITERATION_ID,YEAR_ID,JULIAN_DAY,STRATUM_ID,HARVEST_ZONE,LAT, LON,OUT_OF_BOUNDS,DISTANCE\n # DEVNOTE: Shapefiles seem bound to 10 character limit\n layer.CreateField(ogr.FieldDefn(\"ITER_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"YEAR_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"JULIAN_DAY\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"STRATUM_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"LAT\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"LON\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"DIST_KM\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"REL_ZOI\", ogr.OFTString))\n layer.CreateField(ogr.FieldDefn(\"RAA\", ogr.OFTString))\n\n # Process the text file and add the attributes and features to the shapefile\n for row in locationData:\n \n # Filter by iteration and timestep\n if row['ITERATION_ID'] == iteration:\n if row['YEAR_ID'] == year:\n # create the feature\n feature = ogr.Feature(layer.GetLayerDefn())\n # Set the attributes using the values from the delimited text file\n feature.SetField(\"ITER_ID\", row['ITERATION_ID'])\n feature.SetField(\"YEAR_ID\", row['YEAR_ID'])\n feature.SetField(\"JULIAN_DAY\", row['JULIAN_DAY'])\n feature.SetField(\"STRATUM_ID\", row['STRATUM_ID'])\n feature.SetField(\"LAT\", row['LAT'])\n feature.SetField(\"LON\", row['LON'])\n feature.SetField(\"DIST_KM\", row['DISTANCE'])\n feature.SetField(\"REL_ZOI\", row['RELATION_TO_ZOI'])\n feature.SetField(\"RAA\", row['RANGE_ASSESSMENT_AREA'])\n\n # create the WKT for the feature using Python string formatting\n wkt = \"POINT(%f %f)\" % (float(row['LON']) , float(row['LAT']))\n\n # Create the point from the Well Known Txt\n point = ogr.CreateGeometryFromWkt(wkt)\n\n # Set the feature geometry using the point\n feature.SetGeometry(point)\n # Create the feature in the layer (shapefile)\n layer.CreateFeature(feature)\n # Destroy the feature to free resources\n feature.Destroy()\n\n # Destroy the data source to free resources\n data_source.Destroy()\n\n print (\"\\n\\tConverted Collar Points Values into Shapefile for Iteration/Year {0}/{1}. 
Output file:'{2}'\".format(iteration, year, shapeFilename))", "def color_raster_from_shapes(target_bounds, target_dx, shapes, shape_colors,\n shapes_crs, nodata=-1):\n assert(len(shapes) == len(shape_colors))\n assert(len(shapes) > 0)\n \n dtype = np.dtype(type(shape_colors[0]))\n \n target_x0 = np.round(target_bounds[0] - target_dx/2)\n target_y1 = np.round(target_bounds[3] + target_dx/2)\n width = int(np.ceil((target_bounds[2] + target_dx/2 - target_x0)/target_dx))\n height = int(np.ceil((target_y1 - target_bounds[1] - target_dx/2)/target_dx))\n\n out_bounds = [target_x0, target_y1 - target_dx*height, target_x0 + target_dx*width, target_y1]\n\n logging.info('Coloring shapes onto raster:')\n logging.info(' target_bounds = {}'.format(target_bounds))\n logging.info(' out_bounds = {}'.format(out_bounds))\n logging.info(' pixel_size = {}'.format(target_dx))\n logging.info(' width = {}, height = {}'.format(width, height))\n logging.info(' and {} independent colors of dtype {}'.format(len(set(shape_colors)), dtype))\n\n transform = rasterio.transform.from_origin(target_x0, target_y1, target_dx, target_dx)\n \n out_profile = {'height':height,\n 'width':width,\n 'count':1,\n 'dtype':dtype,\n 'crs':workflow.crs.to_rasterio(shapes_crs),\n 'transform':transform,\n 'nodata':nodata}\n \n out = nodata * np.ones((height, width), dtype)\n for p, p_id in zip(shapes, shape_colors):\n mask = rasterio.features.geometry_mask([p,], out.shape, transform, invert=True)\n out[mask] = p_id\n return out, out_profile, out_bounds", "def create_patches(data, patch_shape):\n\n imgs = []\n\n if data[0].shape[0] == test_size:\n step_length = (test_size - patch_shape[0]) // 2 # 176\n else:\n step_length = (training_size - patch_shape[0])\n\n for i in range(data.shape[0]):\n if len(patch_shape) == 3: # RGB images\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1], patch_shape[2]))\n imgs.extend(patches)\n else:\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1]))\n imgs.extend(patches)\n\n return np.asarray(imgs)", "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def create(data):\n \n # init color\n color = Color(\n color_id = data.get('id'),\n name = data['name'],\n rgb = data['rgb'],\n is_trans = data['is_trans'])\n \n # get external names and IDs\n if 'external_ids' in data:\n for name, value in data['external_ids'].items():\n color.external_names[name] = [n for l in value['ext_descrs'] for n in l]\n color.external_ids[name] = value['ext_ids']\n \n return color", "def data_shapes(self):", "def build(self, datas):\n\t\t# Browse the list of files\n\t\tfor data in datas:\n\t\t\tif isString(data):\n\t\t\t\tdata = Data(data)\n\t\t\telif isList(data):\n\t\t\t\tstate = None\n\t\t\t\tname = \"\"\n\t\t\t\tif len(data) >= 1:\n\t\t\t\t\tname = data[0]\n\t\t\t\tif len(data) >= 2:\n\t\t\t\t\tstate = data[1]\n\t\t\t\tdata = Data(name, state)\n\t\t\t# Cut the path of the file folder 
and piece\n\t\t\tself.addNode(self.tree,data.path(),data)", "def createGame(color_list):\n figurelist = [] #2D list with figures\n for i in range(0, 450, 30):\n figure_colum = []\n for k in range(0, 750, 30):\n point1 = g.Point(i,k)\n point2 = g.Point(i+30,k+30) \n shape = g.Rectangle(point1, point2)\n colorRow = int(i/30)\n colorColum = int(k/30)\n shape.setFill(color_list[colorRow][colorColum]) #Set color of figures to same as color list\n figure_colum.append(shape)\n figurelist.append(figure_colum)\n \n return figurelist", "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)", "def create_dataset():\n with open(\"/root/config.json\", \"r\") as f:\n config = json.load(f)\n\n # create environmental variables\n for (key, value) in config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()", "def __init__(self, shape_num):\n self.shape_num = shape_num\n if shape_num == 1:\n self.width = 4\n self.height = 4\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.grid[3][2] = 1\n self.color = Color.SilverPink\n elif shape_num == 2:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.TuftsBlue\n elif shape_num == 3:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[2][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.ChromeYellow\n elif shape_num == 4:\n self.width = 2\n self.height = 2\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][0] = 1\n self.grid[0][1] = 1\n self.grid[1][0] = 1\n self.grid[1][1] = 1\n self.color = Color.Independence\n elif shape_num == 5:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[1][0] = 1\n self.grid[2][0] = 1\n self.grid[0][1] = 1\n self.grid[1][1] = 1\n self.color = Color.ForestGreen\n elif shape_num == 6:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[1][1] = 1\n self.grid[0][2] = 1\n self.grid[1][2] = 1\n self.grid[2][2] = 1\n self.color = Color.Byzantine\n elif shape_num == 7:\n self.width = 3\n self.height = 3\n self.grid = [[0 for x in range(self.height)] for y in range(self.width)]\n self.grid[0][0] = 1\n self.grid[1][0] = 1\n self.grid[1][1] = 1\n self.grid[2][1] = 1\n self.color = Color.Coquelicot\n self.top_space = self.get_top_space()\n self.bottom_space = 
self.get_bottom_space()\n self.x = int((12 - self.width) / 2)\n self.y = 1 - self.top_space\n self.last_drop_time = perf_counter()", "def createPickColor():\n color_list = []\n\n for i in range(50, 450, 100): #Create the 4 shapes to show colors\n point1 = g.Point(50, i)\n point2 = g.Point(100, i+50)\n shape = g.Rectangle(point1, point2)\n color_list.append(shape)\n\n #Set the right colors\n color_list[0].setFill(\"Blue\")\n color_list[1].setFill(\"Green\")\n color_list[2].setFill(\"Yellow\")\n color_list[3].setFill(\"Red\")\n\n return color_list", "def write_shapefile(data, directory, filename, crs):\n prop_schema = []\n for name, value in data[0]['properties'].items():\n fiona_prop_type = next((\n fiona_type for fiona_type, python_type in \\\n fiona.FIELD_TYPES_MAP.items() if \\\n python_type == type(value)), None\n )\n\n prop_schema.append((name, fiona_prop_type))\n\n sink_driver = 'ESRI Shapefile'\n sink_crs = {'init': crs}\n sink_schema = {\n 'geometry': data[0]['geometry']['type'],\n 'properties': OrderedDict(prop_schema)\n }\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with fiona.open(\n os.path.join(directory, filename), 'w',\n driver=sink_driver, crs=sink_crs, schema=sink_schema) as sink:\n for datum in data:\n sink.write(datum)", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def save_data(data,patches,labels,name,patch_dim,size=None):\n\n patchPerIm = (data.shape[1]*data.shape[2])/(patch_dim**2)\n\n if size is None:\n size = labels.shape[0]\n\n img_f = open('data/images_'+name,'w')\n patches_f = open('data/patches_'+name,'w')\n label_f = open('data/labels_'+name,'w')\n\n patches = patches.astype(np.float32)\n\n data[:size,...].tofile(img_f)\n patches[:,:(size*patchPerIm)].tofile(patches_f)\n labels[:size].tofile(label_f)\n\n img_f.close()\n patches_f.close()\n label_f.close()", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def generate(n_samples: int, shapes: Union[str, List[Tuple[str, float]]], m_rel: int, n_classes: int, m_irr: int = 0,\n m_red: int = 0, n_clusters_per_class: int = 1, categorical_variables: List[int] = None,\n max_r: float = None, min_r: float = None, random_points: float = 0, noise_levels: List[float] = None,\n name: str = \"Dataset test\", random_state: int = None, points_distribution: str = None,\n save_dir: str = None, singlelabel: bool = True, iou_threshold: Union[float, List[float]] = None,\n mov_vectors: Union[List[List[float]], str] = None) \\\n -> Tuple[pd.DataFrame, pd.DataFrame, List[pd.DataFrame]]:\n\n if min_r is None:\n min_r = round(((n_classes / 10) + 1) / n_classes, 2)\n if max_r is None:\n max_r = min(min_r * 2, 0.8)\n\n if m_rel <= 0:\n raise ValueError(\"m_rel (the number of relevant features) must be larger than 0!\")\n if m_irr < 0:\n raise ValueError(\"m_irr (the number of irrelevant features) must be at least 0!\")\n if m_red < 0:\n raise ValueError(\"m_red (the number of redundant features) must be at least 0!\")\n if n_classes <= 0:\n raise ValueError(\"n_classes (the number of labels) must be larger than 0!\")\n if n_clusters_per_class <= 0:\n raise ValueError(\"n_clusters_per_class (the number of clusters per label) must be 
larger than 0!\")\n if n_samples <= 0:\n raise ValueError(\"n_samples (the number of samples) must be larger than 0!\")\n if categorical_variables is not None and len(categorical_variables) > m_rel:\n raise ValueError(\n \"There cannot be more categorical_variables than relevant features (m_rel)! (len(categorical_variables) <= m_rel)\")\n if max_r <= 0:\n raise ValueError(\"max_r (the maximum radius) must be larger than 0!\")\n if min_r <= 0:\n raise ValueError(\"min_r (the minimum radius) must be larger than 0!\")\n if not (0 <= random_points <= 1):\n raise ValueError(\"random_points must be between 0 and 1 (both inclusive)!\")\n if noise_levels is not None and any([m < 0 or m > 1 for m in noise_levels]):\n raise ValueError(\n \"noise_levels must be at least 0 (no value changes) and at most 1 (every value changes) for every level!\")\n if m_red > m_rel:\n raise ValueError(\"m_red must not be larger then m_rel!\")\n if min_r >= max_r > 0.8:\n raise ValueError(\"min_r < max_r <= 0.8 is required!\")\n\n if isinstance(shapes, str):\n if shapes not in [\"spheres\", \"cubes\", \"moons\", \"mix\"]:\n raise ValueError(\n \"When passing 'hypershapes' as str it needs to be one of ['spheres', 'cubes', 'moons', 'mix]!\")\n if isinstance(shapes, List):\n if len(shapes) < 2:\n raise ValueError(\"When passing 'hypershapes' as list it needs at least 2 entries!\")\n\n probs = []\n for _, prob in shapes:\n probs.append(prob)\n if round(sum(probs), 1) != 1:\n raise ValueError(\"The probabilities for the shapes must sum up to 1!\")\n\n if isinstance(iou_threshold, float):\n if not (0 <= iou_threshold <= 1):\n raise ValueError(\"When passing iou_threshold as float, 0 <= iou_threshold <= 1 is needed!\")\n elif isinstance(iou_threshold, list):\n if len(iou_threshold) != 2:\n raise ValueError(\"When passing iou_threshold as list it needs to contain exactly 2 float values!\")\n elif not (0 <= iou_threshold[0] <= iou_threshold[1] <= 1):\n raise ValueError(\n \"When passing iou_threshold as list the first value must be smaller than the second and both must be betweeen 0 and 1!\")\n\n random.seed(random_state)\n np.random.seed(random_state)\n\n if mov_vectors is not None and mov_vectors == \"random\":\n mov_vectors = np.random.rand(20, m_rel)\n\n hypershapes = generate_small_hypershapes(m_rel, n_classes, max_r, min_r, shapes, n_clusters_per_class,\n iou_threshold)\n\n rp_samples = round(n_samples * random_points)\n n_samples = n_samples - rp_samples\n\n rp = pd.DataFrame(np.array([np.random.rand(m_rel) for _ in range(rp_samples)]) * 2 - 1)\n\n ns = calculate_points_distribution(n_samples, n_classes, hypershapes, points_distribution)\n\n dataset = populate_hypershapes(m_rel, ns, hypershapes, n_classes)\n\n single_labels = dataset[m_rel].to_frame()\n single_labels.rename({m_rel: \"labels\"}, axis=1, inplace=True)\n dataset.drop(m_rel, axis=1, inplace=True)\n\n if not singlelabel:\n labels = assign_labels(dataset, hypershapes, n_classes)\n random_points_labels = assign_random_labels(rp, n_classes)\n else:\n labels = single_labels\n random_points_labels = pd.DataFrame([random.randrange(0, n_classes) for _ in range(len(rp))],\n columns=[\"labels\"])\n\n if mov_vectors is not None:\n dataset = move_points(dataset, mov_vectors, single_labels)\n\n if categorical_variables is not None:\n dataset, rp = make_features_categorical(dataset, rp, categorical_variables)\n\n dataset, labels = merge_dataset_with_random(dataset, labels, rp, random_points_labels)\n\n dataset.rename({i: \"rel{}\".format(i) for i in range(m_rel)}, axis=1, 
inplace=True)\n\n dataset = add_redundant(dataset, m_red, random_state)\n\n dataset = add_irrelevant(dataset, m_irr)\n\n dataset = dataset[np.random.permutation(dataset.columns)]\n\n if not singlelabel:\n noisy_labels = add_noise_multilabel(labels.copy(), noise_levels, n_classes)\n else:\n noisy_labels = add_noise_singlelabel(labels.copy(), noise_levels, n_classes)\n\n if save_dir:\n save_dir += \"/sl\" if singlelabel else \"/ml\"\n if isinstance(shapes, str):\n shape_name = shapes\n else:\n shape_name = \"custom\"\n Path(save_dir).mkdir(parents=True, exist_ok=True)\n with open(\"{}/{}_{}_dataset.csv\".format(save_dir, name.lower(), shape_name), \"w\") as file:\n dataset.to_csv(file, index=False)\n with open(\"{}/{}_{}_labels.csv\".format(save_dir, name.lower(), shape_name), \"w\") as file:\n labels.to_csv(file, index=False)\n for ind, noise_level in enumerate(noisy_labels):\n with open(\"{}/{}_{}_n{}_labels.csv\".format(save_dir, name.lower(), shape_name, ind), \"w\") as file:\n noise_level.to_csv(file, index=False)\n\n return dataset, labels, noisy_labels", "def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset", "def gen_batch_function(self, data_folder, image_shape):\n\n\t\tdef get_batches_fn(batch_size):\n\t\t\t#\n\t\t\timage_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n\t\t\t#\n\t\t\tlabel_paths = {\tre.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n\t\t\t\tfor path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\t\t\t#\n\t\t\tbackground_color = np.array([255, 0, 0])\n\t\t\t#\n\t\t\trandom.shuffle(image_paths)\n\t\t\t#\n\t\t\tfor batch_i in range(0, len(image_paths), batch_size):\n\t\t\t\t#\n\t\t\t\timages = []\n\t\t\t\t#\n\t\t\t\tgt_images = []\n\t\t\t\t#\n\t\t\t\tfor image_file in image_paths[batch_i:batch_i+batch_size]:\n\t\t\t\t\t#\n\t\t\t\t\tgt_image_file = label_paths[os.path.basename(image_file)]\n\t\t\t\t\t#\n\t\t\t\t\timage = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = np.all(gt_image == background_color, axis=2)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\t\t\t\t\t#\n\t\t\t\t\timages.append(image)\n\t\t\t\t\t#\n\t\t\t\t\tgt_images.append(gt_image)\n\t\t\t\t#\n\t\t\t\tyield np.array(images), np.array(gt_images)\n\t\t#\n\t\treturn get_batches_fn", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) 
as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def create_dirs():\n\tif os.path.isdir(path):\n\t\tshutil.rmtree(path, ignore_errors=True)\n\tos.makedirs(path+\"/log\",exist_ok=True)\n\tos.makedirs(path+\"/losses\",exist_ok=True) \n\tos.makedirs(path+\"/samples\",exist_ok=True)\n\tos.makedirs(path+\"/model\",exist_ok=True)\n\tos.makedirs(path+\"/datasets\",exist_ok=True)\n\tshutil.copy2(\"config.py\", path+\"/config.py\")\n\tfor i in rconfig[\"datasets\"]:\n\t\tdsconfig = get_dsconfig(i)\n\t\tos.makedirs(path+\"/datasets/\"+dsconfig[\"id\"],exist_ok=True)\n\t\tshutil.copy2(i+\"/dsconfig.py\", path+\"/datasets/\"+dsconfig[\"id\"]+\"/dsconfig.py\")\n\t\tcopytree(dsconfig[\"split\"], path+\"/datasets/\"+dsconfig[\"id\"]+\"/split\")", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')", "def setUp(self):\r\n self.data = {}\r\n self.data['coord'] = [['Sample1', 'Sample2'], array([[-0.2, 0.07],\r\n [-0.04, 0.2]]), array(\r\n [0.7, 0.6]),\r\n array([25.00, 30.00])]\r\n self.data[\r\n 'map'] = [['#Sample-ID', 'Day'], ['Sample1', 'Day1'], ['Sample2',\r\n 'Day1']]\r\n\r\n self.coord_header = [\"Sample1\", \"Sample2\", \"Sample3\"]\r\n self.coords = array(\r\n [[-0.219044992, 0.079674486, 0.09233683], [-0.042258081,\r\n 0.000204041, 0.024837603], [0.080504323, -0.212014503,\r\n -0.088353435]])\r\n self.groups = {}\r\n self.groups['Day1'] = ['Sample1', 'Sample2', 'Sample3']\r\n self.pct_var = array([25.00, 30.00, 35.00])\r\n self.coord_tups = [(\"1\", \"2\"), (\"3\", \"2\"), (\"1\", \"3\")]\r\n self.colors = {\"Day1\": \"red1\"}\r\n self.filename = 'test_pca.txt'\r\n self.dir_path = '/tmp/'\r\n self.prefs = {}\r\n self.prefs['Sample'] = {}\r\n self.prefs['Sample']['column'] = \"Day\"\r\n\r\n self.dict = defaultdict(list)\r\n self.dict['Day1'].append('Sample1')\r\n self.dict['Day1'].append('Sample2')\r\n self.dict['Day1'].append('Sample3')\r\n\r\n self.labelname = self.prefs['Sample']['column']\r\n self.mapping = [\r\n [\"Sample-ID\", \"Day\", \"Type\"], [\"Sample1\", \"Day1\", \"Soil\"],\r\n [\"Sample2\", \"Day1\", 
\"Soil\"], [\"Sample3\", \"Day1\", \"Soil\"]]\r\n self.data_color_hsv = {\r\n #'black1':\t(0,0,20),\r\n 'red1': (0, 100, 100),\r\n 'blue1': (240, 100, 100),\r\n 'orange1': (28, 98, 95),\r\n 'green1': (120, 100, 50.2),\r\n 'purple1': (302, 73, 57),\r\n 'yellow1': (60, 100, 100),\r\n 'cyan1': (184, 49, 96),\r\n 'pink1': (333, 37, 96),\r\n 'teal1': (178, 42, 63),\r\n 'brown1': (36, 89, 42),\r\n 'gray1': (0, 0, 50.2),\r\n 'lime': (123, 99, 96),\r\n 'red2': (14, 51, 97),\r\n 'blue2': (211, 42, 85),\r\n 'orange2': (32, 46, 99),\r\n 'green2': (142, 36, 79),\r\n 'purple2': (269, 29, 75),\r\n 'yellow2': (56, 40, 100),\r\n #'black2':\t(303,100,24),\r\n 'gray2': (0, 0, 75.3),\r\n #'teal2':\t(192,100,24),\r\n 'red3': (325, 100, 93),\r\n 'blue3': (197, 100, 100),\r\n #'purple3':\t(271,43,36),\r\n 'brown2': (33, 45, 77),\r\n 'green3': (60, 100, 50.2),\r\n 'purple4': (264, 75, 100),\r\n #'yellow3':\t(60,66,75),\r\n #'blue4':\t(213,45,77),\r\n 'red4': (348, 31, 74),\r\n 'teal3': (180, 100, 50.2),\r\n #'brown3':\t(60,100,28),\r\n 'red5': (0, 100, 50.2),\r\n 'green4': (81, 100, 26),\r\n #'purple5':\t(240,100,41),\r\n 'orange3': (26, 100, 65)\r\n #'brown4':\t(25,100,20),\r\n #'red6':\t(17,100,63),\r\n #'purple6':(272,100,44)\r\n }\r\n\r\n self.data_color_order = ['red1', 'blue1', 'orange1', 'green1',\r\n 'purple1', 'yellow1', 'cyan1', 'pink1', 'teal1', 'brown1',\r\n 'gray1', 'lime', 'red2', 'blue2', 'orange2', 'green2',\r\n 'purple2', 'yellow2', 'gray2', 'red3', 'blue3', 'brown2',\r\n 'green3', 'purple4', 'red4', 'teal3', 'red5', 'green4',\r\n 'orange3']\r\n\r\n self._paths_to_clean_up = []\r\n self._dir_to_clean_up = ''", "def write_shapefile(data, directory, filename, crs):\n # Translate props to Fiona sink schema\n prop_schema = []\n for name, value in data[0]['properties'].items():\n fiona_prop_type = next((\n fiona_type for fiona_type, python_type in \\\n fiona.FIELD_TYPES_MAP.items() if \\\n python_type == type(value)), None\n )\n\n prop_schema.append((name, fiona_prop_type))\n\n sink_driver = 'ESRI Shapefile'\n sink_crs = {'init': crs}\n sink_schema = {\n 'geometry': data[0]['geometry']['type'],\n 'properties': OrderedDict(prop_schema)\n }\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Write all elements to output file\n with fiona.open(\n os.path.join(directory, filename), 'w',\n driver=sink_driver, crs=sink_crs, schema=sink_schema) as sink:\n for datum in data:\n sink.write(datum)", "def generate_data(out_fname, data_directory):\n def store_result(duration, loci_number):\n \"\"\" Store result of current timing run\n \"\"\"\n print(' %ds for %d loci' % (duration, loci_number))\n\n if os.path.isfile(out_fname):\n with open(out_fname, 'r') as fd:\n cur = json.load(fd)\n else:\n cur = []\n\n with open(out_fname, 'w') as fd:\n cur.append((loci_number, duration))\n json.dump(cur, fd)\n\n for fn in os.listdir(data_directory):\n fname = os.path.join(data_directory, fn)\n\n print('Loading \"%s\"...' 
% fname, end=' ', flush=True)\n contacts = np.loadtxt(fname)\n print('Done')\n\n start = time.time()\n try:\n apply_shrec3d(contacts)\n except:\n print('>>> Some error occured')\n traceback.print_exc()\n end = time.time()\n\n store_result(end-start, contacts.shape[0])", "def read_files_and_visualize(data):\n\n image = cv2.imread(data[0])\n label = cv2.imread(data[1], 0)\n name = data[1].split('/')[-1].split('.')[0]\n obj_label = None\n\n if generator_options.save_label_preview:\n obj_label = []\n if os.path.isfile(data[2]):\n with open(data[2], 'r') as f:\n obj = csv.reader(f, delimiter=',')\n for row in obj:\n row = [int(r.split('.')[0]) if index != 0 else r\n for index, r in enumerate(row)]\n obj_label.append(row)\n\n else:\n label_vals = np.unique(label)\n for val in label_vals:\n obj_label.append([_LABEL_DEF_FULL[val], 0, 0, 0, 0])\n\n save_visuals(image, label, obj_label, name)", "def process_scene_data(self, scene, data, tmp_dir):\n scene_dir = join(tmp_dir, str(scene.id))\n img_dir = join(scene_dir, 'img')\n labels_dir = join(scene_dir, 'labels')\n\n make_dir(img_dir)\n make_dir(labels_dir)\n\n for ind, (chip, window, labels) in enumerate(data):\n chip_path = join(img_dir, '{}-{}.png'.format(scene.id, ind))\n label_path = join(labels_dir, '{}-{}.png'.format(scene.id, ind))\n\n label_im = labels.get_label_arr(window).astype(np.uint8)\n save_img(label_im, label_path)\n save_img(chip, chip_path)\n\n return scene_dir", "def apply(node, data):\n # -- If the data is a filepath we need to extract it\n if not isinstance(data, dict):\n\n # -- Check for a filepath\n if not os.path.exists(data):\n # -- Look for a filename in the shape dir\n data = find_shape(data)\n \n # -- If the path still does not exist then we cannot do\n # -- anything with it\n if not data or not os.path.exists(data):\n constants.log.warning('Could not find shape data for %s' % data)\n return None\n\n with open(data, 'r') as f:\n data = json.load(f)\n\n # -- Define a list which we will collate all the shapes\n # -- in\n shapes = list()\n\n # -- Cycle over each curve element in the data\n for curve_data in data['curves']:\n\n # -- Create a curve with the given cv's\n transform = pm.curve(\n p=[refine_from_up_axis(p, up_axis=data.get('up_axis', 'z')) for p in curve_data['cvs']],\n d=curve_data['degree'],\n k=curve_data['knots'],\n )\n\n # -- Parent the shape under the node\n shape = transform.getShape()\n\n pm.parent(\n shape,\n node,\n shape=True,\n r=True,\n )\n\n # -- Delete the transform\n pm.delete(transform)\n\n shapes.append(shape)\n\n pm.select(node)\n\n return shapes", "def shapes():\n # -- Define a list of locations to search for, starting by\n # -- adding in our builtin shape locations\n paths = [\n os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n 'shapes',\n ),\n ]\n\n # -- If we have any paths defined by environment\n # -- variables we should add them here\n if constants.PLUGIN_ENVIRONMENT_VARIABLE in os.environ:\n paths.extend(\n os.environ[constants.PLUGIN_ENVIRONMENT_VARIABLE].split(';'),\n )\n\n shape_list = list()\n\n for path in paths:\n for root, _, files in os.walk(path):\n for filename in files:\n if filename.endswith('.json'):\n shape_list.append(\n os.path.join(\n root,\n filename,\n ),\n )\n\n return shape_list", "def create(path=\"cubes\",pathIm=\"cubes/img\"):\r\n\tobj_name=createNames(pathImg=pathIm)\r\n\tfor i in obj_name:\r\n\t\tfor j in 
obj_name[i]:\r\n\t\t\tdest=path+chr(47)+str(i)+\"_\"+str(j)\r\n\t\t\tcreate_mtl(dest+\".mtl\",\"img\"+chr(47)+str(i)+chr(47)+str(j)+\".png\")\r\n\t\t\tcreate_obj(dest+\".obj\",str(i)+\"_\"+str(j)+\".mtl\")\r\n\t\t\tcreate_urdf(dest+\".urdf\",dest+\".obj\")\r\n\treturn obj_name", "def generate_test_data(root: str) -> str:\n size = (64, 64)\n folder_path = os.path.join(root, \"enviroatlas_lotp\")\n\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n for prefix in tile_list:\n for suffix, data_profile in layer_data_profiles.items():\n img_path = os.path.join(folder_path, f\"{prefix}_{suffix}.tif\")\n img_dir = os.path.dirname(img_path)\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n data_profile[\"profile\"][\"height\"] = size[0]\n data_profile[\"profile\"][\"width\"] = size[1]\n data_profile[\"profile\"][\"transform\"] = Affine(\n 1.0, 0.0, 608170.0, 0.0, -1.0, 3381430.0\n )\n\n write_data(\n img_path,\n data_profile[\"profile\"],\n data_profile[\"data_type\"],\n data_profile[\"vals\"],\n )\n\n # build the spatial index\n schema = {\n \"geometry\": \"Polygon\",\n \"properties\": {\n \"split\": \"str\",\n \"naip\": \"str\",\n \"nlcd\": \"str\",\n \"roads\": \"str\",\n \"water\": \"str\",\n \"waterways\": \"str\",\n \"waterbodies\": \"str\",\n \"buildings\": \"str\",\n \"lc\": \"str\",\n \"prior_no_osm_no_buildings\": \"str\",\n \"prior\": \"str\",\n },\n }\n with fiona.open(\n os.path.join(folder_path, \"spatial_index.geojson\"),\n \"w\",\n driver=\"GeoJSON\",\n crs=\"EPSG:3857\",\n schema=schema,\n ) as dst:\n for prefix in tile_list:\n img_path = os.path.join(folder_path, f\"{prefix}_a_naip.tif\")\n with rasterio.open(img_path) as f:\n geom = shapely.geometry.mapping(shapely.geometry.box(*f.bounds))\n geom = fiona.transform.transform_geom(\n f.crs.to_string(), \"EPSG:3857\", geom\n )\n\n row = {\n \"geometry\": geom,\n \"properties\": {\n \"split\": prefix.split(\"/\")[0].replace(\"_tiles-debuffered\", \"\")\n },\n }\n for suffix, data_profile in layer_data_profiles.items():\n key = suffix_to_key_map[suffix]\n row[\"properties\"][key] = f\"{prefix}_{suffix}.tif\"\n dst.write(row)\n\n # Create archive\n archive_path = os.path.join(root, \"enviroatlas_lotp\")\n shutil.make_archive(archive_path, \"zip\", root_dir=root, base_dir=\"enviroatlas_lotp\")\n shutil.rmtree(folder_path)\n md5: str = calculate_md5(archive_path + \".zip\")\n return md5", "def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)", "def create_data_dict(data_dir, img_size=[25, 83]):\n print(\"Creating data dictionary\")\n print(\"- Using data at:\", data_dir)\n\n # Directories\n imgs_dir = os.path.join(data_dir, \"training/image_2\")\n labels_dir = os.path.join(data_dir, \"training/gt_image_2\")\n\n print(\"- Getting list of files\")\n # Only get the label files for road (not lane)\n label_files = glob.glob(os.path.join(labels_dir, \"*_road_*.png\"))\n\n # 
Create corresponding list of training image files\n img_files = list(map(lambda f: os.path.basename(f).replace(\"_road\", \"\"), label_files))\n img_files = list(map(lambda f: os.path.join(imgs_dir, f), img_files)) # absolute path\n\n n_samples = len(img_files)\n print(\"- Encountered {} samples\".format(n_samples))\n est_filesize = (n_samples*np.prod(img_size)*(3+1))/1e6\n print(\"- Estimated output filesize: {:0.3f} MB + overhead\".format(est_filesize))\n\n data = {}\n data[\"X_train\"] = np.empty([n_samples]+img_size+[3], dtype=np.uint8)\n data[\"Y_train\"] = np.empty([n_samples]+img_size, dtype=np.uint8)\n\n print(\"- Processing image files\")\n for i in range(n_samples):\n label_img = scipy.misc.imread(label_files[i])\n input_img = scipy.misc.imread(img_files[i])\n\n # PRERPOCESS THE IMAGES\n label_img = scipy.misc.imresize(label_img, img_size)\n input_img = scipy.misc.imresize(input_img, img_size)\n\n # PROCESSING LABEL IMAGE\n # Only one channel, (1=road, 0=not road)\n non_road_class = np.array([255,0,0])\n label_img = (1-np.all(label_img==non_road_class, axis=2, keepdims=False)).astype(np.uint8)\n\n # Place the images into the data arrays\n data[\"X_train\"][i] = input_img\n data[\"Y_train\"][i] = label_img\n\n print(\"- Shuffling the data\")\n np.random.seed(seed=128)\n ids = list(np.random.permutation(n_samples))\n data[\"X_train\"] = data[\"X_train\"][ids]\n data[\"Y_train\"] = data[\"Y_train\"][ids]\n\n print(\"- Done!\")\n return data", "def fixture_image_data(tmp_path_factory, request):\n # Make root dir\n root = tmp_path_factory.mktemp(\"data\")\n\n # Set params\n num_images = request.param\n\n # Create image files\n paths = [root / Path(f\"{idx}.png\") for idx in range(num_images)]\n dimensions = [(idx % 10 + 1, (10 - idx) % 10 + 1) for idx in range(num_images)]\n for path, dim in zip(paths, dimensions):\n image = Image.new(mode=\"RGB\", size=dim)\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n with open(path, \"wb\") as img_file:\n image.save(img_file)\n return root", "def create_dataloaders(data_dir, input_size, batch_size):\n # Get dataset mean and std\n dataset_mean, dataset_std = estimate_dataset_mean_and_std(expand_subdir(data_dir), input_size)\n\n # Data augmentation and normalization for training\n # Just normalization for validation\n data_transforms = {\n 'train': transforms.Compose([\n opencv_transforms.RandomRotation(45),\n opencv_transforms.RandomResizedCrop(input_size),\n opencv_transforms.RandomHorizontalFlip(),\n opencv_transforms.RandomVerticalFlip(),\n opencv_transforms.ToTensor(),\n transforms.Normalize(dataset_mean, dataset_std)\n ]),\n 'val': transforms.Compose([\n opencv_transforms.Resize(input_size),\n opencv_transforms.ToTensor(),\n transforms.Normalize(dataset_mean, dataset_std)\n ]),\n 'test': transforms.Compose([\n opencv_transforms.Resize(input_size),\n opencv_transforms.ToTensor(),\n transforms.Normalize(dataset_mean, dataset_std)\n ])\n }\n\n # Create training and validation datasets\n image_datasets = {\n x: datasets.DatasetFolder(\n os.path.join(data_dir, x),\n opencv_loader,\n ['jpg', 'tif'],\n transform=data_transforms[x]\n )\n for x in ['train', 'val', 'test']\n }\n\n # Create training and validation dataloaders\n dataloaders = {\n x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=1)\n for x in ['train', 'val', 'test']\n }\n\n return dataloaders, dataset_mean, dataset_std", "def add_shapes(\n self,\n data=None,\n *,\n ndim=None,\n properties=None,\n text=None,\n 
shape_type='rectangle',\n edge_width=1,\n edge_color='black',\n edge_color_cycle=None,\n edge_colormap='viridis',\n edge_contrast_limits=None,\n face_color='white',\n face_color_cycle=None,\n face_colormap='viridis',\n face_contrast_limits=None,\n z_index=0,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=0.7,\n blending='translucent',\n visible=True,\n ) -> layers.Shapes:\n if data is None:\n if ndim is None:\n ndim = max(self.dims.ndim, 2)\n data = np.empty((0, 0, ndim))\n\n layer = layers.Shapes(\n data=data,\n ndim=ndim,\n properties=properties,\n text=text,\n shape_type=shape_type,\n edge_width=edge_width,\n edge_color=edge_color,\n edge_color_cycle=edge_color_cycle,\n edge_colormap=edge_colormap,\n edge_contrast_limits=edge_contrast_limits,\n face_color=face_color,\n face_color_cycle=face_color_cycle,\n face_colormap=face_colormap,\n face_contrast_limits=face_contrast_limits,\n z_index=z_index,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n self.add_layer(layer)\n return layer", "def gen_dtu_resized_path(dtu_data_folder, mode='training'):\n sample_list = []\n \n # parse camera pairs\n cluster_file_path = dtu_data_folder + '/Cameras/pair.txt'\n \n # cluster_list = open(cluster_file_path).read().split()\n cluster_list = file_io.FileIO(cluster_file_path, mode='r').read().split()\n\n # 3 sets\n training_set = [2, 6, 7, 8, 14, 16, 18, 19, 20, 22, 30, 31, 36, 39, 41, 42, 44,\n 45, 46, 47, 50, 51, 52, 53, 55, 57, 58, 60, 61, 63, 64, 65, 68, 69, 70, 71, 72,\n 74, 76, 83, 84, 85, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\n 101, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 119, 120,\n 121, 122, 123, 124, 125, 126, 127, 128]\n validation_set = [3, 5, 17, 21, 28, 35, 37, 38, 40, 43, 56, 59, 66, 67, 82, 86, 106, 117]\n\n data_set = []\n if mode == 'training':\n data_set = training_set\n elif mode == 'validation':\n data_set = validation_set\n\n # for each dataset\n for i in data_set:\n\n image_folder = os.path.join(dtu_data_folder, ('Rectified/scan%d_train' % i))\n cam_folder = os.path.join(dtu_data_folder, 'Cameras/train')\n depth_folder = os.path.join(dtu_data_folder, ('Depths/scan%d_train' % i))\n\n if mode == 'training':\n # for each lighting\n for j in range(0, 7):\n # for each reference image\n for p in range(0, int(cluster_list[0])):\n paths = []\n # ref image\n ref_index = int(cluster_list[22 * p + 1])\n ref_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j)))\n ref_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % ref_index))\n paths.append(ref_image_path)\n paths.append(ref_cam_path)\n # view images\n for view in range(FLAGS.view_num - 1):\n view_index = int(cluster_list[22 * p + 2 * view + 3])\n view_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j)))\n view_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % view_index))\n paths.append(view_image_path)\n paths.append(view_cam_path)\n # depth path\n depth_image_path = os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index))\n paths.append(depth_image_path)\n sample_list.append(paths)\n elif mode == 'validation':\n j = 3\n # for each reference image\n for p in range(0, int(cluster_list[0])):\n paths = []\n # ref image\n ref_index = int(cluster_list[22 * p + 1])\n ref_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j)))\n ref_cam_path = os.path.join(cam_folder, 
('%08d_cam.txt' % ref_index))\n paths.append(ref_image_path)\n paths.append(ref_cam_path)\n # view images\n for view in range(FLAGS.view_num - 1):\n view_index = int(cluster_list[22 * p + 2 * view + 3])\n view_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j)))\n view_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % view_index))\n paths.append(view_image_path)\n paths.append(view_cam_path)\n # depth path\n depth_image_path = os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index))\n paths.append(depth_image_path)\n sample_list.append(paths)\n \n return sample_list", "def load_data(f='', use_cols=[], xlabel='', ylabel='',scatter=False,contour=False, connect=False,\n errorbar=False, std_col=None, data_domain=None, labo=[], title='',\n return_data=False, graph=False, multiple_files=None, file_stem='', folder=None,\n file_list = None, color_list=None, combine=False):\n\n def combine_data(data_files_dict):\n \"\"\"\n This combines data from different files so that the first column of file 1 is concatenated with first column\n oof file 2 and so on\n :param data_files_dict:\n :return:\n \"\"\"\n key_list = list(data_files_dict.keys())\n no_col = len(data_files_dict[key_list[0]])\n combined = []\n for n in range(0, no_col):\n d = np.empty(shape=[0, 1])\n for k in data_files_dict:\n d = np.append(d, data_files_dict[k][n])\n combined.append(d)\n return combined\n\n def data_graph(graph=False):\n \"\"\"\n Gets data from folder or list of files or file and graphs it in\n some manner\n :param graph:\n :return:\n \"\"\"\n def axes_data(use_cols1, data1, domain=None):\n if domain is not None:\n axis = [0] * (2 * len(use_cols1))\n for k in range(len(use_cols1)):\n axis[2 * k] = domain\n axis[2 * k + 1] = data1[k]\n return axis\n else:\n axis = [data1[k] for k in use_cols1]\n return axis\n if graph:\n axes = axes_data(use_cols, data, domain=data_domain)\n if scatter:\n for i in range(int(len(axes)/2)):\n if i % 2 == 0:\n plt.scatter(axes[2*i], axes[2*i+1], color_list[i],\n label=labo[i])\n else:\n plt.scatter(axes[2 * i], axes[2 * i + 1], color_list[i]\n ,label=labo[i])\n\n if connect:\n for i in range(int(len(axes)/2)):\n if i % 2 == 0:\n plt.plot(axes[2*i], axes[2*i+1], color_list[i],\n label=labo[i])\n else:\n plt.plot(axes[2 * i], axes[2 * i + 1], color_list[i]\n , label=labo[i])\n\n if errorbar:\n plt.errorbar(*axes, yerr=data[std_col], fmt='o', label=labo)\n if contour:\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib import cm\n from matplotlib.ticker import LinearLocator, FormatStrFormatter\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n z = axes[2][:100]\n len_z = len(z)\n n = np.sqrt(len_z)\n x = np.arange(0, n)\n y = x\n x, y = np.meshgrid(x,y)\n two_dz = z.reshape((int(n), int(n)))\n surf = ax.plot_surface(x,y,two_dz,cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n # Customize the z axis.\n ax.set_zlim(0, 0.18)\n ax.zaxis.set_major_locator(LinearLocator(10))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n # heat_map = plt.pcolor(two_dz)\n # heat_color = plt.colorbar(heat_map, orientation = 'horizontal')\n # heat_color.set_label('Average Fidelity')\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend()\n plt.show()\n\n if multiple_files and folder is not None:\n os.chdir(folder)\n files = sorted(glob.glob(file_stem))\n for g in files:\n data = genfromtxt(g, dtype=float, unpack=True)\n 
data_graph(graph)\n elif multiple_files and file_list:\n if combine:\n file_dictionary = {g: genfromtxt(g, dtype=float, unpack=True) for g in file_list}\n data = combine_data(file_dictionary)\n data_graph(graph)\n else:\n for g in file_list:\n data = genfromtxt(g, dtype=float, unpack=True)\n data_graph(graph)\n\n else:\n data = genfromtxt(f, dtype=float, unpack=True)\n data_graph(graph)\n\n if return_data:\n retrieved = [data[k] for k in use_cols]\n return retrieved", "def maybe_generate_data(data_dir,\n shape=None,\n num_examples=None,\n stone_probability=0.45,\n num_files=2):\n dest_dir = os.path.join(data_dir, \"batches-bin\")\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n # Log hook to measure progress\n # TODO: not in use\n def _progress(count, block_size, total_size):\n sys.stdout.write(\"\\r>> Generating %s %.1f%%\" % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n # generate training batches\n # constrained\n filenames = [\"data_batch_%d.bin\" % i for i in range(num_files)]\n for filename in filenames:\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n print(\"%s not found - generating...\" % filename)\n x, y = generate_constrained_dataset(_progress, **{\n \"num_examples\": num_examples or NUM_EXAMPLES,\n \"stone_probability\": stone_probability,\n \"shape\": shape})\n _convert_to_tfrecords(x, shape, y, filepath)\n print()\n statinfo = os.stat(filepath)\n print(\"Successfully generated\", filename,\n statinfo.st_size, \"bytes.\")\n\n # generate testing batches\n # random\n # TODO: generate random dataset\n filenames = [\"test_batch_%d.bin\" % i for i in range(num_files)]\n for filename in filenames:\n filepath = os.path.join(dest_dir, filename)\n if not os.path.exists(filepath):\n print(\"%s not found - generating...\" % filename)\n # utils.generate_dataset(filepath, _progress, **{\n x, y = generate_constrained_dataset(_progress, **{\n \"num_examples\": num_examples or NUM_EXAMPLES,\n \"stone_probability\": stone_probability,\n \"shape\": shape})\n _convert_to_tfrecords(x, shape, y, filepath)\n print()\n statinfo = os.stat(filepath)\n print(\"Successfully generated\", filename,\n statinfo.st_size, \"bytes.\")", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def dyn_flareplots(df, folderpath, dyn_list, itype, flare_template = False):\n os.makedirs(folderpath, exist_ok = True)\n colors_auld = ['#800000', '#860000', '#8c0000', '#930000', '#990000', '#9f0000', '#a60000', '#ac0000', '#b20000', '#b90000', '#bf0000', '#c50000', '#cc0000', 
'#d20000', '#d80000', '#df0000', '#e50000', '#eb0000', '#f20000', '#f80000', '#ff0000', '#ff0700', '#ff0e00', '#ff1500', '#ff1c00', '#ff2300', '#ff2a00', '#ff3100', '#ff3800', '#ff3f00', '#ff4600', '#ff4d00', '#ff5400', '#ff5b00', '#ff6200', '#ff6900', '#ff7000', '#ff7700', '#ff7e00', '#ff8500', '#ff8c00', '#ff9100', '#ff9700', '#ff9d00', '#ffa300', '#ffa800', '#ffae00', '#ffb400', '#ffba00', '#ffbf00', '#ffc500', '#ffcb00', '#ffd100', '#ffd600', '#ffdc00', '#ffe200', '#ffe800', '#ffed00', '#fff300', '#fff900', '#ffff00', '#f2ff00', '#e5ff00', '#d8ff00', '#ccff00', '#bfff00', '#b2ff00', '#a5ff00', '#99ff00', '#8cff00', '#7fff00', '#72ff00', '#66ff00', '#59ff00', '#4cff00', '#3fff00', '#33ff00', '#26ff00', '#19ff00', '#0cff00', '#00ff00', '#0afc0a', '#15fa15', '#1ff81f', '#2af62a', '#34f434', '#3ff13f', '#49ef49', '#54ed54', '#5eeb5e', '#69e969', '#74e674', '#7ee47e', '#89e289', '#93e093', '#9ede9e', '#a8dba8', '#b3d9b3', '#bdd7bd', '#c8d5c8', '#d3d3d3']\n colors_ylorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', '#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#ffeea3', '#fff0a7', '#fff1ab', '#fff3ae', '#fff4b2', '#fff6b6', '#fff7b9', '#fff9bd', '#fffac1', '#fffcc4', '#fffdc8', '#ffffcc']\n colors_inferno = ['#000003', '#000004', '#000006', '#010007', '#010109', '#01010B', '#02010E', '#020210', '#030212', '#040314', '#040316', '#050418', '#06041B', '#07051D', '#08061F', '#090621', '#0A0723', '#0B0726', '#0D0828', '#0E082A', '#0F092D', '#10092F', '#120A32', '#130A34', '#140B36', '#160B39', '#170B3B', '#190B3E', '#1A0B40', '#1C0C43', '#1D0C45', '#1F0C47', '#200C4A', '#220B4C', '#240B4E', '#260B50', '#270B52', '#290B54', '#2B0A56', '#2D0A58', '#2E0A5A', '#300A5C', '#32095D', '#34095F', '#350960', '#370961', '#390962', '#3B0964', '#3C0965', '#3E0966', '#400966', '#410967', '#430A68', '#450A69', '#460A69', '#480B6A', '#4A0B6A', '#4B0C6B', '#4D0C6B', '#4F0D6C', '#500D6C', '#520E6C', '#530E6D', '#550F6D', '#570F6D', '#58106D', '#5A116D', '#5B116E', '#5D126E', '#5F126E', '#60136E', '#62146E', '#63146E', '#65156E', '#66156E', '#68166E', '#6A176E', '#6B176E', '#6D186E', '#6E186E', '#70196E', '#72196D', '#731A6D', '#751B6D', '#761B6D', '#781C6D', '#7A1C6D', '#7B1D6C', '#7D1D6C', '#7E1E6C', '#801F6B', '#811F6B', '#83206B', '#85206A', '#86216A', '#88216A', '#892269', '#8B2269', '#8D2369', '#8E2468', '#902468', '#912567', '#932567', '#952666', '#962666', '#982765', '#992864', '#9B2864', '#9C2963', '#9E2963', '#A02A62', '#A12B61', '#A32B61', '#A42C60', '#A62C5F', '#A72D5F', '#A92E5E', '#AB2E5D', '#AC2F5C', '#AE305B', '#AF315B', '#B1315A', '#B23259', '#B43358', '#B53357', '#B73456', '#B83556', '#BA3655', '#BB3754', '#BD3753', '#BE3852', '#BF3951', '#C13A50', '#C23B4F', 
'#C43C4E', '#C53D4D', '#C73E4C', '#C83E4B', '#C93F4A', '#CB4049', '#CC4148', '#CD4247', '#CF4446', '#D04544', '#D14643', '#D24742', '#D44841', '#D54940', '#D64A3F', '#D74B3E', '#D94D3D', '#DA4E3B', '#DB4F3A', '#DC5039', '#DD5238', '#DE5337', '#DF5436', '#E05634', '#E25733', '#E35832', '#E45A31', '#E55B30', '#E65C2E', '#E65E2D', '#E75F2C', '#E8612B', '#E9622A', '#EA6428', '#EB6527', '#EC6726', '#ED6825', '#ED6A23', '#EE6C22', '#EF6D21', '#F06F1F', '#F0701E', '#F1721D', '#F2741C', '#F2751A', '#F37719', '#F37918', '#F47A16', '#F57C15', '#F57E14', '#F68012', '#F68111', '#F78310', '#F7850E', '#F8870D', '#F8880C', '#F88A0B', '#F98C09', '#F98E08', '#F99008', '#FA9107', '#FA9306', '#FA9506', '#FA9706', '#FB9906', '#FB9B06', '#FB9D06', '#FB9E07', '#FBA007', '#FBA208', '#FBA40A', '#FBA60B', '#FBA80D', '#FBAA0E', '#FBAC10', '#FBAE12', '#FBB014', '#FBB116', '#FBB318', '#FBB51A', '#FBB71C', '#FBB91E', '#FABB21', '#FABD23', '#FABF25', '#FAC128', '#F9C32A', '#F9C52C', '#F9C72F', '#F8C931', '#F8CB34', '#F8CD37', '#F7CF3A', '#F7D13C', '#F6D33F', '#F6D542', '#F5D745', '#F5D948', '#F4DB4B', '#F4DC4F', '#F3DE52', '#F3E056', '#F3E259', '#F2E45D', '#F2E660', '#F1E864', '#F1E968', '#F1EB6C', '#F1ED70', '#F1EE74', '#F1F079', '#F1F27D', '#F2F381', '#F2F485', '#F3F689', '#F4F78D', '#F5F891', '#F6FA95', '#F7FB99', '#F9FC9D', '#FAFDA0', '#FCFEA4']\n colors_magma = ['#000003', '#000004', '#000006', '#010007', '#010109', '#01010B', '#02020D', '#02020F', '#030311', '#040313', '#040415', '#050417', '#060519', '#07051B', '#08061D', '#09071F', '#0A0722', '#0B0824', '#0C0926', '#0D0A28', '#0E0A2A', '#0F0B2C', '#100C2F', '#110C31', '#120D33', '#140D35', '#150E38', '#160E3A', '#170F3C', '#180F3F', '#1A1041', '#1B1044', '#1C1046', '#1E1049', '#1F114B', '#20114D', '#221150', '#231152', '#251155', '#261157', '#281159', '#2A115C', '#2B115E', '#2D1060', '#2F1062', '#301065', '#321067', '#341068', '#350F6A', '#370F6C', '#390F6E', '#3B0F6F', '#3C0F71', '#3E0F72', '#400F73', '#420F74', '#430F75', '#450F76', '#470F77', '#481078', '#4A1079', '#4B1079', '#4D117A', '#4F117B', '#50127B', '#52127C', '#53137C', '#55137D', '#57147D', '#58157E', '#5A157E', '#5B167E', '#5D177E', '#5E177F', '#60187F', '#61187F', '#63197F', '#651A80', '#661A80', '#681B80', '#691C80', '#6B1C80', '#6C1D80', '#6E1E81', '#6F1E81', '#711F81', '#731F81', '#742081', '#762181', '#772181', '#792281', '#7A2281', '#7C2381', '#7E2481', '#7F2481', '#812581', '#822581', '#842681', '#852681', '#872781', '#892881', '#8A2881', '#8C2980', '#8D2980', '#8F2A80', '#912A80', '#922B80', '#942B80', '#952C80', '#972C7F', '#992D7F', '#9A2D7F', '#9C2E7F', '#9E2E7E', '#9F2F7E', '#A12F7E', '#A3307E', '#A4307D', '#A6317D', '#A7317D', '#A9327C', '#AB337C', '#AC337B', '#AE347B', '#B0347B', '#B1357A', '#B3357A', '#B53679', '#B63679', '#B83778', '#B93778', '#BB3877', '#BD3977', '#BE3976', '#C03A75', '#C23A75', '#C33B74', '#C53C74', '#C63C73', '#C83D72', '#CA3E72', '#CB3E71', '#CD3F70', '#CE4070', '#D0416F', '#D1426E', '#D3426D', '#D4436D', '#D6446C', '#D7456B', '#D9466A', '#DA4769', '#DC4869', '#DD4968', '#DE4A67', '#E04B66', '#E14C66', '#E24D65', '#E44E64', '#E55063', '#E65162', '#E75262', '#E85461', '#EA5560', '#EB5660', '#EC585F', '#ED595F', '#EE5B5E', '#EE5D5D', '#EF5E5D', '#F0605D', '#F1615C', '#F2635C', '#F3655C', '#F3675B', '#F4685B', '#F56A5B', '#F56C5B', '#F66E5B', '#F6705B', '#F7715B', '#F7735C', '#F8755C', '#F8775C', '#F9795C', '#F97B5D', '#F97D5D', '#FA7F5E', '#FA805E', '#FA825F', '#FB8460', '#FB8660', '#FB8861', '#FB8A62', '#FC8C63', '#FC8E63', '#FC9064', '#FC9265', '#FC9366', 
'#FD9567', '#FD9768', '#FD9969', '#FD9B6A', '#FD9D6B', '#FD9F6C', '#FDA16E', '#FDA26F', '#FDA470', '#FEA671', '#FEA873', '#FEAA74', '#FEAC75', '#FEAE76', '#FEAF78', '#FEB179', '#FEB37B', '#FEB57C', '#FEB77D', '#FEB97F', '#FEBB80', '#FEBC82', '#FEBE83', '#FEC085', '#FEC286', '#FEC488', '#FEC689', '#FEC78B', '#FEC98D', '#FECB8E', '#FDCD90', '#FDCF92', '#FDD193', '#FDD295', '#FDD497', '#FDD698', '#FDD89A', '#FDDA9C', '#FDDC9D', '#FDDD9F', '#FDDFA1', '#FDE1A3', '#FCE3A5', '#FCE5A6', '#FCE6A8', '#FCE8AA', '#FCEAAC', '#FCECAE', '#FCEEB0', '#FCF0B1', '#FCF1B3', '#FCF3B5', '#FCF5B7', '#FBF7B9', '#FBF9BB', '#FBFABD', '#FBFCBF']\n colors_ylgnbl = ['#081d58', '#0a1e5d', '#0c2062', '#0f2267', '#11246c', '#142671', '#162876', '#182a7b', '#1b2c80', '#1d2e85', '#20308a', '#22328f', '#253494', '#243795', '#243b97', '#243e99', '#24429a', '#23459c', '#23499e', '#234c9f', '#2350a1', '#2253a3', '#2257a4', '#225aa6', '#225ea8', '#2162aa', '#2166ac', '#206aae', '#206fb0', '#1f73b2', '#1f77b4', '#1f7bb6', '#1e80b8', '#1e84ba', '#1d88bc', '#1d8cbe', '#1d91c0', '#2094c0', '#2397c0', '#269ac1', '#299dc1', '#2ca0c1', '#2fa3c2', '#32a6c2', '#35a9c2', '#38acc3', '#3bafc3', '#3eb2c3', '#41b6c4', '#46b7c3', '#4bb9c2', '#50bbc1', '#55bdc1', '#5abfc0', '#60c1bf', '#65c3be', '#6ac5be', '#6fc7bd', '#74c9bc', '#79cbbb', '#7fcdbb', '#85cfba', '#8bd1b9', '#91d4b9', '#97d6b8', '#9dd8b8', '#a3dbb7', '#a9ddb6', '#afdfb6', '#b5e2b5', '#bbe4b5', '#c1e6b4', '#c7e9b4', '#caeab3', '#cdebb3', '#d0ecb3', '#d3eeb3', '#d6efb2', '#daf0b2', '#ddf1b2', '#e0f3b2', '#e3f4b1', '#e6f5b1', '#e9f6b1', '#edf8b1', '#eef8b4', '#f0f9b7', '#f1f9bb', '#f3fabe', '#f4fac1', '#f6fbc5', '#f7fcc8', '#f9fccb', '#fafdcf', '#fcfdd2', '#fdfed5', '#ffffd9']\n colors_grorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', '#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#fbeaa4', '#f7e8a8', '#f4e6ac', '#f0e4b1', '#ece2b5', '#e9e0b9', '#e5ddbd', '#e1dbc2', '#ded9c6', '#dad7ca', '#d6d5ce', '#d3d3d3']\n colors = colors_grorrd\n for dyn in dyn_list:\n\n # Select top interactions based on its mean frequency. 
Also asign color based on mean value\n color_len = len(colors) -1\n df_clust = df.filter(items = [dyn, 'APosition1', 'APosition2', 'BPosition1', 'BPosition2','CPosition1', 'CPosition2','FPosition1', 'FPosition2',])\n df_clust['color'] = df_clust[dyn].apply(lambda x: colors[color_len-round(x*color_len/100)]) #There are 101 colors avalible in list\n\n #Filter top 5 in df_clust\n df_clust = df_clust.nlargest(20, dyn)\n\n # 'Edge' entry for json file\n df_dict = pd.DataFrame(columns = [\"name1\", \"name2\", \"frames\"])\n df_dict['name1'] = df_clust['APosition1'] \n df_dict['name2'] = df_clust['APosition2']\n df_dict['frames'] = [[1]]*len(df_dict)\n df_dict['color'] = df_clust['color']\n df_dict['value'] = df_clust[dyn]\n edges = df_dict.to_dict(orient=\"records\")\n\n # Appending edges to flare plot template, if any submitted\n if flare_template:\n flare_template['edges'] = edges\n jsondict = flare_template\n else:\n jsondict = { 'edges' : edges }\n\n #'Edge' multi-entries, based on the 4 GPCR nomenclatures\n for leter in ['A', 'B', 'C', 'F']:\n df_dict = pd.DataFrame(columns = [\"name1\", \"name2\", \"frames\"])\n df_dict['name1'] = df_clust[leter+'Position1'] \n df_dict['name2'] = df_clust[leter+'Position2']\n df_dict['frames'] = [[1]]*len(df_dict)\n df_dict['color'] = df_clust['color']\n df_dict['value'] = df_clust[dyn]\n leter_edges = df_dict.to_dict(orient=\"records\")\n\n #Appending edges\n if flare_template:\n flare_template[leter+'edges'] = leter_edges\n jsondict = flare_template\n else:\n jsondict = { leter+'edges' : leter_edges }\n\n #Writing json\n jsonpath = folderpath + dyn + \"_top.json\"\n with open(jsonpath, 'w') as jsonfile:\n dump(jsondict, jsonfile, ensure_ascii=False, indent = 4)", "def _create_directories(self):\n print \"[--init] creating directory structure in %s\" % (self.target_path)\n ensure_path(self.conf_path)\n for subdir in config.PROCESSING_AREAS:\n subdir_path = self.data_path + os.sep + subdir\n ensure_path(subdir_path)", "def flareplot_json(df, clustdict, folderpath, flare_template = False):\n os.makedirs(folderpath, exist_ok = True)\n colors_auld = ['#800000', '#860000', '#8c0000', '#930000', '#990000', '#9f0000', '#a60000', '#ac0000', '#b20000', '#b90000', '#bf0000', '#c50000', '#cc0000', '#d20000', '#d80000', '#df0000', '#e50000', '#eb0000', '#f20000', '#f80000', '#ff0000', '#ff0700', '#ff0e00', '#ff1500', '#ff1c00', '#ff2300', '#ff2a00', '#ff3100', '#ff3800', '#ff3f00', '#ff4600', '#ff4d00', '#ff5400', '#ff5b00', '#ff6200', '#ff6900', '#ff7000', '#ff7700', '#ff7e00', '#ff8500', '#ff8c00', '#ff9100', '#ff9700', '#ff9d00', '#ffa300', '#ffa800', '#ffae00', '#ffb400', '#ffba00', '#ffbf00', '#ffc500', '#ffcb00', '#ffd100', '#ffd600', '#ffdc00', '#ffe200', '#ffe800', '#ffed00', '#fff300', '#fff900', '#ffff00', '#f2ff00', '#e5ff00', '#d8ff00', '#ccff00', '#bfff00', '#b2ff00', '#a5ff00', '#99ff00', '#8cff00', '#7fff00', '#72ff00', '#66ff00', '#59ff00', '#4cff00', '#3fff00', '#33ff00', '#26ff00', '#19ff00', '#0cff00', '#00ff00', '#0afc0a', '#15fa15', '#1ff81f', '#2af62a', '#34f434', '#3ff13f', '#49ef49', '#54ed54', '#5eeb5e', '#69e969', '#74e674', '#7ee47e', '#89e289', '#93e093', '#9ede9e', '#a8dba8', '#b3d9b3', '#bdd7bd', '#c8d5c8', '#d3d3d3']\n colors_ylorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', 
'#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#ffeea3', '#fff0a7', '#fff1ab', '#fff3ae', '#fff4b2', '#fff6b6', '#fff7b9', '#fff9bd', '#fffac1', '#fffcc4', '#fffdc8', '#ffffcc']\n colors_grorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', '#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#fbeaa4', '#f7e8a8', '#f4e6ac', '#f0e4b1', '#ece2b5', '#e9e0b9', '#e5ddbd', '#e1dbc2', '#ded9c6', '#dad7ca', '#d6d5ce', '#d3d3d3']\n colors_inferno = ['#000003', '#000004', '#000006', '#010007', '#010109', '#01010B', '#02010E', '#020210', '#030212', '#040314', '#040316', '#050418', '#06041B', '#07051D', '#08061F', '#090621', '#0A0723', '#0B0726', '#0D0828', '#0E082A', '#0F092D', '#10092F', '#120A32', '#130A34', '#140B36', '#160B39', '#170B3B', '#190B3E', '#1A0B40', '#1C0C43', '#1D0C45', '#1F0C47', '#200C4A', '#220B4C', '#240B4E', '#260B50', '#270B52', '#290B54', '#2B0A56', '#2D0A58', '#2E0A5A', '#300A5C', '#32095D', '#34095F', '#350960', '#370961', '#390962', '#3B0964', '#3C0965', '#3E0966', '#400966', '#410967', '#430A68', '#450A69', '#460A69', '#480B6A', '#4A0B6A', '#4B0C6B', '#4D0C6B', '#4F0D6C', '#500D6C', '#520E6C', '#530E6D', '#550F6D', '#570F6D', '#58106D', '#5A116D', '#5B116E', '#5D126E', '#5F126E', '#60136E', '#62146E', '#63146E', '#65156E', '#66156E', '#68166E', '#6A176E', '#6B176E', '#6D186E', '#6E186E', '#70196E', '#72196D', '#731A6D', '#751B6D', '#761B6D', '#781C6D', '#7A1C6D', '#7B1D6C', '#7D1D6C', '#7E1E6C', '#801F6B', '#811F6B', '#83206B', '#85206A', '#86216A', '#88216A', '#892269', '#8B2269', '#8D2369', '#8E2468', '#902468', '#912567', '#932567', '#952666', '#962666', '#982765', '#992864', '#9B2864', '#9C2963', '#9E2963', '#A02A62', '#A12B61', '#A32B61', '#A42C60', '#A62C5F', '#A72D5F', '#A92E5E', '#AB2E5D', '#AC2F5C', '#AE305B', '#AF315B', '#B1315A', '#B23259', '#B43358', '#B53357', '#B73456', '#B83556', '#BA3655', '#BB3754', '#BD3753', '#BE3852', '#BF3951', '#C13A50', '#C23B4F', '#C43C4E', '#C53D4D', '#C73E4C', '#C83E4B', '#C93F4A', '#CB4049', '#CC4148', '#CD4247', '#CF4446', '#D04544', '#D14643', '#D24742', '#D44841', '#D54940', '#D64A3F', '#D74B3E', '#D94D3D', '#DA4E3B', 
'#DB4F3A', '#DC5039', '#DD5238', '#DE5337', '#DF5436', '#E05634', '#E25733', '#E35832', '#E45A31', '#E55B30', '#E65C2E', '#E65E2D', '#E75F2C', '#E8612B', '#E9622A', '#EA6428', '#EB6527', '#EC6726', '#ED6825', '#ED6A23', '#EE6C22', '#EF6D21', '#F06F1F', '#F0701E', '#F1721D', '#F2741C', '#F2751A', '#F37719', '#F37918', '#F47A16', '#F57C15', '#F57E14', '#F68012', '#F68111', '#F78310', '#F7850E', '#F8870D', '#F8880C', '#F88A0B', '#F98C09', '#F98E08', '#F99008', '#FA9107', '#FA9306', '#FA9506', '#FA9706', '#FB9906', '#FB9B06', '#FB9D06', '#FB9E07', '#FBA007', '#FBA208', '#FBA40A', '#FBA60B', '#FBA80D', '#FBAA0E', '#FBAC10', '#FBAE12', '#FBB014', '#FBB116', '#FBB318', '#FBB51A', '#FBB71C', '#FBB91E', '#FABB21', '#FABD23', '#FABF25', '#FAC128', '#F9C32A', '#F9C52C', '#F9C72F', '#F8C931', '#F8CB34', '#F8CD37', '#F7CF3A', '#F7D13C', '#F6D33F', '#F6D542', '#F5D745', '#F5D948', '#F4DB4B', '#F4DC4F', '#F3DE52', '#F3E056', '#F3E259', '#F2E45D', '#F2E660', '#F1E864', '#F1E968', '#F1EB6C', '#F1ED70', '#F1EE74', '#F1F079', '#F1F27D', '#F2F381', '#F2F485', '#F3F689', '#F4F78D', '#F5F891', '#F6FA95', '#F7FB99', '#F9FC9D', '#FAFDA0', '#FCFEA4']\n colors_magma = ['#000003', '#000004', '#000006', '#010007', '#010109', '#01010B', '#02020D', '#02020F', '#030311', '#040313', '#040415', '#050417', '#060519', '#07051B', '#08061D', '#09071F', '#0A0722', '#0B0824', '#0C0926', '#0D0A28', '#0E0A2A', '#0F0B2C', '#100C2F', '#110C31', '#120D33', '#140D35', '#150E38', '#160E3A', '#170F3C', '#180F3F', '#1A1041', '#1B1044', '#1C1046', '#1E1049', '#1F114B', '#20114D', '#221150', '#231152', '#251155', '#261157', '#281159', '#2A115C', '#2B115E', '#2D1060', '#2F1062', '#301065', '#321067', '#341068', '#350F6A', '#370F6C', '#390F6E', '#3B0F6F', '#3C0F71', '#3E0F72', '#400F73', '#420F74', '#430F75', '#450F76', '#470F77', '#481078', '#4A1079', '#4B1079', '#4D117A', '#4F117B', '#50127B', '#52127C', '#53137C', '#55137D', '#57147D', '#58157E', '#5A157E', '#5B167E', '#5D177E', '#5E177F', '#60187F', '#61187F', '#63197F', '#651A80', '#661A80', '#681B80', '#691C80', '#6B1C80', '#6C1D80', '#6E1E81', '#6F1E81', '#711F81', '#731F81', '#742081', '#762181', '#772181', '#792281', '#7A2281', '#7C2381', '#7E2481', '#7F2481', '#812581', '#822581', '#842681', '#852681', '#872781', '#892881', '#8A2881', '#8C2980', '#8D2980', '#8F2A80', '#912A80', '#922B80', '#942B80', '#952C80', '#972C7F', '#992D7F', '#9A2D7F', '#9C2E7F', '#9E2E7E', '#9F2F7E', '#A12F7E', '#A3307E', '#A4307D', '#A6317D', '#A7317D', '#A9327C', '#AB337C', '#AC337B', '#AE347B', '#B0347B', '#B1357A', '#B3357A', '#B53679', '#B63679', '#B83778', '#B93778', '#BB3877', '#BD3977', '#BE3976', '#C03A75', '#C23A75', '#C33B74', '#C53C74', '#C63C73', '#C83D72', '#CA3E72', '#CB3E71', '#CD3F70', '#CE4070', '#D0416F', '#D1426E', '#D3426D', '#D4436D', '#D6446C', '#D7456B', '#D9466A', '#DA4769', '#DC4869', '#DD4968', '#DE4A67', '#E04B66', '#E14C66', '#E24D65', '#E44E64', '#E55063', '#E65162', '#E75262', '#E85461', '#EA5560', '#EB5660', '#EC585F', '#ED595F', '#EE5B5E', '#EE5D5D', '#EF5E5D', '#F0605D', '#F1615C', '#F2635C', '#F3655C', '#F3675B', '#F4685B', '#F56A5B', '#F56C5B', '#F66E5B', '#F6705B', '#F7715B', '#F7735C', '#F8755C', '#F8775C', '#F9795C', '#F97B5D', '#F97D5D', '#FA7F5E', '#FA805E', '#FA825F', '#FB8460', '#FB8660', '#FB8861', '#FB8A62', '#FC8C63', '#FC8E63', '#FC9064', '#FC9265', '#FC9366', '#FD9567', '#FD9768', '#FD9969', '#FD9B6A', '#FD9D6B', '#FD9F6C', '#FDA16E', '#FDA26F', '#FDA470', '#FEA671', '#FEA873', '#FEAA74', '#FEAC75', '#FEAE76', '#FEAF78', '#FEB179', '#FEB37B', '#FEB57C', 
'#FEB77D', '#FEB97F', '#FEBB80', '#FEBC82', '#FEBE83', '#FEC085', '#FEC286', '#FEC488', '#FEC689', '#FEC78B', '#FEC98D', '#FECB8E', '#FDCD90', '#FDCF92', '#FDD193', '#FDD295', '#FDD497', '#FDD698', '#FDD89A', '#FDDA9C', '#FDDC9D', '#FDDD9F', '#FDDFA1', '#FDE1A3', '#FCE3A5', '#FCE5A6', '#FCE6A8', '#FCE8AA', '#FCEAAC', '#FCECAE', '#FCEEB0', '#FCF0B1', '#FCF1B3', '#FCF3B5', '#FCF5B7', '#FBF7B9', '#FBF9BB', '#FBFABD', '#FBFCBF']\n colors_ylgnbl = ['#081d58', '#0a1e5d', '#0c2062', '#0f2267', '#11246c', '#142671', '#162876', '#182a7b', '#1b2c80', '#1d2e85', '#20308a', '#22328f', '#253494', '#243795', '#243b97', '#243e99', '#24429a', '#23459c', '#23499e', '#234c9f', '#2350a1', '#2253a3', '#2257a4', '#225aa6', '#225ea8', '#2162aa', '#2166ac', '#206aae', '#206fb0', '#1f73b2', '#1f77b4', '#1f7bb6', '#1e80b8', '#1e84ba', '#1d88bc', '#1d8cbe', '#1d91c0', '#2094c0', '#2397c0', '#269ac1', '#299dc1', '#2ca0c1', '#2fa3c2', '#32a6c2', '#35a9c2', '#38acc3', '#3bafc3', '#3eb2c3', '#41b6c4', '#46b7c3', '#4bb9c2', '#50bbc1', '#55bdc1', '#5abfc0', '#60c1bf', '#65c3be', '#6ac5be', '#6fc7bd', '#74c9bc', '#79cbbb', '#7fcdbb', '#85cfba', '#8bd1b9', '#91d4b9', '#97d6b8', '#9dd8b8', '#a3dbb7', '#a9ddb6', '#afdfb6', '#b5e2b5', '#bbe4b5', '#c1e6b4', '#c7e9b4', '#caeab3', '#cdebb3', '#d0ecb3', '#d3eeb3', '#d6efb2', '#daf0b2', '#ddf1b2', '#e0f3b2', '#e3f4b1', '#e6f5b1', '#e9f6b1', '#edf8b1', '#eef8b4', '#f0f9b7', '#f1f9bb', '#f3fabe', '#f4fac1', '#f6fbc5', '#f7fcc8', '#f9fccb', '#fafdcf', '#fcfdd2', '#fdfed5', '#ffffd9']\n colors_grorrd = ['#800026', '#850026', '#8a0026', '#8f0026', '#940026', '#990026', '#9e0026', '#a30026', '#a80026', '#ad0026', '#b20026', '#b70026', '#bd0026', '#c00225', '#c30424', '#c60623', '#c90822', '#cc0a21', '#d00d21', '#d30f20', '#d6111f', '#d9131e', '#dc151d', '#df171c', '#e31a1c', '#e51e1d', '#e7221e', '#e9271f', '#eb2b20', '#ed2f21', '#ef3423', '#f13824', '#f33c25', '#f54126', '#f74527', '#f94928', '#fc4e2a', '#fc532b', '#fc582d', '#fc5d2e', '#fc6330', '#fc6831', '#fc6d33', '#fc7234', '#fc7836', '#fc7d37', '#fc8239', '#fc873a', '#fd8d3c', '#fd903d', '#fd933e', '#fd9640', '#fd9941', '#fd9c42', '#fd9f44', '#fda245', '#fda546', '#fda848', '#fdab49', '#fdae4a', '#feb24c', '#feb54f', '#feb853', '#febb56', '#febf5a', '#fec25d', '#fec561', '#fec864', '#fecc68', '#fecf6b', '#fed26f', '#fed572', '#fed976', '#feda79', '#fedc7d', '#fede80', '#fedf84', '#fee187', '#fee38b', '#fee48e', '#fee692', '#fee895', '#fee999', '#feeb9c', '#ffeda0', '#fbeaa4', '#f7e8a8', '#f4e6ac', '#f0e4b1', '#ece2b5', '#e9e0b9', '#e5ddbd', '#e1dbc2', '#ded9c6', '#dad7ca', '#d6d5ce', '#d3d3d3']\n colors = colors_grorrd\n color_len = len(colors) -1\n for clust in clustdict.keys():\n\n # Select top interactions based on its mean frequency. 
Also asign color based on mean value\n df_clust = df.filter(items = clustdict[clust] + ['APosition1', 'APosition2', 'BPosition1', 'BPosition2','CPosition1', 'CPosition2','FPosition1', 'FPosition2',])\n df_clust['mean'] = df_clust.mean(axis = 1, numeric_only = True)\n mean_threshold = min(df_clust['mean'].nlargest(20).tolist())\n df_clust['color'] = df_clust['mean'].apply(lambda x: colors[color_len-round(x*color_len/100)]) #There are 101 colors avalible in list\n\n #Filter top 5 in df_clust\n df_clust = df_clust.nlargest(20,'mean')\n\n # 'Edge' entry for json file\n df_dict = pd.DataFrame(columns = [\"name1\", \"name2\", \"frames\"])\n df_dict['name1'] = df_clust['APosition1'] \n df_dict['name2'] = df_clust['APosition2']\n df_dict['frames'] = [[1]]*len(df_dict)\n df_dict['color'] = df_clust['color']\n df_dict['value'] = df_clust['mean']\n edges = df_dict.to_dict(orient=\"records\")\n\n # Appending edges to flare plot template, if any submitted\n if flare_template:\n flare_template['edges'] = edges\n jsondict = flare_template\n else:\n jsondict = { 'edges' : edges }\n\n #'Edge' multi-entries, based on the 4 GPCR nomenclatures\n for leter in ['A', 'B', 'C', 'F']:\n df_dict = pd.DataFrame(columns = [\"name1\", \"name2\", \"frames\"])\n df_dict['name1'] = df_clust[leter+'Position1'] \n df_dict['name2'] = df_clust[leter+'Position2']\n df_dict['frames'] = [[1]]*len(df_dict)\n df_dict['color'] = df_clust['color']\n df_dict['value'] = df_clust['mean']\n leter_edges = df_dict.to_dict(orient=\"records\")\n\n #Appending edges\n if flare_template:\n flare_template[leter+'edges'] = leter_edges\n jsondict = flare_template\n else:\n jsondict = { leter+'edges' : leter_edges }\n\n #Writing json\n jsonpath = folderpath + clust + \".json\"\n with open(jsonpath, 'w') as jsonfile:\n dump(jsondict, jsonfile, ensure_ascii=False, indent = 4)", "def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass", "def load_colors():\n\n print \"Color\"\n\n for key, value in css3_hex_to_names.items():\n color_hex, color_name = key, value\n color = Color(color_hex=color_hex,\n 
color_name=color_name)\n\n db.session.add(color)\n\n # Once we're done, we should commit our work\n db.session.commit()", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "def _makeGraphs(cnames=(\"cnt\",\"temp\"), finame=\"graf.png\"):\n colors=[\"660000\",\"ff0000\", \"770000\"]\n if len(cnames)==1: finame= cnames[0]+'.png'\n ri= open(\"graph.txt\",\"w\")\n #ri.write(\"graph graf.png --start %d -e %d --step 60 -w 600 \"%\n # (time0, time0+60*60))\n #ri.write(\"graph graf.png -s teatime --step 60 -w 600 \")\n #ri.write(\"graph graf.png -s 17:55 --step 60 -w 600 \") # -10 hours max.\n # time: -s now-10h -s 1:0 -e 4:0\n #ri.write(\"graph graf.png -s now-10h --step 60 -w 600 \")\n ri.write(\"graph \"+finame+\" -s now-2d --step 60 -w 600 \")\n ix=0\n while ix<len(cnames):\n cn=cnames[ix]\n ri.write(\"DEF:%s=%s:%s:AVERAGE \"% (cn, RRDDB, cn))\n ix=ix+1\n ix=0\n while ix<len(cnames):\n cn=cnames[ix]\n ri.write(\"LINE1:%s#%s:%s \"%(cn,colors[ix],cn))\n ix=ix+1\n ri.close()\n os.system(\"rrdtool - <graph.txt\")", "def create_network_and_stats(\r\n dir_path, map_lines, otu_table_fp, prefs, data, background_color, label_color):\r\n cat_by_sample, sample_by_cat, num_meta, meta_dict, labels, node_labels,\\\r\n label_list = get_sample_info(map_lines)\r\n con_by_sample, node_file, edge_file, red_node_file,\\\r\n red_edge_file, otu_dc, degree_counts, sample_dc, \\\r\n = get_connection_info(otu_table_fp, num_meta, meta_dict)\r\n num_con_cat, num_con = get_num_con_cat(con_by_sample, cat_by_sample)\r\n num_cat = get_num_cat(sample_by_cat, con_by_sample.keys())\r\n dir_path = os.path.join(dir_path, \"otu_network\")\r\n make_table_file(edge_file, labels, dir_path, \"real_edge_table.txt\")\r\n make_table_file(node_file, node_labels, dir_path, \"real_node_table.txt\")\r\n make_table_file(red_edge_file, labels, dir_path,\r\n \"real_reduced_edge_table.txt\")\r\n make_table_file(red_node_file, node_labels, dir_path,\r\n \"real_reduced_node_table.txt\")\r\n make_stats_files(\r\n sample_dc,\r\n otu_dc,\r\n degree_counts,\r\n num_con_cat,\r\n num_con,\r\n num_cat,\r\n cat_by_sample,\r\n dir_path)\r\n if background_color == 'white':\r\n background_color = Color('white', (255, 255, 255))\r\n elif background_color == 'black':\r\n background_color = Color('black', (0, 0, 0))\r\n else:\r\n try:\r\n background_color = data_colors[background_color]\r\n except KeyError:\r\n raise KeyError(\"background_color unknown\")\r\n\r\n if label_color == 'white':\r\n label_color = Color('white', (255, 255, 255))\r\n elif label_color == 'black':\r\n label_color = Color('black', (0, 0, 0))\r\n else:\r\n try:\r\n label_color = data_colors[label_color]\r\n except KeyError:\r\n raise KeyError(\"label_color unknown\")\r\n\r\n make_props_files(\r\n labels,\r\n label_list,\r\n dir_path,\r\n data,\r\n background_color,\r\n label_color,\r\n prefs)", "def creatTestData(testImage):\n \n bgrArray = testImage[:,:,0:3] # make sure that there is only 3 channels (BGR) per pixel\n \n bgrArray=bgrArray.reshape(np.shape(bgrArray)[0]*np.shape(bgrArray)[1],1,3) # transform the rectangular array into a column with one pixel per row\n bgrArray=bgrArray.astype('uint8')\n \n #Calculate 
other color properties from the bgr values\n hsvArray = cv2.cvtColor(bgrArray,cv2.COLOR_BGR2HSV)\n\n LabArray=cv2.cvtColor(bgrArray,cv2.COLOR_BGR2Lab) \n \n #Save everything in a big array\n fusion=np.concatenate((bgrArray, hsvArray), axis=1)\n fusion=np.concatenate((fusion, LabArray),axis=1)\n fusion=fusion.reshape((len(fusion),9))\n\n return fusion", "def create_world(size_x = 100, size_y=100):\n colors = np.random.randint(0,2,(size_x,size_y)).tolist()\n for row in range(len(colors)):\n for col in range(len(colors[row])):\n if (colors[row][col]== 1):\n colors[row][col] = 'R'\n else:\n colors[row][col] = 'G'\n\n r = [[10.0 for i in range(size_y)] for i in range(size_x)]\n g = [[10.0 for i in range(size_y)] for i in range(size_x)]\n b = [[10.0 for i in range(size_y)] for i in range(size_x)]\n RGB = []\n for i in range(size_x):\n for j in range(size_y):\n if colors[i][j] == 'R':\n r[i][j] = 255.0\n else:\n b[i][j] = 255.0\n RGB.append(b[i][j])\n RGB.append(r[i][j])\n RGB.append(g[i][j])\n \n RGB = np.array(RGB).reshape(size_x,size_y,3)\n return RGB, colors", "def arrangeData(input_dir, output_root, target='.shp', train_ratio=0.7, val_ratio=0.2, test_ratio=0.1, overwrite=False):\n\n plots = []\n train_dir = output_root + '/train'\n val_dir = output_root + '/val'\n test_dir = output_root + '/test'\n\n if not os.path.exists(train_dir):\n os.makedirs(train_dir)\n if not os.path.exists(val_dir):\n os.makedirs(val_dir)\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n for i in sorted(os.listdir(input_dir)):\n if not i.endswith('.shp'):\n continue\n tls_id = extractTLSID(i)\n plots.append(tls_id)\n train, val, test = trainValTest(plots, train_ratio=train_ratio, val_ratio=val_ratio, test_ratio=test_ratio)\n for i in sorted(os.listdir(input_dir)):\n input_file = input_dir + '/' + i\n tls_id = extractTLSID(i)\n if tls_id in test:\n output_file = test_dir + '/' + i\n elif tls_id in val:\n output_file = val_dir + '/' + i\n else:\n output_file = train_dir + '/' + i\n if os.path.exists(output_file) and overwrite==False:\n continue\n shutil.copy(input_file, output_file)", "def load_and_shape_data(self, path_to_load):\n\n # Initialize the dictionary for the loaded files\n loaded_file = {}\n if '.csv' in path_to_load:\n loaded_file[self.mode_name] = load_file(path_to_load)\n else:\n files_to_load = get_paths(path_to_load, ext='')\n # Load files and get names without file extension or directory\n for f in files_to_load:\n f_name = f.split('/')[-1].split('.')[0]\n if f_name in self.required_files or f_name in self.full_roi_list:\n loaded_file[f_name] = load_file(f)\n\n # Initialize matrices for features\n shaped_data = {}.fromkeys(self.required_files)\n for key in shaped_data:\n shaped_data[key] = np.zeros(self.required_files[key])\n\n # Populate matrices that were no initialized as []\n for key in shaped_data:\n if key == 'structure_masks':\n # Convert dictionary of masks into a tensor (necessary for tensorflow)\n for roi_idx, roi in enumerate(self.full_roi_list):\n if roi in loaded_file.keys():\n np.put(shaped_data[key][roi_idx], loaded_file[roi], int(1))#self.num_rois * loaded_file[roi] + roi_idx\n elif key == 'possible_dose_mask':\n np.put(shaped_data[key], loaded_file[key], int(1))\n elif key == 'voxel_dimensions':\n shaped_data[key] = loaded_file[key]\n else: # Files with shape\n np.put(shaped_data[key], loaded_file[key]['indices'], loaded_file[key]['data'])\n\n return shaped_data", "def split(directory='', name=''):\n d = directory\n r_path = build_path(d, path.splitext(name)[0] + 
'_r.png')\n g_path = build_path(d, path.splitext(name)[0] + '_g.png')\n b_path = build_path(d, path.splitext(name)[0] + '_b.png')\n a_path = build_path(d, path.splitext(name)[0] + '_a.png')\n Image.open(build_path(d, name)).convert('RGBA').getchannel(0).save(r_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(1).save(g_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(2).save(b_path)\n Image.open(build_path(d, name)).convert('RGBA').getchannel(3).save(a_path)", "def reconstruct_folder(data_root_paths, pixel_size, na, emission_wavelengths, excitation_wavelengths,\n affine_data_paths, otf_data_fname, dmd_pattern_data_fpath,\n channel_inds=None, crop_image=False, img_centers=None,\n crop_sizes=None, use_scmos_cal=False, scmos_calibration_file=None, widefield_only=False,\n nangles=3, nphases=3, npatterns_ignored=0, saving=True,\n zinds_to_use=None, tinds_to_use=None, xyinds_to_use=None,\n save_tif_stack=True, **kwargs):\n\n nfolders = len(data_root_paths)\n if nfolders == 0:\n raise ValueError(\"No folder paths were provided.\")\n\n ncolors = len(emission_wavelengths)\n if ncolors == 0:\n raise ValueError(\"No wavelength channels were provided.\")\n\n if channel_inds is None:\n channel_inds = list(range(ncolors))\n\n # ensure crop_sizes is a list the same size as number of folders\n if not isinstance(crop_sizes, list):\n crop_sizes = [crop_sizes]\n\n if len(crop_sizes) == 1 and nfolders > 1:\n crop_sizes = crop_sizes * nfolders\n\n if len(img_centers) == 1 and nfolders > 1:\n img_centers = img_centers * nfolders\n\n # ############################################\n # load affine data\n # ############################################\n affine_xforms = []\n for p in affine_data_paths:\n with open(p, 'rb') as f:\n affine_xforms.append(pickle.load(f)['affine_xform'])\n\n # ############################################\n # load DMD patterns frequency and phase data\n # ############################################\n frqs_dmd = np.zeros((ncolors, nangles, 2))\n phases_dmd = np.zeros((ncolors, nangles, nphases))\n for kk in range(ncolors):\n ppath = dmd_pattern_data_fpath[kk]\n xform = affine_xforms[kk]\n\n with open(ppath, 'rb') as f:\n pattern_data = pickle.load(f)\n\n # DMD intensity frequency and phase (twice electric field frq/phase)\n frqs_dmd[kk] = 2 * pattern_data['frqs']\n phases_dmd[kk] = 2 * pattern_data['phases']\n dmd_nx = pattern_data['nx']\n dmd_ny = pattern_data['ny']\n\n # ############################################\n # load OTF data\n # ############################################\n with open(otf_data_fname, 'rb') as f:\n otf_data = pickle.load(f)\n otf_p = otf_data['fit_params']\n\n if len(otf_p) == 1:\n otf_fn = lambda f, fmax: 1 / (1 + (f / fmax * otf_p[0]) ** 2) * \\\n psf.circ_aperture_otf(f, 0, na, 2 * na / fmax)\n else:\n otf_fn = lambda f, fmax: 1 / (\n 1 + (f / fmax * otf_p[0]) ** 2 + (f / fmax * otf_p[1]) ** 4 + (f / fmax * otf_p[2]) ** 6 +\n (f / fmax * otf_p[3]) ** 8) * psf.circ_aperture_otf(f, 0, na, 2 * na / fmax)\n # ############################################\n # load camera calibration file, if we need it\n # ############################################\n if use_scmos_cal:\n with open(scmos_calibration_file, 'rb') as f:\n data = pickle.load(f)\n gain_map = data['gains']\n offsets = data['offsets']\n #varmap = data['vars']\n\n # ############################################\n # SIM images\n # ############################################\n if not crop_image:\n crop_sizes = [np.nan] * len(data_root_paths)\n img_centers = [[np.nan, 
np.nan]] * len(data_root_paths)\n\n for rpath, crop_size, img_center in zip(data_root_paths, crop_sizes, img_centers):\n folder_path, folder = os.path.split(rpath)\n print(\"# ################################################################################\")\n print(\"analyzing folder: %s\" % folder)\n print(\"located in: %s\" % folder_path)\n\n tstamp = tools.get_timestamp()\n # path to store processed results\n if saving:\n sim_results_path = os.path.join(rpath, '%s_sim_reconstruction' % tstamp)\n if not os.path.exists(sim_results_path):\n os.mkdir(sim_results_path)\n print(\"save directory: %s\" % sim_results_path)\n\n # copy useful data files here\n for kk in range(ncolors):\n # copy affine data here\n _, fname = os.path.split(affine_data_paths[kk])\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(affine_data_paths[kk], fpath)\n\n # copy otf data here\n _, fname = os.path.split(otf_data_fname)\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(otf_data_fname, fpath)\n\n # copy DMD pattern data here\n _, fname = os.path.split(dmd_pattern_data_fpath[kk])\n fpath = os.path.join(sim_results_path, fname)\n shutil.copyfile(dmd_pattern_data_fpath[kk], fpath)\n\n # load metadata\n metadata, dims, summary = tools.parse_mm_metadata(rpath)\n start_time = datetime.datetime.strptime(summary['StartTime'], '%Y-%d-%m;%H:%M:%S.%f')\n nz = dims['z']\n nxy = dims['position']\n nt = dims['time']\n\n # use this construction as zinds can be different for different folders\n if zinds_to_use is None:\n zinds_to_use_temp = range(nz)\n else:\n zinds_to_use_temp = zinds_to_use\n nz_used = len(zinds_to_use_temp)\n\n if tinds_to_use is None:\n tinds_to_use_temp = range(nt)\n else:\n tinds_to_use_temp = tinds_to_use\n nt_used = len(tinds_to_use_temp)\n\n if xyinds_to_use is None:\n xyinds_to_use_temp = range(nxy)\n else:\n xyinds_to_use_temp = xyinds_to_use\n nxy_used = len(xyinds_to_use_temp)\n\n if pixel_size is None:\n pixel_size = metadata['PixelSizeUm'][0]\n\n # set up image size\n # load one file to check size\n fname = os.path.join(rpath, metadata['FileName'].values[0])\n im, _ = tools.read_tiff(fname, [metadata['ImageIndexInFile'].values[0]])\n _, ny_raw, nx_raw = im.shape\n if crop_image:\n # or pick ROI\n roi = tools.get_centered_roi(img_center, [crop_size, crop_size])\n\n # check points don't exceed image size\n if roi[0] < 0:\n roi[0] = 0\n if roi[1] > ny_raw:\n roi[1] = ny_raw\n if roi[2] < 0:\n roi[2] = 0\n if roi[3] > nx_raw:\n roi[3] = nx_raw\n else:\n roi = [0, ny_raw, 0, nx_raw]\n\n ny = roi[1] - roi[0]\n nx = roi[3] - roi[2]\n\n # arrays to save results\n imgs_sr = []\n imgs_os = []\n imgs_wf = []\n imgs_deconvolved = []\n counter = 1\n for kk in range(ncolors):\n sim_options = {'pixel_size': pixel_size, 'wavelength': emission_wavelengths[kk], 'na': na}\n\n # estimate otf\n fmax = 1 / (0.5 * emission_wavelengths[kk] / na)\n fx = tools.get_fft_frqs(nx, sim_options['pixel_size'])\n fy = tools.get_fft_frqs(ny, sim_options['pixel_size'])\n ff = np.sqrt(fx[None, :] ** 2 + fy[:, None] ** 2)\n otf = otf_fn(ff, fmax)\n otf[ff >= fmax] = 0\n\n # guess frequencies/phases\n frqs_guess = np.zeros((nangles, 2))\n phases_guess = np.zeros((nangles, nphases))\n for ii in range(nangles):\n for jj in range(nphases):\n # estimate frequencies based on affine_xform\n frqs_guess[ii, 0], frqs_guess[ii, 1], phases_guess[ii, jj] = \\\n affine.xform_sinusoid_params_roi(frqs_dmd[kk, ii, 0], frqs_dmd[kk, ii, 1],\n phases_dmd[kk, ii, jj], [dmd_ny, dmd_nx], roi, xform)\n\n # convert from 
1/mirrors to 1/um\n frqs_guess = frqs_guess / pixel_size\n\n # analyze pictures\n for ii in tinds_to_use_temp:\n for bb in xyinds_to_use_temp:\n for aa in zinds_to_use_temp:\n tstart = time.process_time()\n\n identifier = \"%.0fnm_nt=%d_nxy=%d_nz=%d\" % (excitation_wavelengths[kk] * 1e3, ii, bb, aa)\n file_identifier = \"nc=%d_nt=%d_nxy=%d_nz=%d\" % (kk, ii, bb, aa)\n\n # where we will store results for this particular set\n if not widefield_only:\n sim_diagnostics_path = os.path.join(sim_results_path, identifier)\n if not os.path.exists(sim_diagnostics_path):\n os.mkdir(sim_diagnostics_path)\n\n # find images and load them\n raw_imgs = tools.read_dataset(metadata, z_indices=aa, xy_indices=bb, time_indices=ii,\n user_indices={\"UserChannelIndex\": channel_inds[kk],\n \"UserSimIndex\": list(range(npatterns_ignored, npatterns_ignored + nangles * nphases))})\n\n # error if we have wrong number of images\n if np.shape(raw_imgs)[0] != (nangles * nphases):\n raise ValueError(\"Found %d images, but expected %d images at channel=%d,\"\n \" zindex=%d, tindex=%d, xyindex=%d\" % (\n np.shape(raw_imgs)[0], nangles * nphases,\n channel_inds[kk], aa, ii, bb))\n\n # optionally convert from ADC to photons\n # todo: not very useful to do this way...\n if use_scmos_cal:\n imgs_sim = camera_noise.adc2photons(raw_imgs, gain_map, offsets)\n else:\n imgs_sim = raw_imgs\n\n # reshape to [nangles, nphases, ny, nx]\n imgs_sim = imgs_sim.reshape((nangles, nphases, raw_imgs.shape[1], raw_imgs.shape[2]))\n imgs_sim = imgs_sim[:, :, roi[0]:roi[1], roi[2]:roi[3]]\n\n # instantiate reconstruction object\n r = SimImageSet(sim_options, imgs_sim, frqs_guess, phases_guess=phases_guess, otf=otf,\n save_dir=sim_diagnostics_path, **kwargs)\n\n # if not saving stack, maybe want to handle in class?\n if saving and not save_tif_stack:\n fname = os.path.join(sim_results_path, \"sim_os_%s.tif\" % file_identifier)\n tools.save_tiff(r.imgs_os, fname, dtype='float32', datetime=start_time)\n\n fname = os.path.join(sim_results_path, \"widefield_%s.tif\" % file_identifier)\n tools.save_tiff(r.widefield, fname, dtype='float32', datetime=start_time)\n else:\n # store widefield and os\n imgs_os.append(r.imgs_os)\n imgs_wf.append(r.widefield)\n\n if not widefield_only:\n # do reconstruction\n r.reconstruct()\n r.plot_figs()\n\n if saving and not save_tif_stack:\n fname = os.path.join(sim_results_path, \"sim_sr_%s.tif\" % file_identifier)\n tools.save_tiff(r.img_sr, fname, dtype='float32', datetime=start_time)\n\n fname = os.path.join(sim_results_path, \"deconvolved_%s.tif\" % file_identifier)\n tools.save_tiff(r.widefield_deconvolution, fname, dtype='float32', datetime=start_time)\n else:\n # store sr and deconvolved\n imgs_sr.append(r.img_sr)\n imgs_deconvolved.append(r.widefield_deconvolution)\n\n # save reconstruction summary data\n r.save_result(os.path.join(sim_diagnostics_path, \"sim_reconstruction_params.pkl\"))\n\n tend = time.process_time()\n print(\"%d/%d from %s in %0.2fs\" % (counter, ncolors * nt_used * nxy_used * nz_used, folder, tend - tstart))\n\n counter += 1\n\n # #################################\n # save data for all reconstructed files\n # #################################\n if saving and save_tif_stack:\n\n # todo: want to include metadata in tif.\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'widefield.tif'))\n imgs_wf = np.asarray(imgs_wf)\n wf_to_save = np.reshape(imgs_wf, [ncolors, nt_used, nz_used, imgs_wf[0].shape[-2], imgs_wf[0].shape[-1]])\n tools.save_tiff(wf_to_save, fname, dtype='float32', 
axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'sim_os.tif'))\n imgs_os = np.asarray(imgs_os)\n sim_os = np.reshape(imgs_os, [ncolors, nt_used, nz_used, imgs_os[0].shape[-2], imgs_os[0].shape[-1]])\n tools.save_tiff(sim_os, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n if not widefield_only:\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'sim_sr.tif'))\n imgs_sr = np.asarray(imgs_sr)\n sim_to_save = np.reshape(imgs_sr, [ncolors, nt_used, nz_used, imgs_sr[0].shape[-2], imgs_sr[0].shape[-1]])\n tools.save_tiff(sim_to_save, fname, dtype='float32', axes_order=\"CTZYX\", hyperstack=True,\n datetime=start_time)\n\n fname = tools.get_unique_name(os.path.join(sim_results_path, 'deconvolved.tif'))\n imgs_deconvolved = np.asarray(imgs_deconvolved)\n deconvolved_to_save = np.reshape(imgs_deconvolved, [ncolors, nt_used, nz_used, imgs_deconvolved[0].shape[-2],\n imgs_deconvolved[0].shape[-1]])\n tools.save_tiff(deconvolved_to_save, fname, dtype='float32', axes_order='CTZYX', hyperstack=True,\n datetime=start_time)\n\n return imgs_sr, imgs_wf, imgs_deconvolved, imgs_os", "def plot_shapes(shapes, colors, title=\"Shape Model\", save=False):\n cv2.namedWindow(title)\n shapes = [scale_by_param(scale_to_unit(dshape), 1000) for dshape in shapes]\n #shapes = [shape.scale_to_unit().scale(1000) for shape in shapes]\n\n max_x = int(max([dshape[:, 0].max() for dshape in shapes]))\n max_y = int(max([dshape[:, 1].max() for dshape in shapes]))\n min_x = int(min([dshape[:, 0].min() for dshape in shapes]))\n min_y = int(min([dshape[:, 1].min() for dshape in shapes]))\n\n img = np.ones((max_y-min_y+20, max_x-min_x+20, 3), np.uint8)*255\n for shape_num, shape in enumerate(shapes):\n points = shape\n for i in range(len(points)):\n cv2.line(img, (int(points[i, 0]-min_x+10), int(points[i, 1]-min_y+10)),\n (int(points[(i + 1) % 40, 0]-min_x+10), int(points[(i + 1) % 40, 1]-min_y+10)),\n colors[shape_num], thickness=1, lineType=8)\n\n cv2.imshow(title, img)\n cv2.waitKey()\n if save:\n cv2.imwrite('Plot/'+title+'.png', img)\n cv2.destroyAllWindows()", "def create_data_base():\n\n\tscript_files = []\n\tjson_files = []\n\t\n\t# get script files list\n\tfor file in os.listdir(\"learned_objects_scripts/\"):\n\t\tif file.endswith(\".script\"):\n\t\t\tscript_files.append(file)\n\n\t# get json files list\n\tfor file in os.listdir(\"object_models/\"):\n\t\tif file.endswith(\".json\"):\n\t\t\tjson_files.append(file)\n\t\n\t# create json file for new objects\n\tmodel_created = False\n\tfor file in script_files:\n\t\tif \"{}.json\".format(file[:-7]) not in json_files:\n\t\t\twith open(\"object_models/{}.json\".format(file[:-7]), 'w') as outfile:\n\t\t\t\tobj_model = object_script_to_model(\"learned_objects_scripts/\" + file)\n\t\t\t\tjson.dump(obj_model, outfile)\n\t\t\t\tmodel_created = True\n\t\t\t\tprint(\"model created for\", file)\n\tif not model_created:\n\t\tprint(\"data base is already up to date\")", "def __init__(self, data_dir, pairs_filepath, img_ext, num_random_images_per_folder):\n self.data_dir = data_dir\n self.pairs_filepath = pairs_filepath\n self.img_ext = img_ext\n self.num_random_images_per_folder = num_random_images_per_folder\n\n if os.name == 'nt':\n self.separator = \"\\\\\"\n else:\n self.separator = \"/\"\n\n self.remaining = []\n for name in os.listdir(self.data_dir):\n if os.path.isdir(os.path.join(self.data_dir, name)):\n self.remaining.append(name)", "def 
make_shape(n_length,pathy):\n\n os.chdir(pathy)\n\n sample_complex = random.choice([f for f in os.listdir(pathy) if not f.startswith('.')])\n\n print(sample_complex)\n\n new_shape = (n_length,)+ np.load(sample_complex).shape\n\n return(new_shape)", "def createDataset_inpainting(outputPath, imagePathList, labelList):\n assert (len(imagePathList) == len(box_x_list) == len(box_y_list))\n nSamples = len(imagePathList)\n if not os.path.exists(outputPath):\n os.mkdir(outputPath)\n env = lmdb.open(outputPath, map_size=1099511627776)\n cache = {}\n cnt = 1\n for i in range(nSamples):\n imagePath = imagePathList[i]\n box_x = box_x_list[i]\n box_y = box_y_list[i]\n if len(box_x) == 0:\n continue\n if not os.path.exists(imagePath):\n print('%s does not exist' % imagePath)\n continue\n with open(imagePath, 'rb') as f:\n imageBin = f.read()\n\n imageKey = 'image-%09d' % cnt\n cache[imageKey] = imageBin\n box_x_Key = 'boxes_x-%09d' % cnt\n box_y_Key = 'boxes_y-%09d' % cnt\n cache[box_x_Key] = box_x.encode()\n cache[box_y_Key] = box_y.encode()\n\n if labelList:\n labelKey = 'label-%09d' % cnt\n cache[labelKey] = labelList[i].encode()\n if region_mask_list:\n region_mask_Key = 'region_mask-%09d' % cnt\n cache[region_mask_Key] = open(region_mask_list[i], 'rb').read()\n if pixel_mask_list:\n pixel_mask_Key = 'pixel_mask-%09d' % cnt\n cache[pixel_mask_Key] = open(pixel_mask_list[i], 'rb').read()\n # embed()\n if cnt % 1000 == 0:\n writeCache(env, cache)\n cache = {}\n print('Written %d / %d' % (cnt, nSamples))\n cnt += 1\n nSamples = cnt - 1\n cache['num-samples'] = str(nSamples).encode()\n writeCache(env, cache)\n print('Created dataset with %d samples' % nSamples)", "def process_data(datapath, batch_sizes=[64, 32, 32],\n shuffles=[True, False, False]):\n train_dir = os.path.join(datapath, 'train')\n valid_dir = os.path.join(datapath, 'valid')\n test_dir = os.path.join(datapath, 'test')\n\n data_transforms = {'train': transforms.Compose([\n transforms.Resize(256),\n transforms.RandomHorizontalFlip(0.3),\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ])}\n\n image_dataset = {\n 'train': datasets.ImageFolder(\n train_dir, transform=data_transforms['train']\n ),\n 'valid': datasets.ImageFolder(\n valid_dir, transform=data_transforms['valid']\n ),\n 'test': datasets.ImageFolder(\n test_dir, transform=data_transforms['test']\n )\n }\n\n dataloaders = {\n 'train': torch.utils.data.DataLoader(\n image_dataset['train'],\n batch_size=batch_sizes[0],\n shuffle=shuffles[0]\n ),\n 'valid': torch.utils.data.DataLoader(\n image_dataset['valid'],\n batch_size=batch_sizes[1],\n shuffle=shuffles[1]\n ),\n 'test': torch.utils.data.DataLoader(\n image_dataset['test'],\n batch_size=batch_sizes[2],\n shuffle=shuffles[2]\n )\n }\n\n return image_dataset, dataloaders", "def gen_batch_function(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n 
re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n other_road_color = np.array([0,0,0])\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n image_flip = np.flip(image, axis=1)\n \n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n gt_image_flip = np.flip(gt_image, axis=1)\n \n #---------- classification : single road---------------------\n #gt_bg = np.all(gt_image == background_color, axis=2)\n #gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n #gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n #------------------------------------------------------------\n \n \n #---------- classification : multi road----------------------\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg_flip = np.all(gt_image_flip == background_color, axis=2)\n \n # road segment\n road_mask = ((gt_image == other_road_color) | (gt_image == background_color))\n gt_road = np.invert(np.all(road_mask, axis=2))\n \n # flip of road segment\n road_mask_flip = ((gt_image_flip == other_road_color) | (gt_image_flip == background_color))\n gt_road_flip = np.invert(np.all(road_mask_flip, axis=2))\n \n # other_road segment\n oher_road_mask = (gt_image == other_road_color)\n gt_other_road = np.all(oher_road_mask, axis=2)\n \n # flip of other_road segment\n other_road_mask_flip = (gt_image_flip == other_road_color)\n gt_oher_road_flip = np.all(other_road_mask_flip, axis=2)\n\n # reshaping segments\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_other_road = gt_other_road.reshape(*gt_other_road.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n \n # reshaping flip segments\n gt_bg_flip = gt_bg_flip.reshape(*gt_bg_flip.shape, 1)\n gt_oher_road_flip = gt_oher_road_flip.reshape(*gt_oher_road_flip.shape, 1)\n gt_road_flip = gt_road_flip.reshape(*gt_road_flip.shape, 1)\n \n # concatenating classes bg, road, other_road\n gt_image = np.concatenate((gt_bg, gt_road, gt_other_road), axis=2)\n gt_image_flip = np.concatenate((gt_bg_flip, gt_road_flip, gt_oher_road_flip), axis=2)\n \n images.append(image)\n images.append(image_flip)\n \n gt_images.append(gt_image)\n gt_images.append(gt_image_flip)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "def generatePolygons():", "def crescent_data(num_data=200, seed=default_seed):\r\n np.random.seed(seed=seed)\r\n sqrt2 = np.sqrt(2)\r\n # Rotation matrix\r\n R = np.array([[sqrt2 / 2, -sqrt2 / 2], [sqrt2 / 2, sqrt2 / 2]])\r\n # Scaling matrices\r\n scales = []\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append(np.array([[3, 0], [0, 1]]))\r\n scales.append([[1, 0], [0, 3]])\r\n scales.append([[1, 0], [0, 3]])\r\n means = []\r\n means.append(np.array([4, 4]))\r\n means.append(np.array([0, 4]))\r\n means.append(np.array([-4, -4]))\r\n means.append(np.array([0, -4]))\r\n\r\n Xparts = []\r\n num_data_part = []\r\n num_data_total = 0\r\n for i in range(0, 4):\r\n num_data_part.append(round(((i + 1) * num_data) / 4.))\r\n num_data_part[i] -= num_data_total\r\n part = np.random.normal(size=(num_data_part[i], 2))\r\n part = np.dot(np.dot(part, scales[i]), R) + means[i]\r\n Xparts.append(part)\r\n num_data_total += num_data_part[i]\r\n X = 
np.vstack((Xparts[0], Xparts[1], Xparts[2], Xparts[3]))\r\n\r\n Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1))))\r\n return {'X':X, 'Y':Y, 'info': \"Two separate classes of data formed approximately in the shape of two crescents.\"}", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def get_path_class_shapes(self):\n df = self.df_roi\n self.tumor_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Tumor\"]['geometry']])\n self.stroma_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Stroma\"]['geometry']])\n self.dcis_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Other\"]['geometry']]) \n\n # path_class_qupath_names = [\"Tumor\", \"Stroma\", \"Other\"]\n # for path_class in path_class_qupath_names:\n # mpolygon = MultiPolygon([self._get_shape(i) for i in df[df.class_ == path_class]['geometry']])\n\n # # replace name\n # if path_class == \"Other\":\n # path_class = \"dcis\"\n\n # attr_name = path_class.lower() + \"_shape\"\n # setattr(self, path_class, mpolygon)", "def create_ds9_regions(cat_name,output_fname,color,radius,Type):\n cat=ascii.read('%s.txt' % cat_name)\n with open('%s.reg' % output_fname, 'w') as f:\n f.write('global color=%s dashlist=8 3 width=1\\n' % color)\n f.write('image\\n')\n if Type == 'input':\n for x,y,mag in zip(cat['COORD_XPIXEL'],cat['COORD_YPIXEL'],cat['MAG']):\n f.write('circle(%d,%d,%d) # text = {%.2f}\\n' %(x,y,radius,mag))\n elif Type=='detected':\n for x,y,mag in zip(cat['x_coord_det'],cat['y_coord_det'],cat['mag_det']):\n f.write('circle(%d,%d,%d) # text = { %.2f}\\n' %(x,y,radius,mag))\n elif Type=='false_detection':\n for x,y,mag in zip(cat['x_coord'],cat['y_coord'],cat['mag_det']):\n f.write('circle(%d,%d,%d) # text = { %.2f}\\n' %(x,y,radius,mag))", "def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)", "def data_frame_creator(self):\n\n rgb_dir = [\n self.dataset_address + sequence_f + rgb_f\n for rgb_f in self.rgb_folder for sequence_f in self.sequence_folder\n ]\n rgb_data = [\n rgb_d + rgb for 
rgb_d in rgb_dir for rgb in os.listdir(rgb_d)\n ]\n\n depth_dir = [\n self.dataset_address + sequence_f + depth_f\n for depth_f in self.depth_folder\n for sequence_f in self.sequence_folder\n ]\n depth_data = [\n depth_d + depth for depth_d in depth_dir\n for depth in os.listdir(depth_d)\n ]\n\n segmentation_dir = [\n self.dataset_address + sequence_f + segmentation_f\n for segmentation_f in self.segmentation_folder\n for sequence_f in self.sequence_folder\n ]\n segmentation_data = [\n segmentation_d + segmentation\n for segmentation_d in segmentation_dir\n for segmentation in os.listdir(segmentation_d)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'DEPTH': depth_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1)\n\n return pd.DataFrame(dataset)", "def create_train_test_valid(self, categories, paths, split_ratio=(0.85, 0.10, 0.05)):\n # # a data dictionary holds categories and a list of all the image paths\n try:\n data_dict = dict()\n\n data_list = list()\n label_list = list()\n # loop through all the categories, make a list of all images lying in each categorical folder and append their\n # paths into a list. This list of paths corresponds to a categorical key in the dictionary\n for category in categories:\n category_dir = os.path.sep.join([paths[\"data_path\"], category])\n\n dl = [category_dir + \"/\" + file for file in os.listdir(category_dir) if not file.startswith('.')]\n data_list.extend(dl)\n label_list.extend([category] * len(dl))\n\n combined = list(zip(data_list, label_list))\n shuffle(combined)\n data_list[:], label_list[:] = zip(*combined)\n tr_data, data_dict[\"test_data\"], tr_labels, data_dict[\"test_labels\"] = train_test_split(data_list, label_list,\n test_size=split_ratio[2], random_state=42)\n data_dict[\"train_data\"], data_dict[\"valid_data\"], data_dict[\"train_labels\"], data_dict[\"valid_labels\"] = \\\n train_test_split(tr_data, tr_labels, test_size=split_ratio[1], random_state=42)\n\n return data_dict\n except Exception as e:\n self.logger.exception(e)\n sys.exit(1)" ]
[ "0.6856443", "0.618052", "0.60780936", "0.60242367", "0.5808405", "0.57668114", "0.5690427", "0.56130856", "0.5600461", "0.55399936", "0.54958624", "0.5479186", "0.5459529", "0.5456665", "0.5448621", "0.54074323", "0.5362034", "0.53619134", "0.5324682", "0.53203875", "0.53083163", "0.5288739", "0.52818304", "0.52659994", "0.52484584", "0.5247805", "0.52442896", "0.5239872", "0.5237977", "0.5231064", "0.52250254", "0.5202707", "0.5200095", "0.51886004", "0.5184084", "0.5177228", "0.5168213", "0.5168183", "0.51673406", "0.51529473", "0.515167", "0.51501864", "0.5141751", "0.51392704", "0.51327354", "0.5128984", "0.51253545", "0.5117677", "0.5112649", "0.5107196", "0.5102576", "0.5101295", "0.5093498", "0.5087091", "0.5076746", "0.507598", "0.5072902", "0.5067038", "0.50629026", "0.5058041", "0.5057696", "0.50571454", "0.5052309", "0.5046014", "0.5045783", "0.50429976", "0.5039359", "0.5038341", "0.5036015", "0.503187", "0.50251395", "0.50172305", "0.50081635", "0.50071496", "0.49882948", "0.4983642", "0.4970739", "0.49707076", "0.49689877", "0.4962552", "0.49612674", "0.49594095", "0.49516508", "0.49504557", "0.49437034", "0.49432603", "0.49409947", "0.49362716", "0.49329397", "0.4931541", "0.49288443", "0.492646", "0.4924564", "0.49240893", "0.49189088", "0.4914935", "0.49136916", "0.49126568", "0.49060407", "0.4904092" ]
0.70245886
0
It should create grayscale shapes in the given data directory.
def test_create_shapes_grayscale(data_dir):
    dataset.create_shapes(10, 10, 1, channels=1, data_dir=data_dir)
    img_path = os.path.join(data_dir, "ellipse/0.png")
    assert os.path.exists(img_path)
    img = imageio.imread(img_path)
    assert img.shape == (10, 10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_shapes(data_dir):\n dataset.create_shapes(10, 10, 1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10, 4)", "def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None", "def _preprocess_data(self, name, directory):\n if 
name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))", "def load_shapes(self,count,img_folder,mask_folder,imglist,dataset_root_path):\n self.add_class(\"shapes\",1,\"red_s\")\n self.add_class(\"shapes\",2,\"red_m\")\n self.add_class(\"shapes\",3,\"red_l\")\n self.add_class(\"shapes\",4,\"yellow_s\")\n self.add_class(\"shapes\",5,\"yellow_m\")\n self.add_class(\"shapes\",6,\"yellow_l\")\n self.add_class(\"shapes\",7,\"green_s\")\n self.add_class(\"shapes\",8,\"green_m\")\n self.add_class(\"shapes\",9,\"green_l\")\n self.add_class(\"shapes\",10,\"blue_s\")\n self.add_class(\"shapes\",11,\"blue_m\")\n self.add_class(\"shapes\",12,\"blue_l\")\n self.add_class(\"shapes\",13,\"orange_s\")\n self.add_class(\"shapes\",14,\"orange_m\")\n self.add_class(\"shapes\",15,\"orange_l\")\n\n for i in range(count):\n filestr = imglist[i].split(\".\")[0]\n package_id = (int(filestr)-1)//30 + 1\n package_path = \"package%s\" % package_id\n # print(filestr)\n if mask_folder == 'mask/training_data/':\n mask_path = mask_folder+package_path +\"/image%s\" % filestr\n # print('====>',mask_path)\n csv_path_str = \"training_data/\"+package_path\n path_to_img = img_folder+'/'+package_path+ \"/%s.png\" % filestr\n else:\n mask_path = mask_folder + \"/image%s\" % filestr\n csv_path_str = img_folder\n path_to_img = img_folder+ \"/%s.png\" % filestr\n label_index = filestr\n # path_to_img = img_folder+ \"/%s.png\" % filestr\n # print(path_to_img)\n cv_img = cv2.imread(path_to_img)\n # print(cv_img)\n # resize_img = cv2.resize(cv_img,(384,384),interpolation = cv2.INTER_AREA)\n self.add_image(\"shapes\",image_id=i, path=path_to_img, csv_path=csv_path_str, width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, label_index=label_index)", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def create_data(data_size,heme, nucleotide, control, steroid,data_total,path_to_data):\n\n os.chdir(path_to_data)\n\n x_array = np.zeros(shape = (data_size,14,32,32,32))\n\n y_array = np.zeros(shape = data_size)\n\n print(\"data size = \", data_size)\n\n #training set :\n\n file_count = 0\n\n for file in data_total:\n\n y_array[file_count]= find_class(str(file), heme, nucleotide, control, steroid)\n\n x_array[file_count] = np.load(str(file+\".npy\"))\n\n file_count+=1\n\n\n return (x_array, y_array)", "def training_data_generation(DATA_DIR, 
img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')", "def preprocess_dir(data_path,\n output_path,\n dataset,\n n_train,\n new_size,\n ):\n img_type_dict = get_class_labels()\n\n print('Preprocessing:', dataset)\n target_data_path = data_path\n disease_dirs = os.listdir(target_data_path)\n disease_dirs = [d for d in disease_dirs if\n 
os.path.isdir(os.path.join(target_data_path, d))]\n img_stack, target_list = [], []\n img_names = []\n for img_type in disease_dirs:\n class_lbl = img_type_dict[img_type]\n n_class = int(n_train / len(disease_dirs))\n print('\\t', img_type)\n img_files_path = os.path.join(target_data_path, img_type)\n if not (os.path.isdir(img_files_path)):\n continue\n img_files = os.listdir(img_files_path)\n img_files = [f for f in img_files if f.endswith('.jpeg')]\n if dataset == 'train':\n img_files = img_files[0:n_class]\n for img_fname in img_files:\n img_path = os.path.join(img_files_path, img_fname)\n img_arr = np.array(Image.open(img_path))\n img_arr = skimage.transform.resize(img_arr, new_size)\n img_arr = (img_arr - img_arr.min()) / img_arr.max()\n img_stack.append(img_arr)\n target_list.append(class_lbl)\n img_names += [n.split('.')[0] for n in img_files]\n # Save preprocessed data\n save_data(output_path, img_stack, target_list,\n new_size, dataset, n_train, img_names)", "def load_png_data():\n m=1 #训练文件个数\n n=1 #测试文件个数\n train_set_x=[]#训练数据集\n train_set_y=[]#训练标签集\n\n test_set_x=[]#测试数据集\n test_set_y=[]#测试标签集\n\n train_data={}\n\n train_path=r\".\\dataset\\train_label\\\\\"\n dirs=os.listdir(train_path)\n\n for file in dirs:\n srcImg=cv2.imread(train_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(train_path+str(m)+'.npy',npImg)\n train_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\trainset\\\\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\trainset\\\\\" + str(m) + '.npy', npNoiseImg)\n train_set_y.append(npNoiseImg)\n m=m+1\n train_data['train_set_x']=train_set_x\n train_data['train_set_y']=train_set_y\n\n test_path = r\".\\dataset\\test_label\\\\\"\n dirs_test = os.listdir(test_path)\n for file in dirs_test:\n srcImg=cv2.imread(test_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(test_path+str(n)+'.npy',npImg)\n test_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\testset\\\\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\testset\\\\\" + str(n) + '.npy', npNoiseImg)\n test_set_y.append(npNoiseImg)\n n=n+1\n train_data['test_set_x']=test_set_x\n train_data['test_set_y']=test_set_y\n\n np.savez(r\"E:\\DeepLearning\\CNNDenoiser\\dataset\\train_data.npz\",**train_data)", "def _generate_dataset(self):\n # create train images\n train_path = os.path.join(self.root_dir, \"shapes\", \"train\", \"good\")\n os.makedirs(train_path, exist_ok=True)\n for i in range(self.num_train):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n image = result[\"image\"]\n imsave(os.path.join(train_path, f\"{i:03}.png\"), image, check_contrast=False)\n\n # create test images\n for test_category in self.test_shapes:\n test_path = os.path.join(self.root_dir, \"shapes\", \"test\", test_category)\n mask_path = os.path.join(self.root_dir, \"shapes\", \"ground_truth\", test_category)\n os.makedirs(test_path, exist_ok=True)\n os.makedirs(mask_path, exist_ok=True)\n # anomaly and masks. 
The idea is to superimpose anomalous shapes on top of correct ones\n for i in range(self.num_test):\n correct_shapes = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=[test_category],\n generate_mask=True,\n )\n correct_shapes = correct_shapes[\"image\"]\n image, mask = result[\"image\"], result[\"mask\"]\n image = np.minimum(image, correct_shapes) # since 255 is white\n imsave(os.path.join(test_path, f\"{i:03}.png\"), image, check_contrast=False)\n imsave(os.path.join(mask_path, f\"{i:03}_mask.png\"), mask, check_contrast=False)\n # good test\n test_good = os.path.join(self.root_dir, \"shapes\", \"test\", \"good\")\n os.makedirs(test_good, exist_ok=True)\n for i in range(self.num_test):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n )\n image = result[\"image\"]\n imsave(os.path.join(test_good, f\"{i:03}.png\"), image, check_contrast=False)", "def __data_generation(self, image_mask_dirs): # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n # Generate data\n for i, dirs in enumerate(image_mask_dirs):\n # Store image\n x_img = cv2.imread(dirs[0])\n X[i,] = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n\n # Store mask\n y_img = cv2.imread(dirs[1], cv2.IMREAD_GRAYSCALE).reshape((*self.dim, 1))\n y[i,] = y_img\n\n if self.preprocessor is not None:\n X = self.preprocessor(X)\n y = self.preprocessor(y)\n\n X = X.astype('float32')\n X /= 255\n y = y.astype('float32')\n y /= 255\n\n return X, y", "def gen_batch_function(self, data_folder, image_shape):\n\n\t\tdef get_batches_fn(batch_size):\n\t\t\t#\n\t\t\timage_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n\t\t\t#\n\t\t\tlabel_paths = {\tre.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n\t\t\t\tfor path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\t\t\t#\n\t\t\tbackground_color = np.array([255, 0, 0])\n\t\t\t#\n\t\t\trandom.shuffle(image_paths)\n\t\t\t#\n\t\t\tfor batch_i in range(0, len(image_paths), batch_size):\n\t\t\t\t#\n\t\t\t\timages = []\n\t\t\t\t#\n\t\t\t\tgt_images = []\n\t\t\t\t#\n\t\t\t\tfor image_file in image_paths[batch_i:batch_i+batch_size]:\n\t\t\t\t\t#\n\t\t\t\t\tgt_image_file = label_paths[os.path.basename(image_file)]\n\t\t\t\t\t#\n\t\t\t\t\timage = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = np.all(gt_image == background_color, axis=2)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\t\t\t\t\t#\n\t\t\t\t\timages.append(image)\n\t\t\t\t\t#\n\t\t\t\t\tgt_images.append(gt_image)\n\t\t\t\t#\n\t\t\t\tyield np.array(images), np.array(gt_images)\n\t\t#\n\t\treturn get_batches_fn", "def Dev_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True, batch_size = 16):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in folderlist.keys():\n img_label = random.choice(folderlist[key])\n img = 
Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)])\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n batches = int(len(X_Image)/batch_size)\n for batch in range(batches):\n x = X_Image[batch*batch_size:(batch+1)*batch_size,:,:,:]\n y = Y_Image[batch*batch_size:(batch+1)*batch_size]\n yield((x,y))", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap", "def create_patches(data, patch_shape):\n\n imgs = []\n\n if data[0].shape[0] == test_size:\n step_length = (test_size - 
patch_shape[0]) // 2 # 176\n else:\n step_length = (training_size - patch_shape[0])\n\n for i in range(data.shape[0]):\n if len(patch_shape) == 3: # RGB images\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1], patch_shape[2]))\n imgs.extend(patches)\n else:\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1]))\n imgs.extend(patches)\n\n return np.asarray(imgs)", "def prepare_data(data_path, val_data_path, patch_size,stride,scales = [1, 0.9, 0.8, 0.7],\n max_num_patches=None, aug_times=1,random_aug=False, gray_mode=False):\n # training database\n print('> Training database')\n types = ('*.bmp', '*.png')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(data_path, tp)))\n files.sort()\n\n if gray_mode:\n traindbf = './data/set400_p64.h5'\n valdbf = './data/set12.h5'\n else:\n traindbf = './data/train_rgb.h5'\n valdbf = './data/val_rgb.h5'\n\n if max_num_patches is None:\n max_num_patches = 5000000\n print(\"\\tMaximum number of patches not set\")\n else:\n print(\"\\tMaximum number of patches set to {}\".format(max_num_patches))\n train_num = 0\n i = 0\n with h5py.File(traindbf, 'w') as h5f:\n while i < len(files) and train_num < max_num_patches:\n imgor = cv2.imread(files[i])\n # h, w, c = img.shape\n for sca in scales:\n img = cv2.resize(imgor, (0, 0), fx=sca, fy=sca, \\\n interpolation=cv2.INTER_CUBIC)\n if not gray_mode:\n # CxHxW RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # CxHxW grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n img = normalize(img)\n patches = img_to_patches(img, win=patch_size, stride=stride)\n print(\"\\tfile: %s scale %.1f # samples: %d\" % \\\n (files[i], sca, patches.shape[3] * 8))\n for nx in range(patches.shape[3]):\n if random_aug == False:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), j)\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n else:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), random.randint(0, 7))\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n i += 1\n # validation database\n print('\\n> Validation database')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(val_data_path, tp)))\n files.sort()\n h5f = h5py.File(valdbf, 'w')\n val_num = 0\n for i, item in enumerate(files):\n print(\"\\tfile: %s\" % item)\n img = cv2.imread(item)\n if not gray_mode:\n # C. H. 
W, RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # C, H, W grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n\n C,H,W=img.shape\n\n # if H % 2 == 1:\n # \timg = img[:, :-1, :]\n # if W % 2 == 1:\n # \timg = img[:, :, :-1]\n\n img = normalize(img)\n h5f.create_dataset(str(val_num), data=img)\n val_num += 1\n h5f.close()\n\n print('\\n> Total')\n print('\\ttraining set, # samples %d' % train_num)\n print('\\tvalidation set, # samples %d\\n' % val_num)", "def load_data(train_test_ratio = 0.8, class_range = 8, randomised = True):\n\n # Get image filenames, labels, and the number of classification classes\n filenames = glob.glob(\"../img/*.png\")\n if randomised:\n random.shuffle(filenames)\n\n img_labels = []\n for filename in filenames:\n label = int(filename.split(\"-d\",1)[1].split('-',1)[0])\n label = max(0, (label - 1) // (class_range))\n img_labels.append(label)\n\n num_classes = max(img_labels) + 1 # E.g. max label 5 -> 0-5 inclusive\n num_total_samples = len(filenames)\n num_train_samples = int(num_total_samples * train_test_ratio)\n num_test_samples = num_total_samples - num_train_samples\n\n training_images = np.empty(\n (num_train_samples, OUTPUT_RES, OUTPUT_RES, 3), dtype='uint8'\n )\n training_labels = np.asarray(img_labels[:num_train_samples], dtype='uint8')\n\n for i in range(0, num_train_samples):\n training_images[i] = parse_img(filenames[i])\n\n test_images = np.empty(\n (num_test_samples, OUTPUT_RES, OUTPUT_RES, 3), dtype='uint8'\n )\n test_labels = np.asarray(img_labels[num_train_samples:], dtype='uint8')\n\n for i in range(0, num_test_samples):\n test_images[i] = parse_img(filenames[i + num_train_samples])\n\n return ((training_images, training_labels),\n (test_images, test_labels),\n num_classes)", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def create_data_dict(data_dir, img_size=[25, 83]):\n print(\"Creating data dictionary\")\n print(\"- Using data at:\", data_dir)\n\n # Directories\n imgs_dir = os.path.join(data_dir, \"training/image_2\")\n labels_dir = os.path.join(data_dir, \"training/gt_image_2\")\n\n print(\"- Getting list of files\")\n # Only get the label files for road (not lane)\n label_files = glob.glob(os.path.join(labels_dir, \"*_road_*.png\"))\n\n # Create corresponding list of training image files\n img_files = list(map(lambda f: os.path.basename(f).replace(\"_road\", \"\"), label_files))\n img_files = list(map(lambda f: os.path.join(imgs_dir, f), img_files)) # absolute path\n\n n_samples = len(img_files)\n print(\"- Encountered {} samples\".format(n_samples))\n est_filesize = (n_samples*np.prod(img_size)*(3+1))/1e6\n print(\"- Estimated output filesize: {:0.3f} MB + overhead\".format(est_filesize))\n\n data = {}\n data[\"X_train\"] = np.empty([n_samples]+img_size+[3], dtype=np.uint8)\n data[\"Y_train\"] = 
np.empty([n_samples]+img_size, dtype=np.uint8)\n\n print(\"- Processing image files\")\n for i in range(n_samples):\n label_img = scipy.misc.imread(label_files[i])\n input_img = scipy.misc.imread(img_files[i])\n\n # PRERPOCESS THE IMAGES\n label_img = scipy.misc.imresize(label_img, img_size)\n input_img = scipy.misc.imresize(input_img, img_size)\n\n # PROCESSING LABEL IMAGE\n # Only one channel, (1=road, 0=not road)\n non_road_class = np.array([255,0,0])\n label_img = (1-np.all(label_img==non_road_class, axis=2, keepdims=False)).astype(np.uint8)\n\n # Place the images into the data arrays\n data[\"X_train\"][i] = input_img\n data[\"Y_train\"][i] = label_img\n\n print(\"- Shuffling the data\")\n np.random.seed(seed=128)\n ids = list(np.random.permutation(n_samples))\n data[\"X_train\"] = data[\"X_train\"][ids]\n data[\"Y_train\"] = data[\"Y_train\"][ids]\n\n print(\"- Done!\")\n return data", "def get_data(folder: str, dimensions: int):\n preprocess = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(dimensions),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n ]\n )\n return datasets.ImageFolder(folder, transform=preprocess)", "def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)", "def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def gen_batch_function(data_folder, image_shape, seed=None, samples_limit=None):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n }\n background_color = np.array([255, 0, 0])\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n samples_n = len(image_paths)\n\n rnd = 
random.Random(seed)\n\n def get_batches_fn(batch_size):\n \"\"\"\n\t\tCreate batches of training data\n\t\t:param batch_size: Batch Size\n\t\t:return: Batches of training data\n\t\t\"\"\"\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn, samples_n", "def process_images(path, dataset):\n \n print(f\"Processing images {os.path.join(path, dataset)}\", flush=True)\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, num = struct.unpack(\">II\", file.read(8))\n labels = numpy.fromfile(file, dtype=numpy.int8) #int8\n new_labels = numpy.zeros((num, 10))\n new_labels[numpy.arange(num), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, num, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = numpy.fromfile(file, dtype=numpy.uint8).reshape(num, rows, cols) #uint8\n imgs = imgs.astype(numpy.float32) / 255.0\n\n os.remove(label_file); os.remove(img_file)\n print(f\"Saving files under {os.path.join(path, dataset)} path\", flush=True)\n numpy.savez_compressed(os.path.join(path, dataset), imgs=imgs, labels=labels)", "def read_files_and_visualize(data):\n\n image = cv2.imread(data[0])\n label = cv2.imread(data[1], 0)\n name = data[1].split('/')[-1].split('.')[0]\n obj_label = None\n\n if generator_options.save_label_preview:\n obj_label = []\n if os.path.isfile(data[2]):\n with open(data[2], 'r') as f:\n obj = csv.reader(f, delimiter=',')\n for row in obj:\n row = [int(r.split('.')[0]) if index != 0 else r\n for index, r in enumerate(row)]\n obj_label.append(row)\n\n else:\n label_vals = np.unique(label)\n for val in label_vals:\n obj_label.append([_LABEL_DEF_FULL[val], 0, 0, 0, 0])\n\n save_visuals(image, label, obj_label, name)", "def colourise_image(self, folder_name):\n colourised_folder_name = folder_name + '_colourised'\n\n try:\n print(\"Making dir \" + str(colourised_folder_name) + \" for colourisation\")\n os.mkdir(colourised_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this colourisation??\")\n return\n\n print(\"Writing to folder +\" + str(colourised_folder_name))\n photo_list = self.get_photo_list(folder_name)\n for i, name in enumerate(photo_list):\n file_name = folder_name + '/' + name\n colourised_image_name = colourised_folder_name + '/' + name\n image = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)\n image_8bit = image.astype(np.uint8)\n colour_image = cv2.applyColorMap(image_8bit, cv2.COLORMAP_JET)\n cv2.imwrite(colourised_image_name, colour_image)", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' 
+ image + '.' + image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def generate_standard_dataset(dir_path):\n filenames = []\n \n # Normal Training data\n for pathAndFileName in glob.iglob(os.path.join(dir_path, '*.jpg')):\n filenames.append(pathAndFileName)\n \n filename_queue = tf.train.string_input_producer(filenames, shuffle=None)\n \n reader = tf.WholeFileReader()\n \n _, value = reader.read(filename_queue)\n \n image = tf.image.decode_jpeg(value, 3)\n \n image = preprocess_image(image, height=34, width=34)\n \n return image, filenames", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def load_data_in_folder(self):\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in range(0, idx_max-1):\n data = []\n for f in self.filenames[idx:idx+64]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def convert_dataset(src_dir, dest_dir):\n subdirs = get_subdirs(src_dir)\n detector = dlib.simple_object_detector(MODEL_PATH)\n for img_dir in tqdm(subdirs):\n\tprint(img_dir)\n jpegs = get_img_paths_in_dir(img_dir)\n target_dir = dest_dir + img_dir.split('/')[-1]\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n for src_path in jpegs:\n target_path = target_dir + '/' + src_path.split('/')[-1]\n img = io.imread(src_path)\n dets = detector(img)\n bounding_boxes = get_bounding_boxes(dets)\n if bounding_boxes:\n square_box = find_square_box(bounding_boxes[0])\n if is_valid(square_box, img):\n box = bounding_boxes[0]\n square_box = find_square_box(box)\n cropped_img = crop_frame(img, square_box)\n PIL_img = PIL.Image.fromarray(cropped_img)\n resized_img = PIL_img.resize((54,54), PIL.Image.BILINEAR)\n\t\t resized_img.save(target_path)\n print(target_path)\n # grey_img = resized_img.convert('L')\n # grey_img.save(target_path)", "def gen_batch_function(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n other_road_color = np.array([0,0,0])\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = 
label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n image_flip = np.flip(image, axis=1)\n \n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n gt_image_flip = np.flip(gt_image, axis=1)\n \n #---------- classification : single road---------------------\n #gt_bg = np.all(gt_image == background_color, axis=2)\n #gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n #gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n #------------------------------------------------------------\n \n \n #---------- classification : multi road----------------------\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg_flip = np.all(gt_image_flip == background_color, axis=2)\n \n # road segment\n road_mask = ((gt_image == other_road_color) | (gt_image == background_color))\n gt_road = np.invert(np.all(road_mask, axis=2))\n \n # flip of road segment\n road_mask_flip = ((gt_image_flip == other_road_color) | (gt_image_flip == background_color))\n gt_road_flip = np.invert(np.all(road_mask_flip, axis=2))\n \n # other_road segment\n oher_road_mask = (gt_image == other_road_color)\n gt_other_road = np.all(oher_road_mask, axis=2)\n \n # flip of other_road segment\n other_road_mask_flip = (gt_image_flip == other_road_color)\n gt_oher_road_flip = np.all(other_road_mask_flip, axis=2)\n\n # reshaping segments\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_other_road = gt_other_road.reshape(*gt_other_road.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n \n # reshaping flip segments\n gt_bg_flip = gt_bg_flip.reshape(*gt_bg_flip.shape, 1)\n gt_oher_road_flip = gt_oher_road_flip.reshape(*gt_oher_road_flip.shape, 1)\n gt_road_flip = gt_road_flip.reshape(*gt_road_flip.shape, 1)\n \n # concatenating classes bg, road, other_road\n gt_image = np.concatenate((gt_bg, gt_road, gt_other_road), axis=2)\n gt_image_flip = np.concatenate((gt_bg_flip, gt_road_flip, gt_oher_road_flip), axis=2)\n \n images.append(image)\n images.append(image_flip)\n \n gt_images.append(gt_image)\n gt_images.append(gt_image_flip)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "def prepare_data(sourcedir):\n # Set up empty lists for storing the data and labels\n data, labels = [], []\n\n # Walk through the source directory\n for (root, subdirs, files) in os.walk(sourcedir):\n # Assign a numerical identifier to each class directory\n for i, class_dir in enumerate(subdirs):\n classes[class_dir] = i\n print(\"[INFO] Found class {}; \"\n \"assigned identifier {}.\".format(class_dir, i))\n\n # Define allowed image extensions\n ext = ['png', 'jpg', 'jpeg']\n\n # Loop over the files in each directory\n for f in files:\n # Check file extension\n if f.split('.')[-1] in ext:\n # Get image path\n path = os.path.join(root, f)\n # Extract class label from path\n label = path.split('/')[-2]\n # Get the corresponding label integer from the classes dict\n numlabel = classes[label]\n # Load image\n image = load_img(path, target_size=target_size)\n # Convert image to numpy array\n features = img_to_array(image)\n\n # Append data and labels to lists\n data.append(features)\n labels.append(numlabel)\n\n # Convert lists to numpy arrays\n data = np.array(data)\n labels = np.array(labels)\n\n # Convert numerical labels into one-hot encoded vectors\n labels = np_utils.to_categorical(labels, len(classes))\n\n # Normalize the RGB values into range 0...1\n data = data.astype('float') / 255.0\n\n # Return data and labels\n return data, labels", "def 
__init__(self, data_folder: str = os.path.join('data', 'user_images'),\n dataset_file: str = os.path.join('data', 'dataset.pkl'),\n targets: str = os.path.join('data', 'dataset.pkl')):\n # check for existing dataset\n if not os.path.exists(dataset_file):\n create_dataset(data_folder, dataset_file)\n with open(dataset_file, 'rb') as f:\n data = pickle.load(f)\n print(f'loaded dataset from {dataset_file}')\n self.images = data['images']\n self.crop_sizes = data['crop_sizes']\n self.crop_centers = data['crop_centers']", "def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))", "def __init__(self, folder_path, image_size=(320,240), batch_size=4, mode='seg', target_classes=[\"Good Crypts\"], filter_classes=[], augment=True):\n print(\"Initialising data generator\")\n # Making the image ids list\n self.folder_path = folder_path\n image_paths = [f for f in os.listdir(folder_path) if f.endswith(\".jpg\")]\n self.image_ids = [f.replace('.jpg', '') for f in image_paths]\n self.orig_image_ids = self.image_ids.copy()\n self.filter_classes = filter_classes\n self.filter_data()\n\n self.image_size = image_size\n self.batch_size = batch_size\n self.mode = mode\n self.target_classes = target_classes\n self.augment = augment\n print(\"Image count in {} path: {}\".format(self.folder_path,len(self.image_ids)))\n self.on_epoch_end()", "def load_shapes(self, count, img_floder, mask_floder, imglist, creatnpzfile:bool=True):\n # Add classes\n \n self.add_class(\"shapes\", 1, \"grasper\")\n self.add_class(\"shapes\", 2, \"grasper2\")\n self.add_class(\"shapes\", 3, \"grasper3\")\n self.add_class(\"shapes\", 4, \"irrigator\")\n self.add_class(\"shapes\", 5, \"hook\")\n self.add_class(\"shapes\", 6, \"clipper\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. 
Images are generated on the fly in load_image().\n for i in range(count):\n img = imglist[i]\n if img.endswith(\".jpg\"):\n img_name = img.split(\".\")[0]\n img_path = os.path.join(img_floder,img)\n mask_path = os.path.join(mask_floder,img_name+\".png\")\n #save the mask infomation with numpy\n mask_info = None\n \n if not os.path.exists(os.path.join(mask_infofloder,\"{}.npz\".format(img_name))):\n mask_info = self.load_mask_pre(i,mask_path)\n np.savez(os.path.join(mask_infofloder,img_name),mask_ = mask_info[0], id_=mask_info[1])\n else:\n data = np.load(os.path.join(mask_infofloder,\"{}.npz\".format(img_name)))\n mask_info = data['mask_'],data['id_']\n\n self.add_image(\"shapes\", image_id=i, path=img_path, name=img_name, mask_path=mask_path, mask_info=mask_info)\n sys.stdout.write('-------creating the np file:--%s-------------pross:--%.4f%%--'%(os.path.join(mask_infofloder,\"{}.npz\".format(img_name)),\n (i+1)/float(count)*100))\n sys.stdout.write('\\r')\n sys.stdout.flush()", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def create_train_folder(df_train, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/train')\n print(f'Create train set at: {folder_path}')\n for _, row in tqdm(df_train.iterrows(), total=df_train.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'train', row['filename'])\n shutil.copy(img, destination_path )", "def from_yolo_data(data_path, class_names):\n data = ImageData()\n\n # Construct regions\n data_lines = []\n with open(data_path) as data_file:\n data_lines = [line.rstrip() for line in data_file if line.rstrip() != \"\"]\n\n for line in data_lines:\n elements = line.split(\" \")\n tag_index = int(elements[0])\n tag_name = class_names[tag_index]\n center_x, center_y, width, height = [float(x) for x in elements[1:]]\n data.add_region(tag_name, center_x - (width / 2), center_y - (height / 2), width, height)\n\n return data", "def create_image_batch_generator_function(data_dir, img_shape, pattern=\"*.jpg\", shuffle=True):\n\n def batch_generator(batch_size):\n \"\"\" A function that returns a generator object that can be iterated\n over. 
Simply specify the batch size.\n \"\"\"\n # Randomly shuffle the order of the files in directory\n files = glob.glob(os.path.join(data_dir, pattern))\n np.random.shuffle(files)\n n_files = len(files)\n\n for batch_num in range(0, n_files, batch_size):\n batch = []\n\n for img_file in files[batch_num:batch_num+batch_size]:\n # Load image from file\n img = scipy.misc.imread(img_file)\n\n # -----------\n # BOOKMARK: File preprocessing steps here\n # -----------\n img = scipy.misc.imresize(img, img_shape)\n # -----------\n\n # Append to the batch\n batch.append(img)\n\n # Yield the current batch\n yield np.array(images)\n return batch_generator", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def images_example(path='train_images.pickle'):\n patch_size = (8, 8)\n\n with open('train_images.pickle', 'rb') as f:\n train_pictures = pickle.load(f)\n\n patches = sample_patches(train_pictures, psize=patch_size, n=20000)\n\n plt.figure()\n plt.imshow(train_pictures[0])\n plt.title(\"Picture Example\")\n\n plt.figure()\n for i in range(4):\n plt.subplot(2, 2, i + 1)\n plt.imshow(patches[:, i].reshape(patch_size), cmap='gray')\n plt.title(\"Patch Example\")\n plt.show()", "def create_images(dataFpath, width, height, angles=[]):\n\n imgSize = width * height\n\n\n def _resize_n_cast(_img):\n return resize(_img, (width, height)).ravel().reshape(1, imgSize).astype(theano.config.floatX)\n\n\n origImg = 1 - trim_image(imread(dataFpath, as_grey=True), bgValue=255)\n\n straightImg = _resize_n_cast(origImg)\n rotatedImgs = [_resize_n_cast(rotate(origImg, angle)) for angle in angles]\n\n return [straightImg] + rotatedImgs", "def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)", "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def trainingBatchGenerator(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n 
background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn", "def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def preprocess(data_path, dataset):\n il_data_path = os.path.join(data_path, 'il' + dataset)\n train_path = os.path.join(il_data_path, 'train')\n val_path = os.path.join(il_data_path, 'val')\n\n if os.path.isdir(il_data_path):\n return\n\n os.makedirs(train_path)\n os.makedirs(val_path)\n\n train_set = _datasets[dataset](data_path, train=True, download=True)\n 
val_set = _datasets[dataset](data_path, train=False, download=True)\n\n # dump pickles for each class\n for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:\n for idx, item in enumerate(cur_set):\n label = item[1]\n if not os.path.exists(os.path.join(cur_path, str(label))):\n os.makedirs(os.path.join(cur_path, str(label)))\n with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:\n pickle.dump(item, f)", "def generate_train_test_data(data_dir = '../../att_faces'):\n\n train_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(1, 36)]\n test_data = [ [ read_image('%s/s%d/%d.pgm'%( data_dir, i, j)) for j in range(1,11)] for i in range(36, 41)]\n \n true_combinations_train = generate_true_combinations(train_data)\n false_combinations_train = generate_false_combination(train_data, int(len(true_combinations_train) / len(train_data)), 10)\n \n true_combinations_test = generate_true_combinations(test_data)\n false_combinations_test = generate_false_combination(test_data, int(len(true_combinations_test) / len(test_data)), 10)\n \n return prepare_to_classifier(true_combinations_train, false_combinations_train, true_combinations_test, false_combinations_test)", "def Read_Raw_Images(path_data,path_labels):\n \n data = skimage.io.imread(path_data).astype(np.float32)\n for i in range(data.shape[0]):\n data[i,...] = skimage.exposure.rescale_intensity(data[i,...], out_range=(0,1))\n data_labels = skimage.io.imread(path_labels) > 0\n \n training_data=data[0:25,:,:]\n training_labels=data_labels[0:25,:,:]\n \n testing_data=data[25:data.shape[0],:,:]\n testing_labels=data_labels[25:data.shape[0],:,:]\n \n np.save(\"data.npy\",training_data)\n np.save(\"labels.npy\",training_labels)\n np.save(\"data_validation.npy\",testing_data)\n np.save(\"labels_validation.npy\",testing_labels)\n \n return()", "def build_dataframe(input_path, img_input_shape, conform_shape=False):\r\n number_classes = os.listdir(path=input_path)\r\n \r\n image_array = []\r\n class_label = []\r\n \r\n for folder in number_classes:\r\n image_array.extend([cv2.imread(os.path.join(input_path,folder,x)) for\r\n x in os.listdir(os.path.join(input_path,folder))\r\n if '.jpeg' in x or '.jpg' in x])\r\n class_label.extend([folder for x in os.listdir(os.path.join(input_path,folder)) if\r\n '.jpeg' in x or '.jpg' in x])\r\n \r\n if conform_shape:\r\n if max([len(img) for img in image_array]) > img_input_shape[0]:\r\n image_array = [pp.resize_image(x,img_input_shape[0]) for x in image_array]\r\n\r\n # Ensure shape matches exactly \r\n for i,img in enumerate(image_array):\r\n shape_delta = img_input_shape[0] - img.shape[0]\r\n if shape_delta > 0:\r\n new_row = np.random.randint(0,255,[shape_delta,img_input_shape[1],img_input_shape[2]],dtype='uint8')\r\n image_array[i] = np.vstack([image_array[i],new_row])\r\n \r\n elif shape_delta < 0:\r\n image_array[i] = image_array[i][:img_input_shape[0],:,:]\r\n \r\n # Ensure type is uint8 for HOG & Surf\r\n image_array = [x.astype('uint8') for x in image_array]\r\n \r\n return np.array(image_array), np.array(class_label)", "def create_test_folder(df_test, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/test')\n print(f'Create test set at: {folder_path}')\n for _, row in tqdm(df_test.iterrows(), total=df_test.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not 
os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'test', row['filename'])\n shutil.copy(img, destination_path )", "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return", "def Valid_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in tqdm(folderlist.keys()):\n for j in range(len(folderlist[key])):\n img_label = folderlist[key][j]\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n 
background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)]) # Four because we are doing rot,trans,flip and one original Image\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n return (X_Image,Y_Image)", "def create_dataset(data_path, batch_size=32, num_parallel_workers=1):\n # Define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # Define map operations\n resize_op = vision.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)\n rescale_nml_op = vision.Rescale(rescale_nml, shift_nml)\n rescale_op = vision.Rescale(rescale, shift)\n hwc2chw_op = vision.HWC2CHW()\n type_cast_op = transforms.TypeCast(mstype.int32)\n\n # Apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n\n return mnist_ds", "def process_scene_data(self, scene, data, tmp_dir):\n scene_dir = join(tmp_dir, str(scene.id))\n img_dir = join(scene_dir, 'img')\n labels_dir = join(scene_dir, 'labels')\n\n make_dir(img_dir)\n make_dir(labels_dir)\n\n for ind, (chip, window, labels) in enumerate(data):\n chip_path = join(img_dir, '{}-{}.png'.format(scene.id, ind))\n label_path = join(labels_dir, '{}-{}.png'.format(scene.id, ind))\n\n label_im = labels.get_label_arr(window).astype(np.uint8)\n save_img(label_im, label_path)\n save_img(chip, chip_path)\n\n return scene_dir", "def create_train_file(img_folder_path: str, train_file_path: str) -> None:\n files = []\n for ext in (\"*.gif\", \"*.png\", \"*.jpg\", \"*.bmp\"):\n img_path = glob(join(img_folder_path, ext))\n if img_path:\n files.extend(img_path)\n\n write_to_train_file(files, train_file_path)\n\n print(\"Training files are created in \" + img_folder_path)", "def format_dataset(dataset_path, image_path_prefix):\n\n image_paths = load_image_paths(dataset_path, image_path_prefix)\n image_sizes = load_image_sizes(dataset_path)\n image_bboxes = load_bounding_box_annotations(dataset_path)\n image_parts = load_part_annotations(dataset_path)\n image_labels, new_label_to_original_label_map = format_labels(load_image_labels(dataset_path))\n class_names = load_class_names(dataset_path)\n train_images, test_images 
= load_train_test_split(dataset_path)\n\n train_data = []\n test_data = []\n\n for image_ids, data_store in [(train_images, train_data), (test_images, test_data)]:\n for image_id in image_ids:\n\n width, height = image_sizes[image_id]\n width = float(width)\n height = float(height)\n\n x, y, w, h = image_bboxes[image_id]\n x1 = max(x / width, 0.)\n x2 = min((x + w) / width, 1.)\n y1 = max(y / height, 0.)\n y2 = min((y + h) / height, 1.)\n\n parts_x = []\n parts_y = []\n parts_v = []\n parts = image_parts[image_id]\n for part_index in range(0, len(parts), 3):\n parts_x.append(max(parts[part_index] / width, 0.))\n parts_y.append(max(parts[part_index + 1] / height, 0.))\n parts_v.append(int(parts[part_index + 2]))\n\n data_store.append({\n \"filename\": image_paths[image_id],\n \"id\": image_id,\n \"class\": {\n \"label\": image_labels[image_id],\n \"text\": class_names[new_label_to_original_label_map[image_labels[image_id]]]\n },\n \"object\": {\n \"count\": 1,\n \"bbox\": {\n \"xmin\": [x1],\n \"xmax\": [x2],\n \"ymin\": [y1],\n \"ymax\": [y2],\n \"label\": [image_labels[image_id]],\n \"text\": [class_names[new_label_to_original_label_map[image_labels[image_id]]]]\n },\n \"parts\": {\n \"x\": parts_x,\n \"y\": parts_y,\n \"v\": parts_v\n },\n \"id\": [image_id],\n \"area\": [w * h]\n }\n })\n\n return train_data, test_data", "def get_dataset(data_path, mean, std, target_transform=None):\n # can add more data augmentation\n preprocess = transforms.Compose([\n transforms.Resize((299, 299)),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n transforms.RandomHorizontalFlip()\n ])\n\n return datasets.ImageFolder(root=data_path, transform=preprocess, target_transform=target_transform)", "def normalize_dataset_type(dir_path, num_samples, is_train=None):\n \n if is_train is not None:\n pass\n \n dataset = dict.fromkeys([''])\n for file_id in range(num_samples):\n image = imread(os.path.join(dir_path, '%s.jpg' % str(file_id)))\n dataset.append(image)\n\n dataset = np.array(dataset)\n return dataset", "def save_data(data,patches,labels,name,patch_dim,size=None):\n\n patchPerIm = (data.shape[1]*data.shape[2])/(patch_dim**2)\n\n if size is None:\n size = labels.shape[0]\n\n img_f = open('data/images_'+name,'w')\n patches_f = open('data/patches_'+name,'w')\n label_f = open('data/labels_'+name,'w')\n\n patches = patches.astype(np.float32)\n\n data[:size,...].tofile(img_f)\n patches[:,:(size*patchPerIm)].tofile(patches_f)\n labels[:size].tofile(label_f)\n\n img_f.close()\n patches_f.close()\n label_f.close()", "def run(dataset_dir,pic_path):\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n training_filename = _get_output_filename(dataset_dir, 'train')\n testing_filename = _get_output_filename(dataset_dir, 'test')\n\n if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):\n print('Dataset files already exist. 
Exiting without re-creating them.')\n return\n\n class_names = os.listdir(pic_path)\n labels_to_class_names = dict(zip(class_names,range(len(class_names))))\n \n picnames=[]\n for label in class_names:\n alabel_path=os.path.join(pic_path,label)\n names=os.listdir(alabel_path)\n picnames.extend([os.path.join(alabel_path,name) for name in names])\n random.shuffle(picnames) \n \n train_picnames = picnames[:int(0.7*len(picnames))]\n test_picnames = picnames[int(0.7*len(picnames)):]\n # First, process the training data:\n with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:\n offset = 0\n for name in train_picnames:\n offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)\n\n # Next, process the testing data:\n with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:\n offset = 0\n for name in test_picnames:\n offset = _add_to_tfrecord(name, tfrecord_writer, labels_to_class_names, offset)\n\n # Finally, write the labels file:\n labels_to_class_names = dict(zip(labels_to_class_names.values(),labels_to_class_names.keys())) \n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n with open(os.path.join(dataset_dir,'info.json'),'w') as f:\n info=json.dumps({'num_class':len(class_names),'num_sample_train':len(train_picnames),'num_sample_test':len(test_picnames)})\n f.write(info)\n\n print('\\nFinished converting the dataset in the {}!'.format(pic_path))\n print('\\nThe tfrecord files,info.json and labels file is located in the {}'.format(dataset_dir))", "def populate_train_test_val_dirs_nonrandomly(root_dir, val_ratio=0.15, test_ratio=0.05, preliminary_clahe=True,\n apply_masks=True):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredBlurryImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n if val_ratio == 0.0:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n if test_ratio == 0.0:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)]\n if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names\n if filename not in val_file_names and filename not in test_file_names]\n\n # Print the file distribution among the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n # Copy-Pasting images into train dataset\n for name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/train/Masks')\n\n # Copy-Pasting images into val dataset\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + 
'/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/val/Masks')\n\n # Copy-Pasting images into test dataset\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/test/Masks')\n\n ''' Augment the images in each new folder '''\n # If we want to use preliminary adaptive equalization...\n if preliminary_clahe:\n pass\n # ... then first, apply Contrast Limited Adaptive Histogram Equalization to clear images in all folders\n CLAHE_image_folder(root_dir + '/train/ClearImages')\n CLAHE_image_folder(root_dir + '/val/ClearImages')\n CLAHE_image_folder(root_dir + '/test/ClearImages')\n\n # Then, apply histogram equalization to make the blurry images' histogram match that of the clear images\n hist_match_image_folder(root_dir=join(root_dir, 'train'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'val'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'test'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)", "def create_masks(image_folder: str, annotation_path: str, outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def single_to_rgb(R_file,G_file,B_file): \n R=gdal_array.LoadFile(R_file)\n G=gdal_array.LoadFile(G_file)\n B=gdal_array.LoadFile(B_file)\n \n \n basename=os.path.basename(R_file)\n 
basename=basename[:3]+basename[4:]\n basename=basename[:-4]+\"_rgb_.tif\" \n \n\n file_path=os.path.dirname(os.path.abspath(R_file))+\"/\"+basename\n\n \n driver=osgeo.gdal.GetDriverByName(\"GTiff\")\n options = ['PHOTOMETRIC=RGB', 'PROFILE=GeoTIFF']\n print(file_path)\n print(np.max(np.array([R.shape[1],B.shape[1],G.shape[1]])), np.max(np.array([R.shape[0],B.shape[0],G.shape[0]])))\n Xlen=np.max(np.array([R.shape[1],B.shape[1],G.shape[1]]))\n Ylen= np.max(np.array([R.shape[0],B.shape[0],G.shape[0]]))\n dataset=driver.Create(file_path, int(Xlen),int(Ylen), 3, osgeo.gdal.GDT_UInt16, options) \n \n dataset.GetRasterBand(1).WriteArray(R)\n dataset.GetRasterBand(2).WriteArray(G)\n dataset.GetRasterBand(3).WriteArray(B)\n \n return file_path", "def get_data(folder):\n X = []\n y = []\n\n for seismic_type in os.listdir(folder):\n if not seismic_type.startswith('.'):\n if seismic_type in ['Class1']:\n label = '0'\n else:\n label = '1'\n for image_filename in os.listdir(folder + seismic_type):\n img_file = cv2.imread(folder + seismic_type + '/' + image_filename)\n if img_file is not None:\n # Downsample the image to 120, 160, 3\n #img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))\n img_arr = np.asarray(img_file)\n # img_arr = image.img_to_array(img_arr)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def get_batches_mono(data_dir):\n X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n # X = np.load('train_x_sample.npy')\n X = X / 255\n # X = np.load('/home/yunhan/data_dir/train_x_224.npy')\n Y = np.load('/home/yunhan/data_dir/train_y_224.npy')\n # Y = np.load('train_y_sample.npy')\n return [(X, Y, 32, 0.2), ]", "def make_nature(styles_dict: dict,foldername='tmp',processedfoldername='Processed_imgs',styles=[1]):\r\n # initialize counters for success/failure of process\r\n success,failure = 0,0\r\n\r\n # Sort files by creation time in drawings directory.\r\n files = sort_files(foldername)\r\n # Loop files\r\n for i in stqdm.stqdm(range(len(files))):\r\n p = files[i]\r\n for j in stqdm.stqdm(range(len(styles))):\r\n # Read each file\r\n with open(p, \"rb\") as f:\r\n # Get processed image back from GauGAN server.\r\n image = gaugan.processImage(f.read(), style=styles_dict[styles[j]])\r\n # Open the processed file for writing add style num to img name\r\n saving_path = processedfoldername+\"/\"+os.path.split(p[:-4])[-1]+'%s.jpg'%j\r\n\r\n # Load the error image that can be received as result in case of colors issue\r\n error_detector = cv2.imread('error_detector.png')\r\n\r\n if np.array_equal(image,error_detector): # If the process in GauGAN server failed\r\n failure += 1\r\n else:\r\n if not os.path.exists(processedfoldername):\r\n os.makedirs(processedfoldername) # create the processed in GauGAN folder if it doesn't exist\r\n\r\n with open(saving_path, \"wb\") as f: # Save the processed drawing\r\n # Write the processed file.\r\n f.write(image)\r\n success += 1\r\n if success>0:\r\n st.success(\"%s Files were processed successfully!\"%success)\r\n if failure>0:\r\n st.warning(\"%s Files failed to be processed due to an error!\")", "def gen_batches_functions(data_folder, image_paths, image_shape, out_shape,\n label_folder):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in 
image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn", "def before_process(self,data,labels):\n # JM: if integer labels are given, then create different output\n # directories for each new label\n if all(isinstance(lbl,int) for lbl in labels):\n self.batch_dirs = \\\n [os.path.join(self.output_dir,str(lbl)) for lbl in labels]\n # JM: otherwise create the same output directory for each image\n else:\n self.batch_dirs = [self.output_dir] * len(data)\n\n # create output directories if they don't already exist\n uniques = set(self.batch_dirs)\n for out_dir in uniques:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n self.batch_index = 0", "def create_dataset(img_rows=128, img_cols=128):\n print('Creating original dataset from the raw data')\n # first, get the patients directory names located in the data/ directory. These names (e.g. 
'patient0001') will\n # be used for indexing (also avoid hidden files & folders)\n patients = [name for name in os.listdir(os.path.join(os.curdir, 'data/')) if not name.startswith('.')]\n\n # We sort this list to get the patients id in increasing order\n patients.sort(key=lambda s: s[-3:]) # sort according to last 3 characters\n\n # create an empty numpy.ndarray which will contain the images (resized to (img_rows, img_cols))\n images = np.ndarray((2 * len(patients), img_rows, img_cols), dtype=np.uint8) # 2 images per patient\n masks = np.ndarray((2 * len(patients), img_rows, img_cols), dtype=np.uint8) # 2 masks per patient\n\n # we now go through each patient's directory :\n idx = 0\n for patient in patients:\n\n for phase in ['ED', 'ES']:\n\n # read image & mask\n img, _, _, _ = load_mhd_data('data/{pa}/{pa}_4CH_{ph}.mhd'.format(pa=patient, ph=phase))\n mask, _, _, _ = load_mhd_data('data/{pa}/{pa}_4CH_{ph}_gt.mhd'.format(pa=patient, ph=phase))\n\n # resize the img & the mask to (img_rows, img_cols) to keep the network input manageable\n img = resize(img, (img_cols, img_rows), mode='reflect', preserve_range=True)\n mask = resize(mask, (img_cols, img_rows), mode='reflect', preserve_range=True)\n\n # now, save the resized image to the images np.ndarray\n images[idx] = img\n\n # save the corresponding mask to masks np.ndarray (at the same index)\n masks[idx] = mask\n\n idx += 1\n\n print('Created 2 np.ndarrays containing images & masks.')\n\n # Create directory to store files.\n directory = os.path.join(os.getcwd(), 'output/processed_data/')\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # save all ndarrays to a .npy files (for faster loading later)\n np.save('output/processed_data/images.npy', images)\n np.save('output/processed_data/masks.npy', masks)\n print('Saving to .npy files done: see files\\noutput/processed_data/images.npy & \\noutput/processed_data/masks.npy.')", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def set_data(img_path, dataframe):\n N = len(os.listdir(img_path))\n x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)\n y_ = np.empty(N)\n image_names = np.empty(N, dtype=np.dtype(('U', 15)))\n for i, img_name in enumerate(tqdm(os.listdir(img_path))):\n x_[i, :, :, :] = preprocess_image(img_path + img_name)\n y_[i] = dataframe.loc[img_name.split('.')[0], 'level']\n image_names[i] = img_name\n\n return x_, y_", "def get_training_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n img_set = os.listdir(path)\n n = len(img_set)\n for i in range(n):\n try:\n img = img_set[i]\n img_arr = cv2.imread(os.path.join(path, img))\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n data.append([resized_arr, class_num])\n if i % 100 == 0:\n print(\"Processing images: {}/{}\".format(i + 1, n))\n except Exception as e:\n print(e)\n return np.array(data)", "def import_data(self, img_size):\n path = self._path\n images = []\n labels = []\n\n categs_name = [filename for filename in os.listdir(path)]\n for categ in categs_name:\n if isdir(join(path, categ)):\n\n for img_name in os.listdir(join(path, categ)):\n\n if \".jpg\" in 
img_name:\n\n img_name = self.correct_filename(img_name, categ)\n img_path = join(path, categ, img_name)\n img = cv2.imread(img_path)\n\n if img_size:\n dim = (img_size, img_size)\n try:\n img = cv2.resize(img, dim)\n except:\n print(img_name, \"has not been loaded.\")\n continue\n\n images.append(img)\n labels.append(categ)\n\n X = np.array(images)\n y = self.transform_labels(labels)\n\n return X, y", "def create_new_images(x):\n \n datagen = ImageDataGenerator(width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.1,\n horizontal_flip=True,\n fill_mode='constant',\n cval=0) \n \n i = 0\n for batch in datagen.flow(x, batch_size=1,\n save_to_dir='data/Histology/new_benign',\n save_prefix='benign',\n save_format='jpeg'):\n i += 1 \n if i > 3:\n break\n \n return 0", "def gen_train_val_test_images(data_dir, seed=131):\n np.random.seed(seed)\n\n # Load SVHN Dataset (single digits)\n train_data = scipy_io.loadmat(data_dir + '/train_32x32.mat')\n test_data = scipy_io.loadmat(data_dir + '/test_32x32.mat')\n extra_data = scipy_io.loadmat(data_dir + '/extra_32x32.mat')\n\n train_X, train_y = train_data['X'], train_data['y']\n test_X, test_y = test_data['X'], test_data['y']\n extra_X, extra_y = extra_data['X'], extra_data['y']\n\n train_y = train_y.squeeze()\n test_y = test_y.squeeze()\n extra_y = extra_y.squeeze()\n\n # Change labels for '0' digit from 10 to 0\n train_y[train_y == 10] = 0\n test_y[test_y == 10] = 0\n extra_y[extra_y == 10] = 0\n\n del extra_data\n\n num_classes = 10\n\n train_val_sample_idxs = np.array([], int)\n for i in range(num_classes):\n class_idxs = np.arange(len(train_y))[train_y == i]\n sel_class_idxs = np.random.choice(class_idxs, size=400)\n train_val_sample_idxs = np.concatenate((train_val_sample_idxs,\n sel_class_idxs))\n not_train_val_sample_idxs = np.setdiff1d(np.arange(len(train_y)),\n train_val_sample_idxs)\n\n val_X = train_X[:, :, :, train_val_sample_idxs]\n val_y = train_y[train_val_sample_idxs]\n\n extra_val_sample_idxs = np.array([], int)\n for i in range(num_classes):\n class_idxs = np.arange(len(extra_y))[extra_y == i]\n sel_class_idxs = np.random.choice(class_idxs, size=200)\n extra_val_sample_idxs = np.concatenate((extra_val_sample_idxs,\n sel_class_idxs))\n not_extra_val_sample_idxs = np.setdiff1d(np.arange(len(extra_y)),\n extra_val_sample_idxs)\n\n val_X = np.concatenate((val_X, extra_X[:, :, :, extra_val_sample_idxs]), axis=3)\n val_y = np.hstack((val_y, extra_y[extra_val_sample_idxs]))\n\n train_X = np.concatenate((train_X[:, :, :, not_train_val_sample_idxs],\n extra_X[:, :, :, not_extra_val_sample_idxs]), axis=3)\n train_y = np.hstack((train_y[not_train_val_sample_idxs],\n extra_y[not_extra_val_sample_idxs]))\n\n # Create directories and save images\n train_dir = data_dir + '/imgs/train'\n test_dir = data_dir + '/imgs/test'\n validation_dir = data_dir + '/imgs/validation'\n\n if not os.path.exists(train_dir):\n os.makedirs(train_dir)\n\n if not os.path.exists(validation_dir):\n os.makedirs(validation_dir)\n\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n for i in range(num_classes):\n if not os.path.exists(train_dir + '/' + str(i)):\n os.makedirs(train_dir + '/' + str(i))\n\n if not os.path.exists(validation_dir + '/' + str(i)):\n os.makedirs(validation_dir + '/' + str(i))\n\n if not os.path.exists(test_dir + '/' + str(i)):\n os.makedirs(test_dir + '/' + str(i))\n\n print \"Creating train images ... 
\"\n for i in range(len(train_y)):\n filename = train_dir + '/' + str(train_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, train_X[:, :, :, i])\n\n print \"Creating validation images ... \"\n for i in range(len(val_y)):\n filename = validation_dir + '/' + str(val_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, val_X[:, :, :, i])\n\n print \"Creating test images ... \"\n for i in range(len(test_y)):\n filename = test_dir + '/' + str(test_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, test_X[:, :, :, i])", "def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in 
mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)", "def process_data(datapath, batch_sizes=[64, 32, 32],\n shuffles=[True, False, False]):\n train_dir = os.path.join(datapath, 'train')\n valid_dir = 
os.path.join(datapath, 'valid')\n test_dir = os.path.join(datapath, 'test')\n\n data_transforms = {'train': transforms.Compose([\n transforms.Resize(256),\n transforms.RandomHorizontalFlip(0.3),\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([.485, .456, .406],\n [.229, .224, .225])\n ])}\n\n image_dataset = {\n 'train': datasets.ImageFolder(\n train_dir, transform=data_transforms['train']\n ),\n 'valid': datasets.ImageFolder(\n valid_dir, transform=data_transforms['valid']\n ),\n 'test': datasets.ImageFolder(\n test_dir, transform=data_transforms['test']\n )\n }\n\n dataloaders = {\n 'train': torch.utils.data.DataLoader(\n image_dataset['train'],\n batch_size=batch_sizes[0],\n shuffle=shuffles[0]\n ),\n 'valid': torch.utils.data.DataLoader(\n image_dataset['valid'],\n batch_size=batch_sizes[1],\n shuffle=shuffles[1]\n ),\n 'test': torch.utils.data.DataLoader(\n image_dataset['test'],\n batch_size=batch_sizes[2],\n shuffle=shuffles[2]\n )\n }\n\n return image_dataset, dataloaders", "def _get_filenames_and_classes(dataset_dir):\n # print 'DATASET DIR:', dataset_dir\n # print 'subdir:', [name for name in os.listdir(dataset_dir)]\n # dataset_main_folder_list = []\n # for name in os.listdir(dataset_dir):\n # \tif os.path.isdir(name):\n # \t\tdataset_main_folder_list.append(name)\n dataset_main_folder_list = [name for name in os.listdir(dataset_dir) if os.path.isdir(os.path.join(dataset_dir,name))]\n dataset_root = os.path.join(dataset_dir, dataset_main_folder_list[0])\n directories = []\n class_names = []\n for filename in os.listdir(dataset_root):\n path = os.path.join(dataset_root, filename)\n if os.path.isdir(path):\n directories.append(path)\n class_names.append(filename)\n \n count = 0\n #print(directories)\n for directory in directories:\n #print(directory)\n #continue\n for filename in os.listdir(directory):\n print(filename)\n path = os.path.join(directory, filename)\n\n im = Image.open(path)\n imResize = im.resize((28,28), Image.ANTIALIAS)\n imResize.save(path, 'bmp')\n print(count)\n count = count + 1\n \n\n\n \n return", "def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def make_npz_file(data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\toutput_file = os.path.join(dataset_params.data_path, \"synthetic_\" + data_type + 
\"_data\")\n\tline_reader = csv.DictReader(open(label_file,\"r\"))\n\n\tdata = []\n\tlabels = []\n\tdata_points = 0\n\tfor row in line_reader:\n\t\timage_name = os.path.join(dataset_params.data_path,data_folder,row[\"figNum\"] + \".png\")\n\t\timage_data = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\t\timage_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n\t\timage_label = [int(dataset_params.shapes[row[\"shape\"]]), int(dataset_params.colors[row[\"color\"]]), int(dataset_params.sizes[row[\"size\"]]), int(row[\"quadrant\"]), int(dataset_params.backgrounds[row[\"background\"]]) ]\n\t\tdata.append(image_data)\n\t\tlabels.append(image_label)\n\t\tdata_points += 1\n\n\t# Converting list to data to np array\n\tdata = np.asarray(data)\n\tlabels = np.asarray(labels)\n\n\t# Printing log information\n\tprint(data_type, \"statistics being saved: \")\n\tprint(data_type, \"data shape\", data.shape)\n\tprint(data_type, \"label shape\", labels.shape)\n\n\t# saveing the file as npz file\n\tnp.savez_compressed(output_file, data=data, lables=labels)", "def create_raster_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def load_data(data_dir, resample_size, resample_spacing):\n image_filenames, label_filenames = zip(*list(iterate_folder(data_dir)))\n x = np.array([resample_img(sitk.ReadImage(f), \"Image\", resample_size, resample_spacing) for f in image_filenames]) \n y = np.array([resample_img(sitk.ReadImage(f), \"Label\", resample_size, resample_spacing) for f in label_filenames]) \n return x,y", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n 
test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n \"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n 
V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')", "def load_data_and_labels(self):\n gen = image.ImageDataGenerator()\n target_size = (224,224)\n if self.preprocess:\n print('Preprocessing data...')\n if not os.path.isdir(self.pproc_dir()):\n os.mkdir(self.pproc_dir())\n \n batch_arr = []\n for ld,segment in [(self.train_dir(), 'train'),\n (self.valid_dir(), 'valid')]:\n # TODO(ness): segment = os.basename(ld)\n flowgen = gen.flow_from_directory(\n ld,\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1)\n # Save the batches using method defined in utils.py\n data = np.concatenate([flowgen.next() for i in range(flowgen.n)])\n batches_dir = self.pproc_dir() + segment + '-bc'\n save_array(batches_dir, data)\n \n # Save the classes.\n cls_dir = self.pproc_dir() + segment + '-cl'\n save_array(cls_dir, flowgen.classes)\n \n batch_arr.append((data, flowgen.classes, flowgen.class_indices))\n \n # Set the data.\n self.training_data = batch_arr[0][0]\n self.validation_data = batch_arr[1][0]\n \n # Classes are zero-indexed and represent a category in\n # numerical form. So if the classes are 'dog' and 'cat',\n # the possible class values will be 0 and 1.\n self.trn_classes = batch_arr[0][1]\n self.val_classes = batch_arr[1][1]\n \n # Labels are the one-hot encoded (i.e. categorical)\n # version of the classes. In other words, if there are\n # 5 classes and an element belongs to class 2,\n # its label will be [0,0,1,0,0] (index 1).\n self.training_labels = to_categorical(batch_arr[0][1])\n self.validation_labels = to_categorical(batch_arr[1][1])\n \n # Class indices are dictionaries of the form\n # {'category_name': 0, 'category_name_2: 1}. They\n # make the mapping between numerical class indices and\n # a human-readable category name. They are (should be...)\n # the same for validation and training, so only load them\n # once, after sanity checking.\n self.cindices = batch_arr[0][2]\n print('Done preprocessing.')\n else:\n print('Loading data...')\n # Load the pre-saved data using methods defined in utils.py. See\n # preprocessing branch for the meaning of the data.\n self.training_data = load_array(self.pproc_dir() + 'train-bc')\n self.validation_data = load_array(self.pproc_dir() + 'valid-bc')\n self.trn_classes = load_array(self.pproc_dir() + 'train-cl')\n self.val_classes = load_array(self.pproc_dir() + 'valid-cl')\n self.training_labels = to_categorical(self.trn_classes)\n self.validation_labels = to_categorical(self.val_classes)\n \n # To get the class indices, we create the generator. It's cheap to\n # run since it doesn't actually load all the data.\n flowgen = gen.flow_from_directory(\n self.train_dir(),\n target_size=target_size,\n shuffle=False,\n class_mode=None,\n batch_size=1) \n self.cindices = flowgen.class_indices\n print('Done loading.')", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path, num_samples=batch_size * 10)\n\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)\n rescale_op = CV.Rescale(rescale, shift=0.0)\n hwc2chw_op = CV.HWC2CHW()\n type_cast_op = C.TypeCast(mstype.int32)\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n\n # apply DatasetOps\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = mnist_ds.repeat(repeat_size)\n\n return mnist_ds" ]
[ "0.66889775", "0.6415231", "0.6367699", "0.63560754", "0.6261791", "0.6167243", "0.61289775", "0.60442597", "0.6023371", "0.5907351", "0.5826012", "0.57797915", "0.5760415", "0.57251453", "0.57180756", "0.57146996", "0.57077223", "0.56844974", "0.5684313", "0.56831986", "0.56820303", "0.56811684", "0.5680994", "0.56770945", "0.5675886", "0.5668763", "0.56536174", "0.56481946", "0.5640629", "0.56321543", "0.5630778", "0.562844", "0.5618531", "0.5609468", "0.5598758", "0.55871236", "0.5583787", "0.558247", "0.557305", "0.5551966", "0.5551842", "0.55468595", "0.5543378", "0.55397904", "0.5538488", "0.5532023", "0.55298734", "0.5522743", "0.55219406", "0.55206054", "0.55192286", "0.5518041", "0.55144197", "0.550301", "0.54979676", "0.54901206", "0.547911", "0.547873", "0.54764223", "0.5475422", "0.54749256", "0.5473899", "0.54720896", "0.5460778", "0.54568774", "0.5456856", "0.5454505", "0.545107", "0.5447526", "0.54420507", "0.543256", "0.5424838", "0.5422801", "0.54187936", "0.5401625", "0.540153", "0.53982806", "0.5391401", "0.5369291", "0.53676754", "0.53670615", "0.53667706", "0.5362651", "0.53536546", "0.5352119", "0.5351791", "0.5349851", "0.5347283", "0.53434837", "0.5340143", "0.533958", "0.5338843", "0.5338016", "0.533469", "0.5332694", "0.533042", "0.53301597", "0.5329244", "0.53264207", "0.53258276" ]
0.78713274
0
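Each record in this dump pairs a short docstring-style query with a positive code document, an objective metadata dict, a list of negative code snippets, and a parallel list of scores for those negatives. As a minimal, hedged sketch of how such a record could be consumed for the triplet objective named in the metadata (the function name, the dict-shaped `record`, and the toy values below are illustrative assumptions, not part of the dataset):

def triplets_from_record(record):
    """Yield (query, positive_document, negative_document) tuples from one record."""
    query = record["query"]
    positive = record["document"]
    for negative in record["negatives"]:
        yield (query, positive, negative)

# Toy record shaped like the rows in this dump:
toy_record = {
    "query": "Initialize a new FullyConnectedNet.",
    "document": "def __init__(self, hidden_dims, ...): ...",
    "negatives": ["def initialise_network(self): ...", "def setup_net(self): ..."],
}
for q, pos, neg in triplets_from_record(toy_record):
    print(q, "| positive len:", len(pos), "| negative len:", len(neg))

Yielding lazily keeps memory flat even when a record carries many negatives, as the long negatives and score lists above do.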
Initialize a new FullyConnectedNet.
def __init__( self, hidden_dims, input_dim=3 * 32 * 32, num_classes=10, dropout=1, normalization=None, reg=0.0, weight_scale=1e-2, dtype=np.float32, seed=None, ): self.normalization = normalization self.use_dropout = dropout != 1 self.reg = reg self.num_layers = 1 + len(hidden_dims) self.dtype = dtype self.params = {} Din, Dout = input_dim, hidden_dims[0] for i in range(self.num_layers): self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout)) self.params['b' + str(i+1)] = np.zeros((Dout,)) Din = Dout if i < len(hidden_dims) - 1: Dout = hidden_dims[i+1] if i == len(hidden_dims) - 1: Dout = num_classes # BN params initialization if self.normalization != None: for i in range(self.num_layers - 1): self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i])) self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i])) # When using dropout we need to pass a dropout_param dictionary to each # dropout layer so that the layer knows the dropout probability and the mode # (train / test). You can pass the same dropout_param to each dropout layer. self.dropout_param = {} if self.use_dropout: self.dropout_param = {"mode": "train", "p": dropout} if seed is not None: self.dropout_param["seed"] = seed # With batch normalization we need to keep track of running means and # variances, so we need to pass a special bn_param object to each batch # normalization layer. You should pass self.bn_params[0] to the forward pass # of the first batch normalization layer, self.bn_params[1] to the forward # pass of the second batch normalization layer, etc. self.bn_params = [] if self.normalization == "batchnorm": self.bn_params = [{"mode": "train"} for i in range(self.num_layers - 1)] if self.normalization == "layernorm": self.bn_params = [{} for i in range(self.num_layers - 1)] # Cast all parameters to the correct datatype for k, v in self.params.items(): self.params[k] = v.astype(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.fulllayer1 = FullyConnectedLayer(n_input, hidden_layer_size)\n self.reglayer1 = ReLULayer()\n self.fulllayer2 = FullyConnectedLayer(hidden_layer_size, n_output)", "def __init__(self, *args):\n _snap.TModeNet_swiginit(self, _snap.new_TModeNet(*args))", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def __init__(self, in_channels=3, n_classes=21):\n super(U_Net, self).__init__()\n\n self.layer_0 = UNet_Encoder_Particular(in_channels, 64)\n\n self.layer_1 = UNet_Encoder(64, 128)\n self.layer_2 = UNet_Encoder(128, 256)\n self.layer_3 = UNet_Encoder(256, 512)\n self.layer_4 = UNet_Encoder(512, 512)\n\n self.layer_7 = UNet_Decoder(1024, 256)\n self.layer_8 = UNet_Decoder(512, 128)\n self.layer_9 = UNet_Decoder(256, 64)\n self.layer_10 = UNet_Decoder(128, 64)\n\n self.layer_11 = UNet_Decoder_Particular(64, n_classes)", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n 
map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def __init__(self, backboneNet, projection_head) -> None:\n super(SimCLR, self).__init__()\n self.Net = backboneNet\n self.projection_head = projection_head", "def setup_net(self):\n pass", "def __init__(self, **kwargs):\n #super(Net, self).__init__()\n nn.Module.__init__(self)\n # Build CNN\n module, shapes, optim = build_neuron_network(**kwargs)\n self._configuration = kwargs\n self.add_module('cnn', module)\n self.shapes = shapes\n # Loss and optimization\n self.criterion = nn.MSELoss(reduction='mean')\n self.optimizer = optim\n self._kwargs = kwargs", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def __init__(self):\n super(CustomNetwork, self).__init__()\n self.fc1 = nn.Linear(28*28, 500)\n self.fc2 = nn.Linear(500, 256)\n self.fc3 = nn.Linear(256, 10)\n self.loss = Loss()", "def __init__(self, n_input, n_output, hidden_layer_size, reg):\n self.reg = reg\n self.input_layer = FullyConnectedLayer(n_input, hidden_layer_size)\n self.relu = ReLULayer()\n self.output_layer = FullyConnectedLayer(hidden_layer_size, n_output)\n self.W_in = None\n self.W_out = None\n self.B_in = None\n self.B_out = None\n # TODO Create necessary layers", "def __init__(self, nclasses, device):\n super(HybridNN, self).__init__(nclasses, device)\n self.data_dev = qml.device(device, wires=self.req_qub_out)\n self.device = device\n self.model_dev = None\n self.nn = None\n self.bias = True", "def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)", "def __init__(self, in_channels=3, n_classes=21):\n super(UpNet, self).__init__()\n\n self.layer_1 = UpNetLayer_ParticularEncoder_2(in_channels, 64, 2)\n self.layer_2 = UpNetLayer_Encoder(64, 128, 2)\n self.layer_3 = UpNetLayer_Encoder(128, 256, 3)\n self.layer_4 = UpNetLayer_Encoder(256, 512, 3)\n self.layer_6 
= UpNetLayer_ParticularEncoder(512, 1024, 3)\n\n self.layer_inter = UpNetLayer_Dropout()\n\n self.layer_7 = UpNetLayer_Decoder_Particular(1024, 512, 3)\n self.layer_8 = UpNetLayer_Decoder(512, 256, 3)\n self.layer_9 = UpNetLayer_Decoder(256, 128, 3)\n self.layer_10 = UpNetLayer_Decoder(128, 64, 2)\n self.layer_11 = UpNetLayer_Decoder_Particular_2(64, n_classes, 2)", "def __init__(self):\n super(FcNet, self).__init__()\n\n # get size of some layers\n start_num = 48\n max_num = 200\n mid_num = 50\n end_num = 8\n \n # define regressor\n self.regress = nn.Sequential(\n nn.Linear(start_num,max_num,bias=True),\n nn.Sigmoid(),\n nn.Linear(max_num,mid_num,bias = True),\n nn.Sigmoid(),\n nn.Linear(mid_num,end_num, bias = True),\n nn.Sigmoid()\n )", "def __init__(self, nInputs, nOutputs, hiddenLayersDims, outputActivationFunctions = None, outputActivationDerivatives = None, hiddenActivationFunctions = None,\\\n\t\t\t\t hiddenActivationDerivatives = None): \n\n\t\tself._nInputs = nInputs\n\t\tself._nOutputs = nOutputs\n\n\t\tself._nHiddenLayers, self._nUnitsPerLayer = hiddenLayersDims\n\n\t\tself._outputActivationFunctions = outputActivationFunctions\n\t\tself._outputActivationDerivatives = outputActivationDerivatives\n\n\t\tself._hiddenActivationFunctions = hiddenActivationFunctions\n\t\tself._hiddenActivationDerivatives = hiddenActivationDerivatives\n\n\t\tself.initialiseActivationFunctions()\n\n\t\tself.initialiseNetwork()\n\n\t\tself._nBranches = len(self.collectAllBranches())", "def __init__(self):\n super(SimpleNet, self).__init__()\n\n self.conv_layers = None\n self.fc_layers = None\n self.loss_criterion = None\n\n #######################################################################\n # Student code begins\n #######################################################################\n\n self.conv_layers = nn.Sequential(\n nn.Conv2d(1, 10, kernel_size=5, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(3),\n nn.Conv2d(10, 20, kernel_size=5, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(3)\n )\n\n conv_out = int(20*5*5)\n\n self.fc_layers = nn.Sequential(\n nn.Linear(conv_out, 100),\n nn.Linear(100, 15)\n )\n\n self.loss_criterion = nn.MSELoss(reduction='mean')\n\n #######################################################################\n # Student code ends\n #######################################################################", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. 
That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, latent_space, input_features):\r\n\r\n self._latent_space = latent_space\r\n self._input_cells = input_features\r\n\r\n self._encoder = None\r\n self._decoder = None\r\n self._autoencoder = None\r\n self._configure_network()", "def __init__(self, num_visible, num_hidden, act_func='logistic'):\n\n print('Initializing network... ', end='')\n sys.stdout.flush()\n\n self.num_visible = num_visible\n self.num_hidden = num_hidden\n \n #self.reconstructed = np.zeros((self.num_examples, self.num_visible))\n\n self.weights = 0.1 * np.random.randn(num_visible, num_hidden)\n self.v_bias = np.zeros((1, num_visible))\n self.h_bias = -4.0 * np.ones((1, num_hidden))\n\n self.w_inc = np.zeros((num_visible, num_hidden))\n self.v_inc = np.zeros((1, num_visible))\n self.h_inc = np.zeros((1, num_hidden))\n\n if act_func == 'chaotic':\n self.act_func = self.chaotic_logistic\n else:\n self.act_func = self.logistic\n\n print('Done!')\n return", "def construct(self):\n self.input_size = self.numplanes * self.boardsize**2\n \n if self.hidden:\n layers = [\n torch.nn.Linear(self.input_size, self.hidden), \n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden, self.boardsize**2)\n ]\n else:\n layers = [torch.nn.Linear(self.input_size, self.boardsize**2)]\n\n self.layers = torch.nn.ModuleList(layers)\n self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)\n logging.info(\"Model initialized: %s\", self)", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def __init__(self):\r\n torch.nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.features = torchvision.models.vgg19_bn(pretrained=False).features\r\n self.features = torch.nn.Sequential(*list(self.features.children())\r\n [:-1]) # Remove pool5.\r\n # Linear classifier.\r\n self.fc = torch.nn.Linear(512**2, 11)", "def __init__(self):\r\n super(HarrisNet, self).__init__()\r\n\r\n image_gradients_layer = ImageGradientsLayer()\r\n\r\n\r\n # (1) ImageGradientsLayer: Compute image gradients Ix Iy. Can be\r\n # approximated by convolving with sobel filter.\r\n # (2) EigenvalueApproxLayer: Compute S_xx, S_yy and S_xy, the output is\r\n # a tensor of size num_image x 3 x width x height\r\n # (3) CornerResponseLayer: Compute R matrix, the output is a tensor of\r\n # size num_image x 1 x width x height\r\n # (4) NMSLayer: Perform non-maximum suppression, the output is a tensor\r\n # of size num_image x 1 x width x height\r\n\r\n layer_1 = ChannelProductLayer()\r\n layer_2 = SecondMomentMatrixLayer()\r\n layer_3 = CornerResponseLayer()\r\n layer_4 = NMSLayer()\r\n\r\n self.net = nn.Sequential(\r\n image_gradients_layer,\r\n layer_1,\r\n layer_2,\r\n layer_3,\r\n layer_4\r\n )", "def init_target_net(self, sess):\n sess.run(self.init_target_net_op)", "def initialize_network(self):\n if self.trainer is None:\n # -- Initialize from beginning and start training, since no model is provided -- #\n super().initialize_network() # --> This updates the corresponding variables automatically since we inherit this class\n \n # -- Create a Multi Head Generic_UNet from the current network using the provided split and first task name -- #\n # -- Do not rely on self.task for initialization, since the user might provide the wrong task (unintended), -- #\n # -- however for self.plans, the user needs to extract the correct plans_file path by himself using always the -- #\n # -- first task from a list of tasks since the network is build using the plans_file and thus the structure might vary -- #\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n # -- Add the split to the already_trained_on since it is simplified by now -- #\n self.already_trained_on[str(self.fold)]['used_split'] = self.mh_network.split\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, 
self.extension+'_trained_on.json'))\n return # Done with initialization\n\n # -- Some sanity checks and loads.. -- #\n # -- Check if the trainer contains plans.pkl file which it should have after sucessfull training -- #\n if 'fold_' in self.trainer.output_folder:\n # -- Remove the addition of fold_X from the output_folder, since the plans.pkl is outside of the fold_X directories -- #\n plans_dir = self.trainer.output_folder.replace('fold_', '')[:-1]\n else:\n # -- If no fold_ in output_folder, everything is fine -- #\n plans_dir = self.trainer.output_folder\n \n assert isfile(join(plans_dir, \"plans.pkl\")), \"Folder with saved model weights must contain a plans.pkl file..\"\n\n # -- Check that the trainer type is as expected -- #\n assert isinstance(self.trainer, (nnUNetTrainerV2, nnUNetTrainerMultiHead)), \"The trainer needs to be nnUNetTrainerV2 or nnUNetTrainerMultiHead..\"\n\n # -- If the trainer is already of Multi Head type, there should also be a pkl file with the sets it has already been trained on ! -- #\n if isinstance(self.trainer, nnUNetTrainerMultiHead): # If model was trained using nnUNetTrainerV2, the pickle file won't exist\n self.already_trained_on = load_json(join(self.trained_on_path, self.extension+'_trained_on.json'))\n \n # -- Load the model and parameters -- #\n # -- NOTE: self.trainer is a Multi Head Network, so it has a model, body and heads. -- #\n print(\"Loading trainer and setting the network for training\")\n self.trainer.load_final_checkpoint(train=True) # Load state_dict of the final model\n\n # -- Set mh_network -- #\n # -- Make it to Multi Head network if it is not already -- #\n # -- Use the first task in tasks_joined_name, since this represents the corresponding task name, whereas self.task -- #\n # -- is the task to train on, which is not equal to the one that will be initialized now using a pre-trained network -- #\n # -- (prev_trainer). -- #\n if isinstance(self.trainer, nnUNetTrainerV2):\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.trainer.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n else: # Already Multi Head type\n self.mh_network = self.trainer#.mh_network\n # -- Ensure that the split that has been previously used and the current one are equal -- #\n # -- NOTE: Do this after initialization, since the splits might be different before but still lead to the same level after -- #\n # -- simplification. -- #\n prev_split = self.already_trained_on[str(self.fold)]['used_split']\n assert self.mh_network.split == prev_split,\\\n \"To continue training on the fold {} the same split, ie. 
\\'{}\\' needs to be provided, not \\'{}\\'.\".format(self.fold, self.mh_network.split, prev_split)\n # -- Delete the prev_split --> not necessary anymore -- #\n del prev_split\n \n # -- Set self.network to the model in mh_network --> otherwise the network is not initialized and not in right type -- #\n self.network = self.mh_network.model", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def __init__(self, *args):\n _snap.TMMNet_swiginit(self, _snap.new_TMMNet(*args))", "def __init__(self, model_config):\n # Training Parameters\n self.__learning_rate = model_config[\"cnnLearningRate\"]\n\n # Network Parameters\n self.__num_classes = model_config[\"numClasses\"]\n self.__weight_decay = 1e-4\n self.__num_gpus = model_config[\"numGpus\"]\n self.__use_csnn = model_config[\"useCsnn\"]\n\n self.__csnn = Csnn(model_config)", "def __init__(self, vgg_net):\n super().__init__()\n # create a conv layer that corresponds to the first linear layer\n linear1 = vgg_net.classifier[0]\n conv = nn.Conv2d(512, 4096, 7, 7)\n\n # copy data into it\n conv.bias.data.copy_(linear1.bias.data)\n conv.weight.data.view(4096, -1).copy_(linear1.weight.data)\n\n # replace the layer in the sequential classifier part\n vgg_net.classifier = nn.Sequential(\n conv, nn.Flatten(1), *vgg_net.classifier[1:]\n )\n\n self.vgg_net = vgg_net", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def __init__(self, network: Network):\n self.graph = network.graph", "def __init__(self, net, batch=False):\n\n super().__init__()\n self.batch = batch\n self.net = net\n self.input = Input(self.net.layers()[0],\n self.net.layers()[1].inputSize(), batch)\n self.hiddens = []\n for i in range(1, len(net.layers())-1):\n nextLen = net.layers()[i+1].inputSize()\n self.hiddens.append(Hidden(net.layers()[i], nextLen, batch))\n self.output = Output(self.net.layers()[-1])", "def test_init_net_simple(self):\n net = ecn.NeuralNet(2, (2,), 1)\n self.assertEqual(2, len(net.weights.keys()))\n self.assertEqual((2, 3), np.shape(net.weights['h0']))\n self.assertEqual((1, 3), np.shape(net.weights['y']))\n print('Finished testing simple neural net init\\n')", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n conv1_size=5, conv1_n_chan=32, conv1_n_pool=2,\n conv2_size=5, conv2_n_chan=64, conv2_n_pool=2,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new 
network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.conv1_size = conv1_size\n self.conv1_n_chan = conv1_n_chan\n self.conv1_n_pool = conv1_n_pool\n self.conv2_size = conv2_size\n self.conv2_n_chan = conv2_n_chan\n self.conv2_n_pool = conv2_n_pool\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.conv1_size = net_architecture['conv1_size']\n self.conv1_n_chan = net_architecture['conv1_n_chan']\n self.conv1_n_pool = net_architecture['conv1_n_pool']\n self.conv2_size = net_architecture['conv2_size']\n self.conv2_n_chan = net_architecture['conv2_n_chan']\n self.conv2_n_pool = net_architecture['conv2_n_pool']\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n 
self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] ]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Convert input image to tensor with channel as last dimension\n # x_image = [-1 x im-height x im-width x n-input-channels]\n x_image_temp = tf.reshape(self.x, [-1,\n self.n_input_channels,self.y_res,self.x_res])\n x_image = tf.transpose(x_image_temp, [0,2,3,1])\n\n #########################################################\n # Set up convolutional layer 1\n # W = [im-height x im-width x n-input-channels x n-output-channels])\n self.conv1_shape = [self.conv1_size, self.conv1_size,\n self.n_input_channels, self.conv1_n_chan]\n self.W_conv1 = tf.Variable( tf.truncated_normal(\n shape=self.conv1_shape, stddev=0.1))\n self.b_conv1 = tf.Variable( tf.constant(0.1,\n shape=[self.conv1_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv1_lin = tf.nn.conv2d( x_image, self.W_conv1,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv1_relu = tf.nn.relu( self.conv1_lin + self.b_conv1 )\n\n # Max pooling\n self.conv1_kernel = [1, self.conv1_n_pool, self.conv1_n_pool, 1]\n self.conv1_pool = tf.nn.max_pool( self.conv1_relu,\n ksize=self.conv1_kernel, strides=self.conv1_kernel, padding='SAME')\n\n #########################################################\n # Convolutional layer 2\n self.conv2_shape = [self.conv2_size, self.conv2_size,\n self.conv1_n_chan, self.conv2_n_chan]\n self.W_conv2 = tf.Variable( tf.truncated_normal(\n shape=self.conv2_shape, stddev=0.1 ) )\n self.b_conv2 = tf.Variable( tf.constant(0.1,\n shape=[self.conv2_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv2_lin = tf.nn.conv2d( self.conv1_pool, self.W_conv2,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv2_relu = tf.nn.relu( self.conv2_lin + self.b_conv2 )\n\n # Max pooling\n self.conv2_kernel = [1, self.conv2_n_pool, self.conv2_n_pool, 1]\n self.conv2_pool = tf.nn.max_pool( self.conv2_relu,\n ksize=self.conv2_kernel, strides=self.conv2_kernel, padding='SAME')\n\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = [self.fc1_y_size * self.fc1_x_size * self.conv2_n_chan,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Flatten output from conv2\n self.conv2_pool_flat = tf.reshape(\n self.conv2_pool, [-1, self.fc1_shape[0]] )\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.conv2_pool_flat,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = 
tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet_1, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 1024, 3)\n self.layer_6 = SegnetLayer_Encoder(1024, 1024, 3)\n\n self.layer_7 = SegnetLayer_Decoder(1024, 1024, 3)\n self.layer_8 = SegnetLayer_Decoder(1024, 512, 3)\n self.layer_9 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_10 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_11 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_12 = SegnetLayer_Decoder(64, n_classes, 2)", "def init_efficientnet(num_classes: int) -> nn.Module:\n\n return EfficientNet.from_pretrained('efficientnet-b1', num_classes=num_classes)", "def __init__( self, config: 'bittensor.config' = None ):\n if config == None: config = neuron.config()\n self.config = config; neuron.check_config( self.config ); print ( self.config )\n bittensor.logging (\n config = self.config,\n logging_dir = self.config.neuron.full_path,\n )\n self.device = torch.device(\n device = self.config.neuron.device\n )\n self.wallet = bittensor.wallet(\n config = self.config\n )\n self.dendrite = bittensor.dendrite(\n config = self.config,\n wallet = self.wallet\n )\n self.subtensor = bittensor.subtensor(\n config = self.config\n )\n self.metagraph = bittensor.metagraph(\n config = self.config\n )\n self.axon = bittensor.axon (\n config = self.config,\n wallet = self.wallet,\n forward_callback = self.forward,\n backward_callback = self.backward\n )\n self.dataset = bittensor.dataloader (\n config = self.config\n )\n self.router = SGMOERouter(\n config = self.config\n ).to( self.device )\n self.nucleus = GPT2Nucleus(\n config = self.config,\n routing_callback = self.route\n ).to( self.device )\n self.optimizer = torch.optim.SGD(\n [\n {\"params\": self.router.parameters()},\n {\"params\": self.nucleus.parameters()}\n ],\n lr = self.config.neuron.learning_rate,\n weight_decay = self.config.neuron.weight_decay,\n )\n self.tensorboard = SummaryWriter(\n log_dir = self.config.neuron.tensorboard_dir\n )\n self.mechanism_weights = torch.ones( [0] )\n self.epoch = 0\n self.global_step = 0\n self.epoch_loss = math.inf/2\n self.best_epoch_loss = math.inf", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', 
['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 'd'])\n ]\n )\n self.assertIsNotNone(network)", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def __init__(self, functions=None, variables=None, global_resource=None):\n self.ssa = NetworkEnsemble()\n if functions is None:\n self.ssa.functions = dict()\n else:\n self.ssa.functions = functions\n if variables is None:\n self.ssa.variables = dict()\n else:\n self.ssa.variables = variables\n if global_resource is None:\n self.ssa.global_resource = dict()\n else:\n self.ssa.global_resource = global_resource", "def __init__(self, **kwargs):\n super().__init__()\n self.model_conv = models.resnet50(pretrained=True)\n for param in self.model_conv.parameters():\n param.requires_grad = False\n num_ftrs = self.model_conv.fc.in_features\n num_classes = 10\n self.model_conv.fc = nn.Linear(num_ftrs, num_classes)", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... 
\")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.x,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self, *args):\n _snap.TMMNetModeNetI_swiginit(self, _snap.new_TMMNetModeNetI(*args))", "def __init__(self, classes=2622):\n super().__init__()\n self.conv1 = _ConvBlock(3, 64, 64)\n self.conv2 = _ConvBlock(64, 128, 128)\n self.conv3 = _ConvBlock(128, 256, 256, 256)\n self.conv4 = _ConvBlock(256, 512, 512, 512)\n self.conv5 = _ConvBlock(512, 512, 512, 512)\n self.dropout = torch.nn.Dropout(0.5)\n self.fc1 = torch.nn.Linear(7 * 7 * 512, 4096)\n self.fc2 = torch.nn.Linear(4096, 4096)\n self.fc3 = torch.nn.Linear(4096, classes)", "def __init__(self, resnet, num_classes):\n super(FineTune, self).__init__()\n\n # Everything except the last linear layer\n self.features = nn.Sequential(*list(resnet.children())[:-1])\n num_ftrs = resnet.fc.in_features\n self.classifier = nn.Sequential(\n nn.Linear(num_ftrs, num_classes)\n )\n\n # # Freeze those weights\n # for param in self.features.parameters():\n # param.requires_grad = False", "def __init__(self, in_channels=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n 
self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def __init__(self):\n super(CNet, self).__init__()\n\n self.init_param_range = (-0.08, 0.08)\n\n ## Lookup tables for the state, action and previous action.\n self.action_lookup = nn.Embedding(3, 128)\n\n # self.state_dict_lookup = nn.Embedding(48, 128)\n self.own_c_lookup = nn.Embedding(129, 128)\n self.own_s_lookup = nn.Embedding(129, 128)\n\n self.th_1_lookup = nn.Embedding(115, 128)\n self.th_2_lookup = nn.Embedding(115, 128)\n self.th_3_lookup = nn.Embedding(115, 128)\n\n self.f_1_lookup = nn.Embedding(96, 128)\n self.f_2_lookup = nn.Embedding(96, 128)\n self.f_3_lookup = nn.Embedding(96, 128)\n self.f_4_lookup = nn.Embedding(96, 128)\n\n self.bu_msg_lookup = nn.Embedding(5, 128)\n\n # self.state_tensor_lookup = nn.Embedding(48, 128)\n self.i_t_lookup = nn.Embedding(24, 128)\n self.lives_lookup = nn.Embedding(10, 128)\n\n self.prev_action_lookup = nn.Embedding(91, 128)\n\n # RNN to approximate the agent’s action-observation history.\n self.rnn = nn.GRU(input_size=128, hidden_size=128, num_layers=2)\n\n # 2 layer MLP with batch normalization, for producing output from RNN top layer.\n self.output = nn.Sequential(\n nn.Linear(128, 128),\n # nn.BatchNorm1d(128),\n nn.ReLU(),\n nn.Linear(128, 90)\n )", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_dropout=1.0, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... 
\")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Set up dropout option for inputs\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.x_drop = tf.nn.dropout(self.x, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.x_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self):\n super(enc_clf, self).__init__()\n\n self.fc1 = nn.Linear(784, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 512)\n self.fc4 = nn.Linear(512, 10)", "def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)", "def __init__(self, module: torch.nn.Module, loss: torch.nn.Module,\n input_node_name='0',\n output_node_name='output', label_node_name='label',\n loss_node_name='loss',\n events: List[d5.ExecutorEvent] = [],\n device: d5.DeviceType = None, with_outputs = False):\n # Do not call super() here!\n self.network = PyTorchNativeNetwork(module)\n self.devname = 'cuda' if device is None or device.is_gpu() else 'cpu'\n self.events = events\n self.model = module.to(self.devname)\n self.is_training = True\n self.loss = loss.to(self.devname) if loss is not None else None\n self.innode = input_node_name\n self.outnode = output_node_name\n self.labelnode = label_node_name\n self.lossnode = loss_node_name\n self.with_outputs = with_outputs", "def __init__(self, mode, cfg):\n super(DMCM, self).__init__()\n\n self.conv_net = cfg.get_image_net(mode)\n self.sparse_net = cfg.get_genes_net(mode)\n\n # Matrix network does not need weight initialization because there can\n # be no vanishing gradients.\n self.conv_net.apply(_init_weights_xavier)", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def __init__(self, in_channels=3, in_channels1=3, n_classes=21):\n super(SegNet, self).__init__()\n\n self.layer_1 = SegnetLayer_Encoder(in_channels, 64, 2)\n self.layer_2 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_3 = SegnetLayer_Encoder(128, 
256, 3)\n self.layer_4 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_5 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_6 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_7 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_8 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_9 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_10 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_11 = SegnetLayer_Encoder(in_channels1, 64, 2)\n self.layer_12 = SegnetLayer_Encoder(64, 128, 2)\n self.layer_13 = SegnetLayer_Encoder(128, 256, 3)\n self.layer_14 = SegnetLayer_Encoder(256, 512, 3)\n self.layer_15 = SegnetLayer_Encoder(512, 512, 3)\n\n self.layer_16 = SegnetLayer_Decoder(512, 512, 3)\n self.layer_17 = SegnetLayer_Decoder(512, 256, 3)\n self.layer_18 = SegnetLayer_Decoder(256, 128, 3)\n self.layer_19 = SegnetLayer_Decoder(128, 64, 2)\n self.layer_110 = SegnetLayer_Decoder(64, n_classes, 2)\n\n self.layer_1110 = UNet_Decoder_Particular(n_classes * 2, n_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def _build_network(self):\n pass", "def __init__(self, neuron_count):\n # The current state of the thermal network.\n self.current_state = [0.0] * neuron_count\n\n # The weights.\n self.weights = np.zeros( [neuron_count*neuron_count] )\n\n # The neuron count.\n self.neuron_count = neuron_count", "def __init__(self):\n self.topology = None\n self.learningRate = None\n self.momentum = None\n self.name = None\n self.size = None\n #self._hiddenActiv_fun_key = None\n #self._outActiv_fun_key = None\n #self.output_activation = None\n #self.hidden_activation = None", "def __init__(self):\n super().__init__()\n \n # convolutional layers\n self.conv1 = nn.Conv2d(1, 16, kernel_size=3) # 16x(14-2)x(14-2) = 16x12x12\n self.conv2 = nn.Conv2d(16, 32, kernel_size=3) # 32x10x10 => pooling = 32x5x5\n \n # fully connected layers\n self.fc1 = nn.Linear(32 * 5 * 5, 64)\n self.fc2 = nn.Linear(64, 10)\n self.fc3 = nn.Linear(20, 10)\n self.fc4 = nn.Linear(10, 1)\n \n # regularizers\n self.drop = nn.Dropout(0.1)\n self.drop2d = nn.Dropout2d(0.1)\n self.pool = nn.MaxPool2d(kernel_size=2)\n self.bn2d = nn.BatchNorm2d(16, affine=False)\n self.bn = nn.BatchNorm1d(64, affine=False)\n\n # activation functions\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n \n # Initialize weights\n self.apply(self.weights_init)", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n 
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def __init__(self):\r\n # A dummy layer does nothing\r\n self.weights = np.zeros(shape=(input.shape[1], 10))\r\n bias = np.zeros(shape=(10,))\r\n pass", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def __init__(self, c):\n super(DeepConv, self).__init__(self)\n\n self.block_1 = BaseBlock(c, 16)\n self.block_2 = BaseBlock(16, 32)\n self.block_3 = BaseBlock(32, 32)\n\n self._body = nn.Sequential(self.block_1, self.block_2, self.block_3)", "def __init__(self):\n self.layers = []\n self.best_loss = None", "def trainNet():", "def __init__(\n self, state_dim, action_dim, sizes, activations, use_batch_norm: bool = False\n ) -> None:\n super().__init__()\n assert state_dim > 0, \"state_dim must be > 0, got {}\".format(state_dim)\n assert action_dim > 0, \"action_dim must be > 0, got {}\".format(action_dim)\n self.state_dim = state_dim\n self.action_dim = action_dim\n assert len(sizes) == len(\n activations\n ), \"The numbers of sizes and activations must match; got {} vs {}\".format(\n len(sizes), len(activations)\n )\n\n # The last layer gives the concentration of the distribution.\n self.fc = FullyConnectedNetwork(\n [state_dim] + sizes + [action_dim],\n activations + [\"linear\"],\n use_batch_norm=use_batch_norm,\n )", "def __init__(self,\n n_occupancy: int = 3,\n n_neighbor_sites_list: int = 19,\n n_permutation_list: int = 6,\n n_task: int = 1,\n dropout_rate: float = 0.4,\n n_conv: int = 2,\n n_features: int = 44,\n sitewise_n_feature: int = 25,\n **kwargs):\n\n def init_weights(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n\n model = LCNN(n_occupancy, n_neighbor_sites_list, n_permutation_list,\n n_task, dropout_rate, n_conv, n_features,\n sitewise_n_feature)\n model.apply(init_weights)\n loss = L2Loss()\n output_types = ['prediction']\n super(LCNNModel, self).__init__(model,\n loss=loss,\n output_types=output_types,\n **kwargs)", "def __init__(self, N_sym, n_nodes, activations, N_element, bias = True, scaling = None):\n super(MultiLayerNet, self).__init__()\n N_layers = len(n_nodes)\n if N_layers == 0:\n self.net = torch.nn.Linear(N_sym, N_element, bias = bias)\n else:\n layers = []\n for n in range(N_layers):\n if n == 0:\n layers += [torch.nn.Linear(N_sym, n_nodes[n], bias = bias)]\n layers += [activations[n]]\n else:\n layers += [torch.nn.Linear(n_nodes[n-1], n_nodes[n], bias = bias)]\n layers += [activations[n]]\n layers += [torch.nn.Linear(n_nodes[-1], N_element, bias = bias)]\n self.net = torch.nn.Sequential(*layers)\n \n self.scaling = scaling", "def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,\n weight_scale=1e-3, reg=0.0):\n self.params = {}\n self.reg = reg\n\n 
############################################################################\n # TODO: Initialize the weights and biases of the two-layer net. Weights #\n # should be initialized from a Gaussian centered at 0.0 with #\n # standard deviation equal to weight_scale, and biases should be #\n # initialized to zero. All weights and biases should be stored in the #\n # dictionary self.params, with first layer weights #\n # and biases using the keys 'W1' and 'b1' and second layer #\n # weights and biases using the keys 'W2' and 'b2'. #\n ############################################################################\n self.params['W1'] = weight_scale * np.random.randn(input_dim, hidden_dim)\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = weight_scale * np.random.randn(hidden_dim, num_classes)\n self.params['b2'] = np.zeros(num_classes)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################", "def __init__(self, *args):\n _snap.TNEGraph_swiginit(self, _snap.new_TNEGraph(*args))", "def test_ctor(self):\r\n # the network model itself\r\n model = densenet.DenseNet(\r\n depth=40,\r\n Block=densenet.BasicBlock,\r\n growth_rate=12,\r\n compression_rate=1.0,\r\n mask=True,\r\n num_classes=100,\r\n )\r\n num_params = model_utils.get_model_num_params(model)\r\n\r\n self.assertAlmostEqual(num_params, 1.06, places=1) # around 1.7\r\n self.assertEqual(model_utils.get_num_conv2d_layers(model), 40)", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def __init__(self,\n image_channels,\n num_classes):\n super().__init__()\n\n self.model = torchvision.models.resnet18(pretrained=True)\n self.model.fully_connected = nn.Linear(224, 10)", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", 
Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, 
upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def __init__(self, settings):\n super(CaffeNet, self).__init__(settings)\n\n self._range_scale = 1.0 # not needed; image already in [0,255]\n\n \n #ULF[todo]: explain, make this a setting\n self._net_channel_swap = (2,1,0)\n #self._net_channel_swap = None\n if self._net_channel_swap:\n self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])\n else:\n self._net_channel_swap_inv = None\n\n\n # (1) import caffe library\n #\n sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))\n import caffe\n print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__\n\n # Check if the imported caffe provides all required functions\n self._check_caffe_version(caffe)\n \n # Set the mode to CPU or GPU.\n # Note: in the latest Caffe versions, there is one Caffe object\n # *per thread*, so the mode must be set per thread!\n # Here we set the mode for the main thread; it is also separately\n # set in CaffeProcThread.\n if settings.caffevis_mode_gpu:\n caffe.set_mode_gpu()\n print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'\n else:\n caffe.set_mode_cpu()\n print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'\n print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'\n\n\n # (2) load the caffe model\n # \n # ULF[hack]: make Caffe silent - there should be a better\n # (i.e. official) way to do so. 
We only want to suppress\n # the info (like network topology) while still seeing warnings\n # and errors!\n suppress_output = (hasattr(self.settings, 'caffe_init_silent')\n and self.settings.caffe_init_silent)\n\n if suppress_output:\n # open 2 file descriptors\n null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]\n # save the current file descriptors to a tuple\n original_fds = os.dup(1), os.dup(2)\n # put /dev/null fds on stdout (1) and stderr (2)\n os.dup2(null_fds[0], 1)\n os.dup2(null_fds[1], 2)\n\n self.net = caffe.Classifier(\n settings.caffevis_deploy_prototxt,\n settings.caffevis_network_weights,\n mean = None, # Set to None for now, assign later # self._data_mean,\n channel_swap = self._net_channel_swap,\n raw_scale = self._range_scale,\n )\n \n if suppress_output:\n # restore file original descriptors for stdout (1) and stderr (2)\n os.dup2(original_fds[0], 1)\n os.dup2(original_fds[1], 2)\n # close the temporary file descriptors\n os.close(null_fds[0])\n os.close(null_fds[1])\n print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'\n\n self._init_data_mean()\n self._check_force_backward_true()", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, _snap.new_TDirNet(*args))", "def initialize_network_los() -> bool:\n return True", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def __init__(self, C, num_classes):\n super(AuxiliaryHeadImageNet, self).__init__()\n self.features = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),\n nn.Conv2d(C, 128, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 768, 2, bias=False),\n # NOTE: This batchnorm was omitted in my earlier implementation due to a typo.\n # Commenting it out for consistency with the experiments in the paper.\n # nn.BatchNorm2d(768),\n nn.ReLU(inplace=True)\n )\n self.classifier = nn.Linear(768, num_classes)", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def __init__(self, net, batch):\n self.net = net\n self.train_batch_is(batch)\n self.image_height = len(batch.image_array[0][0])\n 
self.image_width = len(batch.image_array[0][0][0])\n self.net.reset_forward()", "def __init__(self, name, config):\n super(RelationalNetwork, self).__init__(name, RelationalNetwork, config)\n\n # Get key mappings.\n self.key_feature_maps = self.stream_keys[\"feature_maps\"]\n self.key_question_encodings = self.stream_keys[\"question_encodings\"]\n self.key_outputs = self.stream_keys[\"outputs\"]\n\n # Retrieve input sizes from globals.\n self.feature_maps_height = self.globals[\"feature_maps_height\"]\n self.feature_maps_width = self.globals[\"feature_maps_width\"]\n self.feature_maps_depth = self.globals[\"feature_maps_depth\"]\n self.question_encoding_size = self.globals[\"question_encoding_size\"]\n \n # Create \"object\" coordinates.\n self.obj_coords = []\n for h in range(self.feature_maps_height):\n for w in range(self.feature_maps_width):\n self.obj_coords.append((h,w))\n\n # Calculate input size to the g_theta: two \"objects\" + question (+ optionally: image size)\n input_size = 2 * self.feature_maps_depth + self.question_encoding_size\n\n # Create the module list.\n modules = []\n\n # Retrieve dropout rate value - if set, will put dropout between every layer.\n dropout_rate = self.config[\"dropout_rate\"]\n\n # Create the model, i.e. the \"relational\" g_theta network.\n g_theta_sizes = self.config[\"g_theta_sizes\"]\n if type(g_theta_sizes) == list and len(g_theta_sizes) > 1:\n # First input dim.\n input_dim = input_size\n for hidden_dim in g_theta_sizes:\n # Add linear layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n # Add activation and dropout.\n modules.append( torch.nn.ReLU() )\n if (dropout_rate > 0):\n modules.append( torch.nn.Dropout(dropout_rate) )\n # Remember input dim of next layer.\n input_dim = hidden_dim\n\n # Add output layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n\n self.logger.info(\"Created g_theta network with {} layers\".format(len(g_theta_sizes)+1))\n\n else:\n raise ConfigurationError(\"'g_theta_sizes' must contain a list with numbers of neurons in g_theta layers (currently {})\".format(self.hidden_sizes))\n\n # Export output_size to globals.\n self.output_size = g_theta_sizes[-1]\n self.globals[\"output_size\"] = self.output_size\n\n # Finally create the sequential model out of those modules.\n self.g_theta = torch.nn.Sequential(*modules)", "def __init__(self, nx, nodes):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) is not int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n # weights vector for the hidden layer\n # default mean is 0\n # default stddev is 1\n self.__W1 = np.random.normal(size=(nodes, nx))\n # The bias for the hidden layer. Upon instantiation,\n # it should be initialized with 0’s.\n self.__b1 = np.zeros((nodes, 1))\n # The activated output for the hidden layer. 
Upon instantiation,\n # it should be initialized to 0\n self.__A1 = 0\n # weights vector for the output neuron\n # default mean is 0\n # default stddev is 1\n self.__W2 = np.random.normal(size=(1, nodes))\n # bias for the output neuron\n self.__b2 = 0\n # activated output for the output neuron (prediction)\n self.__A2 = 0", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def __init__(self, in_channels=3):\n super().__init__()\n model_list = nn.ModuleList()\n model_list.append(\n ConvBlock(in_channels, 64, leaky=True, instance_norm=False, bias=True))\n model_list.append(ConvBlock(64, 128, leaky=True,\n instance_norm=True, bias=False))\n model_list.append(ConvBlock(128, 256, leaky=True,\n instance_norm=True, bias=False))\n model_list.append(ConvBlock(256, 512, leaky=True,\n instance_norm=True, bias=False, stride=1))\n model_list.append(nn.Conv2d(512, 1, kernel_size=4,\n stride=1, padding=1, bias=True))\n self.model = nn.Sequential(*model_list)\n\n self._initialize_params()" ]
[ "0.69023705", "0.6850007", "0.6721665", "0.65706", "0.6532473", "0.64673805", "0.64505595", "0.64458954", "0.6434368", "0.64332235", "0.64150655", "0.6372491", "0.636863", "0.6341182", "0.6330495", "0.63025963", "0.6276662", "0.62390316", "0.6235152", "0.61963755", "0.61900246", "0.61785716", "0.6175203", "0.61713195", "0.6170169", "0.615478", "0.614945", "0.61450905", "0.61366504", "0.61155593", "0.6106042", "0.61050206", "0.610115", "0.6089653", "0.60848165", "0.60667264", "0.6065172", "0.6061645", "0.60612935", "0.60600924", "0.6053745", "0.6049151", "0.6041936", "0.6038751", "0.60334927", "0.6027372", "0.6026258", "0.602363", "0.6019186", "0.6014522", "0.60068935", "0.59956425", "0.59808713", "0.5972463", "0.59662366", "0.5958748", "0.5956478", "0.5951118", "0.59432024", "0.59353065", "0.59336025", "0.59329677", "0.59303707", "0.5929328", "0.59269255", "0.5915176", "0.5906102", "0.59045327", "0.5903651", "0.5902044", "0.58976835", "0.5888555", "0.5886091", "0.5883432", "0.58750266", "0.5859851", "0.585297", "0.58522326", "0.5840696", "0.5839813", "0.58355284", "0.5835302", "0.5831391", "0.581727", "0.58137083", "0.5809756", "0.5804382", "0.5799944", "0.57997555", "0.57984453", "0.5796514", "0.57909", "0.57896507", "0.57896507", "0.57896507", "0.5785398", "0.57821995", "0.57678145", "0.5767623", "0.57550913", "0.57536525" ]
0.0
-1
Compute loss and gradient for the fully-connected net.
def loss(self, X, y=None): X = X.astype(self.dtype) mode = "test" if y is None else "train" # Set train/test mode for batchnorm params and dropout param since they # behave differently during training and testing. if self.use_dropout: self.dropout_param["mode"] = mode if self.normalization == "batchnorm": for bn_param in self.bn_params: bn_param["mode"] = mode scores = None cache_affine = [] cache_bn = [] cache_ln = [] cache_relu = [] cache_dropout = [] # Forward Pass out = X for i in range(self.num_layers - 1): # Affine W, b = self.params['W' + str(i+1)], self.params['b' + str(i+1)] out, cache = affine_forward(out, W, b) cache_affine.append(cache) # BN if self.normalization=='batchnorm': gamma, beta = self.params['gamma' + str(i+1)], self. params['beta' + str(i+1)] out, cache = batchnorm_forward(out, gamma, beta, self.bn_params[i]) cache_bn.append(cache) if self.normalization=='layernorm': gamma, beta = self.params['gamma' + str(i+1)], self.params['beta' + str(i+1)] out, cache = layernorm_forward(out, gamma, beta, self.bn_params[i]) cache_ln.append(cache) # ReLU out, cache = relu_forward(out) cache_relu.append(cache) # Dropout if self.use_dropout: out, cache = dropout_forward(out, self.dropout_param) cache_dropout.append(cache) # Input update x = out # Last Layer W, b = self.params['W' + str(self.num_layers)], self.params['b' + str(self.num_layers)] scores, cache = affine_forward(x, W, b) cache_affine.append(cache) # If test mode return early if mode == "test": return scores loss, grads = 0.0, {} N = X.shape[0] weight_name = 'W' + str(self.num_layers) bias_name = 'b' + str(self.num_layers) # Loss calculation loss, dx = softmax_loss(scores, y) # Last layer backwards dout, grads[weight_name], grads[bias_name] = affine_backward(dx, cache_affine.pop()) # Last layer regularization loss += 0.5 * self.reg * np.sum(np.square(self.params[weight_name])) #grads[weight_name] /= N grads[weight_name] += self.reg * self.params[weight_name] # Layers: self.num_layer - 1 -> 1 i = self.num_layers - 2 while i >= 0: # Dropout if self.use_dropout: dout = dropout_backward(dout, cache_dropout.pop()) # ReLU dout = relu_backward(dout, cache_relu.pop()) # BN if self.normalization=='batchnorm': dout, grads['gamma' + str(i+1)], grads['beta' + str(i+1)] = batchnorm_backward(dout, cache_bn.pop()) #LN if self.normalization=='layernorm': dout, grads['gamma' + str(i+1)], grads['beta' + str(i+1)] = layernorm_backward(dout, cache_ln.pop()) # Affine weight_name = 'W' + str(i+1) bias_name = 'b' + str(i+1) dout, grads[weight_name], grads[bias_name] = affine_backward(dout, cache_affine.pop()) # Regularization loss += 0.5 * self.reg * np.sum(np.square(self.params[weight_name])) #grads[weight_name] /= N grads[weight_name] += self.reg * self.params[weight_name] i -= 1 return loss, grads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. 
#\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n scores = None\n ############################################################################\n # Implementing the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n ############################################################################\n\n l_input = X.copy()\n out = []\n cache = []\n for i in range(self.num_layers - 1):\n # layerwise compute the forward pass and store outputs in out list\n key = ['W' + str(i+1), 'b' + str(i+1)]\n lout, lcache = affine_sigmoid_forward(l_input, self.params[key[0]], self.params[key[1]])\n out.append(lout)\n cache.append(lcache)\n l_input = lout\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n scores, lcache = affine_forward(out[self.num_layers - 2], self.params[key[0]], self.params[key[1]])\n cache.append(lcache)\n \n # regularization parameter compute by summing square of all weight vectors\n R = 0\n for i in range(1, self.num_layers + 1):\n key = 'W' + str(i)\n R += np.sum(np.power(self.params[key], 2))\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n\n ########################\n # Backward pass to compute the loss and gradients\n ########################\n\n loss, dscore = softmax_loss(scores, y)\n # Apply regularization of the loss \n loss = loss + 0.5 * self.reg * R\n\n key = ['W' + str(self.num_layers), 'b' + str(self.num_layers)]\n dx, grads[key[0]], grads[key[1]] = affine_backward(dscore, cache[self.num_layers - 1])\n grads[key[0]] += self.reg * self.params[key[0]] \n\n for i in range(self.num_layers - 1, 0, -1):\n key = ['W' + str(i), 'b' + str(i)]\n dx, grads[key[0]], grads[key[1]] = affine_sigmoid_backward(dx, cache[i-1])\n # Apply regularization to the gradients\n grads[key[0]] += self.reg * self.params[key[0]]\n\n return loss, grads", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def compute_loss(self):", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def loss(self, X, y=None):\r\n\r\n # Findout if it's trainig or test time\r\n mode = 'train'\r\n if y is None:\r\n mode = 'test'\r\n\r\n # Set the mode for batch normalization and dropout parameters if needed.\r\n if self.use_batch_norm:\r\n for bn_param in self.bn_params:\r\n bn_param['mode'] = mode\r\n if self.use_dropout:\r\n self.dropout_params['mode'] = mode\r\n\r\n # Compute the forward pass fo the cnn.\r\n caches = []\r\n input_layer = X\r\n for i in 
range(1, self.num_conv_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = conv_bn_relu_pool_forward(input_layer, w, b, gamma, beta,\r\n self.conv_params, self.bn_params[i-1], \r\n self.pool_params)\r\n else:\r\n layer_score, layer_cache = conv_relu_pool_forward(input_layer, w, b, self.conv_params, \r\n self.pool_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the fully connected net.\r\n num_layers = self.num_conv_layers + self.num_hidden_layers\r\n for i in range(self.num_conv_layers+1, num_layers+1):\r\n w = self.params['W{}'.format(i)]\r\n b = self.params['b{}'.format(i)]\r\n if self.use_batch_norm:\r\n gamma = self.params['gamma{}'.format(i)]\r\n beta = self.params['beta{}'.format(i)]\r\n layer_score, layer_cache = affine_bn_relu_forward(input_layer, w, b, gamma, beta,\r\n self.bn_params[i-1],\r\n dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n else:\r\n layer_score, layer_cache = affine_relu_forward(input_layer, w, b, dropout=self.use_dropout, \r\n dropout_param=self.dropout_params)\r\n input_layer = layer_score\r\n caches.append(layer_cache)\r\n\r\n # Compute the forward pass for the output layer.\r\n w = self.params['W{}'.format(i+1)]\r\n b = self.params['b{}'.format(i+1)]\r\n scores, output_cache = affine_forward(input_layer, w, b)\r\n\r\n # If testing time return the scores\r\n if mode == 'test':\r\n return scores\r\n\r\n # Compute the loss\r\n loss, dscores = softmax_loss(scores, y)\r\n\r\n # Add regularization to the loss and the corresponding gradient.\r\n grads = {}\r\n for i in range(1, num_layers+2):\r\n w = 'W{}'.format(i)\r\n loss += 0.5 * self.reg * np.sum(self.params[w]**2)\r\n grads[w] = self.reg * self.params[w]\r\n\r\n # Compute the gradients using backprop on the fully connected net.\r\n # Start with the output layer\r\n w = 'W{}'.format(num_layers+1)\r\n b = 'b{}'.format(num_layers+1)\r\n dx, dw, db = affine_backward(dscores, output_cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n for i in range(num_layers, self.num_conv_layers, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(dx, cache, self.use_dropout)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = affine_relu_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n # Compute the gradeints using backprop on the convolutional layers.\r\n for i in range(self.num_conv_layers, 0, -1):\r\n cache = caches[i-1]\r\n w = 'W{}'.format(i)\r\n b = 'b{}'.format(i)\r\n if self.use_batch_norm:\r\n gamma = 'gamma{}'.format(i)\r\n beta = 'beta{}'.format(i)\r\n dx, dw, db, dgamma, dbeta = conv_bn_relu_pool_backward(dx, cache)\r\n grads[gamma] = dgamma\r\n grads[beta] = dbeta\r\n else:\r\n dx, dw, db = conv_relu_pool_backward(dx, cache)\r\n grads[w] += dw\r\n grads[b] = db\r\n\r\n return loss, grads", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the 
gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def compute_gradients_and_update(batch_y0, batch_yN):\n with tf.GradientTape() as g:\n pred_y = node_network(tb, batch_y0)\n loss = tf.reduce_mean(tf.abs(pred_y - batch_yN))\n grads = g.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n return loss", "def compute_gradients(self):\n wlist = self._neural_net.weights()\n blist = self._neural_net.biases()\n\n nmatrices = len(wlist)\n weight_grad = []\n bias_grad = []\n\n cost_function = self._cost_function\n weight_der = WeightDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n biase_der = BiasDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n for layer in range(nmatrices):\n weight_grad.append(np.zeros(wlist[layer].shape))\n bias_grad.append(np.zeros(blist[layer].shape))\n\n rows, cols = wlist[layer].shape\n for i in range(rows):\n for j in range(cols):\n loc = ParameterLocation(layer=layer, row=i, column=j)\n weight_grad[layer][i][j] = weight_der.partial_derivative(loc)\n\n for row in range(rows):\n loc = ParameterLocation(layer=layer, row=row, column=0)\n bias_grad[layer][row] = biase_der.partial_derivative(loc)\n\n return weight_grad, bias_grad", "def loss_gradient(self, x, y):\n x_preproc = self._apply_processing(x)\n x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False)\n\n # Adjust the shape of y for loss functions that do not take labels in one-hot encoding\n if self._reduce_labels:\n y_defences = np.argmax(y_defences, axis=1)\n\n grads = self._loss_grads([x_defences, y_defences])[0]\n grads = self._apply_defences_gradient(x_preproc, grads)\n grads = self._apply_processing_gradient(grads)\n assert grads.shape == x_preproc.shape\n\n return grads", "def build_loss(self):\n import tensorflow as tf\n\n y_1d = [tf.reduce_sum(tf.multiply(self.variables[\"y\"][i], self.variables[\"y_action\"][i]), axis=1) for i in range(len(self.variables[\"y\"]))]\n loss = np.sum([tf.nn.l2_loss(y_1d[i] - self.variables[\"y_true\"]) for i in range(len(y_1d))])\n\n l1_reg = 0\n l2_reg = 0\n\n keys = sorted(self.variables.keys())\n keys = [key for key in keys if critere_keys(key) and \"W\" in key]\n for key in keys:\n l1_reg += tf.reduce_sum(tf.abs(self.variables[key]))\n l2_reg += tf.nn.l2_loss(self.variables[key])\n\n self.loss = loss + self.alpha_reg * l1_reg + self.beta_reg * l2_reg\n\n self.train_step = 
tf.train.RMSPropOptimizer(self.decay_learning_rate,\n decay=0.99, momentum=0., centered=True).minimize(self.loss, global_step=self.global_step)", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = 
self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #compute the value of the loss function [cross entropy]\n\n #Backpropagation of the gradient\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #flatten the matrix to a vector\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Update the parameters of each layer (if the layer has any)\n #training with the momentum method: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #assign the new weights after updating all parameters\n self.Weights = weights\n self.Biases = biases\n\n #return the ratio of the loss value to the batch size\n return loss/batchSize", "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. 
It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def checkBatchGradient():\n\n from mynnet import InputLayer\n\n n,b,d,o = (1, 4, 3, 7) # sequence length, batch size, hidden size, output size\n input_size = 10\n \n lstm = create_cell(input_size, (n,b,d,o))\n\n X = np.random.randn(n,b,input_size)\n c0 = np.random.randn(b,d)\n \n print \"c0:\", c0\n\n # batch forward backward\n H, Ct = lstm.forward(X, c0)\n wrand = np.random.randn(*H.shape)\n loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think\n dH = wrand\n dX, dW, dV, dc0 = lstm.backward(dH)\n\n def fwd():\n h, _ = lstm.forward(X, c0)\n return np.sum(h * wrand)\n\n # now gradient check all\n delta = 1e-7\n rel_error_thr_warning = 1e-2\n rel_error_thr_error = 1\n tocheck = [X, lstm.W, lstm.V, c0]\n grads_analytic = [dX, dW, dV, dc0]\n names = ['X', 'W', 'V', 'c0']\n for j in xrange(len(tocheck)):\n mat = tocheck[j]\n dmat = grads_analytic[j]\n name = names[j]\n # gradcheck\n for i in xrange(mat.size):\n old_val = mat.flat[i]\n mat.flat[i] = old_val + delta\n loss0 = fwd()\n mat.flat[i] = old_val - delta\n loss1 = fwd()\n mat.flat[i] = old_val\n\n grad_analytic = 
dmat.flat[i]\n grad_numerical = (loss0 - loss1) / (2 * delta)\n\n if grad_numerical == 0 and grad_analytic == 0:\n rel_error = 0 # both are zero, OK.\n status = 'OK'\n elif abs(grad_numerical) < 1e-7 and abs(grad_analytic) < 1e-7:\n rel_error = 0 # not enough precision to check this\n status = 'VAL SMALL WARNING'\n else:\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n status = 'OK'\n if rel_error > rel_error_thr_warning: status = 'WARNING'\n if rel_error > rel_error_thr_error: status = '!!!!! NOTOK'\n\n # print stats\n print '%s checking param %s index %s (val = %+8f), analytic = %+8f, numerical = %+8f, relative error = %+8f' \\\n % (status, name, `np.unravel_index(i, mat.shape)`, old_val, grad_analytic, grad_numerical, rel_error)", "def train_1layer_network(x_train, y_train):\n W = np.random.normal(0, 1, (2, ))\n b = np.random.normal(0, 1, (1, ))\n n_epoch = 1000\n lr = 0.2\n for i in range(n_epoch):\n cost, dW, db = compute_cost_gradient1(x_train, y_train, W, b)\n W -= lr * dW\n b -= lr * db\n print('epoch {}: cost = {}'.format(i+1, cost))\n return W, b", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. 
#\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def train_step(self):\n\n # Custom gradient\n if self.model.computed_gradient:\n outputs = self.model.compute_all(next(self.dataset_it))\n gradients = outputs[\"gradients\"]\n\n # Compute\n else:\n with tf.GradientTape() as tape:\n outputs = self.model.compute_all(next(self.dataset_it))\n gradients = tape.gradient(outputs[\"loss\"], self.params)\n\n # Apply\n self.optimizer.apply_gradients(zip(gradients, self.params))\n\n return outputs", "def train_2layer_network(x_train, y_train):\n W = np.random.normal(0, 1, (2, ))\n V = np.random.normal(0, 1, (2, ))\n U = np.random.normal(0, 1, (2, ))\n b0 = np.random.normal(0, 1, (1, ))\n b1 = np.random.normal(0, 1, (1, ))\n b2 = np.random.normal(0, 1, (1, ))\n n_epoch = 4000\n lr = 0.3\n for i in range(n_epoch):\n cost, dW, dV, dU, db0, db1, db2 = compute_cost_gradient2(x_train, y_train, W, V, U, b0, b1, b2)\n W -= (lr * dW)\n V -= (lr * dV)\n U -= (lr * dU)\n b0 -= (lr * db0)\n b1 -= (lr * db1)\n b2 -= (lr * db2)\n print('epoch {}: cost = {}'.format(i+1, cost))\n return W, V, U, b0, b1, b2", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # 
don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):\n _, net_loss = net.compute_loss(\n inputdata=images,\n labels=labels,\n name='shadow_net',\n reuse=is_net_first_initialized\n )\n\n if optimizer is not None:\n grads = optimizer.compute_gradients(net_loss)\n else:\n grads = None\n\n return net_loss, grads", "def loss(self, X_batch, y_batch, learning_rate=1e-3, one_vs_all_index=-1, reg=True):\n #########################################################################\n # TODO: #\n # calculate the loss and the derivative #\n #########################################################################\n loss = 0\n for i in range(X_batch.shape[0]):\n if one_vs_all_index == -1:\n loss += -(y_batch[i] * (np.dot(self.w.T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.w.T, X_batch[i])))\n else:\n if reg:\n reg = (learning_rate / 2 * X_batch.shape[0]) * np.sum(np.power(self.ws[one_vs_all_index], 2))\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + reg\n else:\n loss += -(y_batch[i] * (np.dot(self.ws[one_vs_all_index].T, X_batch[i]))) + np.log(\n 1 + np.exp(np.dot(self.ws[one_vs_all_index].T, X_batch[i])))\n gradients = np.zeros(X_batch.shape[1])\n if one_vs_all_index == -1:\n dot = np.dot(X_batch, self.w)\n else:\n dot = np.dot(X_batch, self.ws[one_vs_all_index])\n logists = sigmod(dot)\n diff = y_batch - 
logists\n for index in range(X_batch.shape[0]):\n if one_vs_all_index != -1:\n if reg:\n dot = np.dot(X_batch[index], diff[index])\n gradients[1:] += dot[1:] + (learning_rate / X_batch.shape[0]) * self.ws[one_vs_all_index][1:]\n gradients[0] += dot[0]\n else:\n gradients += np.dot(X_batch[index], diff[index])\n else:\n gradients += np.dot(X_batch[index], diff[index])\n\n return loss, gradients / X_batch.shape[0] # 取均值免得步长过大直接nan\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def train(network,X,y):\r\n \r\n # Get the layer activations\r\n layer_activations = forward(network,X)\r\n logits = layer_activations[-1]\r\n \r\n # Compute the loss and the initial gradient\r\n loss = softmax_crossentropy_with_logits(logits,y)\r\n loss_grad = grad_softmax_crossentropy_with_logits(logits,y)\r\n \r\n for i in range(1, len(network)):\r\n loss_grad = network[len(network) - i].backward(layer_activations[len(network) - i - 1], loss_grad)\r\n #loss_grad = network[0].backward(X, loss_grad)\r\n return np.mean(loss)", "def 
compute_grad(W, x, y, loss_c, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_grad\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_grad\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n dW, db = model_grad(loss_c, x, y)\n dW += config.reg_lambda * l2_grad(W)\n\n return dW, db", "def add_training_op(self, loss):\n ### YOUR CODE HERE (~1-2 lines)\n opt = tf.train.AdamOptimizer(self.config.lr)\n grads_vars = opt.compute_gradients(loss)\n capped_grads_vars = [(tf.clip_by_value(g, -Config.max_grad_norm, Config.max_grad_norm), v)\n for g, v in grads_vars] # gradient capping\n train_op = opt.apply_gradients(capped_grads_vars, tf.Variable(0, trainable=False))\n ### END YOUR CODE\n return train_op", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in 
operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n 
X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi 
# Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += 
grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def loss_grad(dataset, params):\n grads = [grad(dataset[0][i], dataset[1][i], params) for i in range(len(dataset[0]))]\n return np.mean(grads, axis=0)", "def loss(self, X, y=None):\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the two-layer net, computing the #\n # class scores for X and storing them in the scores variable. #\n ############################################################################\n hid1, hid1cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n scores, scorecache = affine_forward(hid1, self.params['W2'], self.params['b2'])\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the two-layer net. Store the loss #\n # in the loss variable and gradients in the grads dictionary. Compute data #\n # loss using softmax, and make sure that grads[k] holds the gradients for #\n # self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n loss, dscores = softmax_loss(scores, y)\n loss += 0.5 * self.reg *( np.sum(self.params['W1']**2) + np.sum(self.params['W2']**2) )\n\n dhid1, grads['W2'], grads['b2'] = affine_backward(dscores, scorecache)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dhid1, hid1cache)\n\n grads['W1'] += self.reg * self.params['W1']\n grads['W2'] += self.reg * self.params['W2']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. 
\n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def update_grhs():\n init_gradient()\n costs_per_batch = []\n for i in range(n_train_batches):\n c = update_gradient_batch(i,*args)\n costs_per_batch.append(c)\n return numpy.mean(costs_per_batch,axis=0)", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = 
self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def backward(\n self, X: np.ndarray, y: np.ndarray, lr: float, reg: float = 0.0\n ) -> float:\n y_hat = self.forward(X)\n\n y_one_hot = self.one_hot_encode(y)\n loss = CrossEntropy.forward(y_one_hot, y_hat)\n\n d_layer = CrossEntropy.backward(y, y_hat)\n\n w_grads = []\n b_grads = []\n\n for idx, layer in reversed(list(enumerate(self.layers))):\n # Not output layer\n if (idx + 1) < len(self.layers):\n next_layer = self.layers[idx + 1]\n\n d_layer = d_layer.dot(next_layer.w.T)\n d_layer = layer.activation_func.backward(d_layer, layer.activated_out)\n\n d_w = layer.linear_in.T.dot(d_layer) + 2 * reg * layer.w\n d_b = np.sum(d_layer, axis=0)\n\n w_grads.insert(0, d_w)\n b_grads.insert(0, d_b)\n\n self.optimizer.step(self.layers, w_grads, b_grads, lr)\n\n if self.norm_weights:\n w_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n b_norm = max(np.linalg.norm(l.w) for l in self.layers) / len(self.layers)\n for layer in self.layers:\n layer.w /= w_norm\n layer.b /= b_norm\n\n return loss", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss 
== 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def compile(self, optimizer, lr):\n \n #clip_morm = 0.1\n self.loss_f = None\n with self.graph.as_default():\n \n tvars = tf.trainable_variables()\n ft_vars = [v for v in tvars if \"_fe\" in v.name] \n lab_vars = [v for v in tvars if \"_dc\" not in v.name]\n dom_vars = [v for v in tvars if \"_lp\" not in v.name]\n\n print()\n print(\" ft updates:\", ft_vars)\n print(\"96x3 updates:\", lab_vars)\n print(\" 1x3 updates:\", dom_vars)\n print()\n\n # `tf.nn.softmax_cross_entropy_with_logits` is deprcated.\n # https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits\n self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels, logits=self.output, name='cross_entropy')\n self.loss_adv = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.labels_adv, logits=self.output_adv, name='cross_entropy_adv')\n \n #grads_and_vars = optimizer.compute_gradients(loss, var_list=tf_vars)\n #clipped_grads_and_vars = [(tf.clip_by_norm(grad, clip_norm=clip_norm), var) for grad, var in grads_and_vars]\n \n self.loss_fe = - lam * self.loss_adv\n # `tf.control_dependencies` is necessary if `tf.layers.batch_normalization` is in the model\n # https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization\n self.train_step_adv = optimizer(lr).minimize(self.loss_fe, name='minimize_fe', var_list=ft_vars)\n self.train_step = optimizer(lr).minimize(self.loss, name='minimize', var_list=lab_vars)\n self.train_step_adv = optimizer(lr).minimize(self.loss_adv, name='minimize_adv', var_list=dom_vars)\n\n # Initialize all `tf.Variable`.\n self.session.run(tf.global_variables_initializer())", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param['mode'] = mode\n if self.normalization=='batchnorm':\n for bn_param in self.bn_params:\n bn_param['mode'] = mode\n ############################################################################\n # TODO: Implement the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n # #\n # When using dropout, you'll need to pass self.dropout_param to each #\n # dropout forward pass. #\n # #\n # When using batch normalization, you'll need to pass self.bn_params[0] to #\n # the forward pass for the first batch normalization layer, pass #\n # self.bn_params[1] to the forward pass for the second batch normalization #\n # layer, etc. 
#\n ############################################################################\n cache = {} # 需要存储反向传播需要的参数\n cache_dropout = {}\n hidden = X\n for i in range(self.num_layers - 1):\n if self.normalization == 'batchnorm':\n hidden,cache[i+1] = affine_bn_relu_forward(hidden,\n self.params['W' + str(i+1)],\n self.params['b' + str(i+1)],\n self.params['gamma' + str(i+1)],\n self.params['beta' + str(i+1)],\n self.bn_params[i])\n elif self.normalization == 'layernorm':\n hidden, cache[i + 1] = affine_ln_relu_forward(hidden,\n self.params['W' + str(i + 1)],\n self.params['b' + str(i + 1)],\n self.params['gamma' + str(i + 1)],\n self.params['beta' + str(i + 1)],\n self.bn_params[i])\n else:\n hidden , cache[i+1] = affine_relu_forward(hidden,self.params['W' + str(i+1)],\n self.params['b' + str(i+1)])\n if self.use_dropout:\n hidden , cache_dropout[i+1] = dropout_forward(hidden,self.dropout_param)\n # 最后一层不用激活层\n scores, cache[self.num_layers] = affine_forward(hidden , self.params['W' + str(self.num_layers)],\n self.params['b' + str(self.num_layers)])\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n ############################################################################\n # TODO: Implement the backward pass for the fully-connected net. Store the #\n # loss in the loss variable and gradients in the grads dictionary. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # When using batch/layer normalization, you don't need to regularize the scale #\n # and shift parameters. #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. 
#\n ############################################################################\n loss, grads = 0.0, {}\n loss, dS = softmax_loss(scores , y)\n # 最后一层没有relu激活层\n dhidden, grads['W' + str(self.num_layers)], grads['b' + str(self.num_layers)] \\\n = affine_backward(dS,cache[self.num_layers])\n loss += 0.5 * self.reg * np.sum(self.params['W' + str(self.num_layers)] * self.params['W' + str(self.num_layers)])\n grads['W' + str(self.num_layers)] += self.reg * self.params['W' + str(self.num_layers)]\n\n for i in range(self.num_layers - 1, 0, -1):\n loss += 0.5 * self.reg * np.sum(self.params[\"W\" + str(i)] * self.params[\"W\" + str(i)])\n # 倒着求梯度\n if self.use_dropout:\n dhidden = dropout_backward(dhidden,cache_dropout[i])\n if self.normalization == 'batchnorm':\n dhidden, dw, db, dgamma, dbeta = affine_bn_relu_backward(dhidden, cache[i])\n grads['gamma' + str(i)] = dgamma\n grads['beta' + str(i)] = dbeta\n elif self.normalization == 'layernorm':\n dhidden, dw, db, dgamma, dbeta = affine_ln_relu_backward(dhidden, cache[i])\n grads['gamma' + str(i)] = dgamma\n grads['beta' + str(i)] = dbeta\n else:\n dhidden, dw, db = affine_relu_backward(dhidden, cache[i])\n grads['W' + str(i)] = dw + self.reg * self.params['W' + str(i)]\n grads['b' + str(i)] = db\n return loss, grads", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the positive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # gradient derived analytically with calculus, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# 
def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss_function(self, train_head, train_tail, train_relation, train_head_corrupted, train_tail_corrupted):\n\n # train_head = tf.nn.l2_normalize(train_head, 1)\n # train_tail = tf.nn.l2_normalize(train_tail, 1)\n # train_head_corrupted = tf.nn.l2_normalize(train_head_corrupted, 1)\n # train_tail_corrupted = tf.nn.l2_normalize(train_tail_corrupted, 1)\n\n # loss = tf.reduce_mean(\n # tf.maximum(self.dict_paras['margin']\n # + self.distance(tf.add(train_head, train_relation), train_tail)\n # - self.distance(tf.add(train_head_corrupted, train_relation), train_tail_corrupted), 0.))\n\n loss = tf.reduce_mean(self.distance(tf.add(train_head, train_relation), train_tail))\n\n return loss", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, 
conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n num_FC = self.num_FC\n num_CNN = self.num_CNN\n total_layer = self.num_FC + self.num_CNN\n \n cache = {}\n pre_layer_output = X\n for i in range(0, num_CNN):\n W_name = \"W\" + str(i)\n b_name = \"b\" + str(i)\n conv_param_name = \"conv_param\" + str(i)\n gamma_name = \"gamma\" + str(i)\n beta_name = \"beta\" + str(i)\n bn_param_name = \"bn_param\" + str(i)\n pool_param_name = \"pool_param\" + str(i)\n\n w = self.params[W_name]\n b = self.params[b_name]\n conv_param = self.fix_params[conv_param_name]\n gamma = self.params[gamma_name]\n beta = self.params[beta_name]\n bn_param = self.fix_params[bn_param_name]\n pool_param = self.fix_params[pool_param_name]\n \n pre_layer_output, cache_layer = cnn_batch_relu_pool_forward(pre_layer_output, \n w, b, conv_param, \n gamma, beta, bn_param, \n pool_param)\n cache[i] = cache_layer\n \n for i in range(0, num_FC):\n W_name = \"W\" + str(i + num_CNN)\n b_name = \"b\" + str(i + num_CNN)\n gamma_name = \"gamma\" + str(i + num_CNN)\n beta_name = \"beta\" + str(i + num_CNN)\n bn_param_name = \"bn_param\" + str(i + num_CNN)\n drop_name = \"drop_ratio\" + str(i + num_CNN)\n\n w = self.params[W_name]\n b = self.params[b_name]\n gamma = self.params[gamma_name]\n beta = self.params[beta_name]\n bn_param = self.fix_params[bn_param_name]\n dropout_param = self.fix_params[drop_name]\n\n pre_layer_output, cache_layer = affine_batch_relu_drop_forward(pre_layer_output, \n w, b, \n gamma, beta, bn_param, \n dropout_param)\n cache[i + num_CNN] = cache_layer\n \n W_name = \"W\" + str(total_layer)\n b_name = \"b\" + str(total_layer)\n w = self.params[W_name]\n b = self.params[b_name]\n scores, cache[total_layer] = affine_forward(pre_layer_output, w, b)\n if y is None:\n return scores\n \n loss, 
grads = 0, {}\n \n loss, dUpLayer = softmax_loss(scores, y)\n loss = loss + 0.5 * self.reg * np.sum(w**2)\n \n cache_layer = cache[total_layer]\n dUpLayer, grads[W_name], grads[b_name] = affine_backward(dUpLayer, cache_layer)\n grads[W_name] = grads[W_name] + self.reg * self.params[W_name]\n\n for i in range(0, num_FC):\n layer_index = num_FC + num_CNN -1 - i\n W_name = \"W\" + str(layer_index)\n b_name = \"b\" + str(layer_index)\n gamma_name = \"gamma\" + str(layer_index)\n beta_name = \"beta\" + str(layer_index)\n\n cache_layer = cache[layer_index]\n dUpLayer, grads[W_name], grads[b_name], grads[gamma_name], grads[beta_name] = affine_batch_relu_drop_backward(dUpLayer, cache_layer)\n\n loss = loss + 0.5 * self.reg * np.sum(self.params[W_name]**2)\n grads[W_name] = grads[W_name] + self.reg * self.params[W_name]\n grads[gamma_name] = grads[gamma_name] + self.reg * self.params[gamma_name]\n\n for i in range(0, num_CNN):\n\n layer_index = num_CNN -1 - i\n\n W_name = \"W\" + str(layer_index)\n b_name = \"b\" + str(layer_index)\n conv_param_name = \"conv_param\" + str(layer_index)\n gamma_name = \"gamma\" + str(layer_index)\n beta_name = \"beta\" + str(layer_index)\n\n cache_layer = cache[layer_index]\n dUpLayer, grads[W_name], grads[b_name], grads[gamma_name], grads[beta_name] = cnn_batch_relu_pool_backward(dUpLayer, cache_layer)\n\n loss = loss + 0.5 * self.reg * np.sum(self.params[W_name]**2)\n grads[W_name] = grads[W_name] + self.reg * self.params[W_name]\n grads[gamma_name] = grads[gamma_name] + self.reg * self.params[gamma_name]\n \n return loss, grads", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. 
Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def calculate_validation_loss(self):\n self.network.train()\n self.validation_average_loss = self.calculate_average_loss(self.validation_dataloader)", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # We are gonna store everythin in a dictionnary hidden\n hidden = {}\n hidden['h0'] = X.reshape(X.shape[0], np.prod(X.shape[1:]))\n\n for i in range(self.L):\n idx = i + 1\n # Naming of the variable\n w = self.params['W' + str(idx)]\n b = self.params['b' + str(idx)]\n h = hidden['h' + str(idx - 1)]\n\n # Computing of the forward pass.\n # Special case of the last layer (output)\n if idx == self.L:\n h, cache_h = affine_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n # For all other layers\n else:\n h, cache_h = affine_relu_forward(h, w, b)\n hidden['h' + str(idx)] = h\n hidden['cache_h' + str(idx)] = cache_h\n\n scores = hidden['h' + str(self.L)]\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n # Computing of the loss\n data_loss, dscores = softmax_loss(scores, y)\n reg_loss = 0\n for w in [self.params[f] for f in self.params.keys() if f[0] == 'W']:\n reg_loss += 0.5 * self.reg * np.sum(w * w)\n\n loss = data_loss + reg_loss\n\n # Backward pass\n\n hidden['dh' + str(self.L)] = dscores\n for i in range(self.L)[::-1]:\n idx = i + 1\n dh = hidden['dh' + str(idx)]\n h_cache = hidden['cache_h' + str(idx)]\n if idx == self.L:\n dh, dw, db = affine_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n else:\n dh, dw, db = affine_relu_backward(dh, h_cache)\n hidden['dh' + str(idx - 1)] = dh\n hidden['dW' + str(idx)] = dw\n hidden['db' + str(idx)] = db\n\n # w gradients where we add the regulariation term\n list_dw = {key[1:]: val + self.reg * self.params[key[1:]]\n for key, val in hidden.iteritems() if key[:2] == 'dW'}\n # Paramerters b\n list_db = {key[1:]: val for key, val in hidden.iteritems() if key[:2] == 'db'}\n # Parameters gamma\n list_dgamma = {key[1:]: val for key, val in hidden.iteritems() if key[:6] == 'dgamma'}\n # Paramters beta\n list_dbeta = {key[1:]: val for key, val in hidden.iteritems() if key[:5] == 'dbeta'}\n grads = {}\n grads.update(list_dw)\n grads.update(list_db)\n grads.update(list_dgamma)\n grads.update(list_dbeta)\n return loss, grads", "def train_CNN(self, train_dataset, validation_dataset, lr = 0.01, epochs_num = 100, batch_size = 40, alpha = 0, momentum = 0.9):\n threshold = 0.5\n # optimizer = SGD(self.parameters(), lr = lr, weight_decay = alpha, momentum = momentum)\n optimizer = Adam(self.parameters())\n\n 
criterion = nn.BCELoss()\n\n train_losses = []\n\n validation_losses = []\n\n f1_scores_validations = []\n precisions_validations = []\n recalls_validations = []\n\n epochs = []\n\n start = time.time()\n\n remaining_time = 0\n\n train_dataloader = DataLoader(train_dataset, batch_size = batch_size, collate_fn = PPD.collate_data)\n best_f1score_validation = 0\n patience = 0\n for epoch in range(epochs_num):\n\n super(CNN, self).train()\n\n for i_batch, sample_batched in enumerate(train_dataloader):\n\n input = sample_batched[0]\n\n target = sample_batched[1].float()\n #optimizer.zero_grad() clears x.grad for every parameter x in the optimizer. It’s important to call this before loss.backward(), otherwise you’ll accumulate the gradients from multiple passes.\n self.zero_grad()\n\n output = self(input)\n\n train_loss = criterion(output, target)\n #loss.backward() computes dloss/dx for every parameter x which has requires_grad=True. These are accumulated into x.grad for every parameter x. In pseudo-code: x.grad += dloss/dx\n train_loss.backward()\n #optimizer.step updates the value of x using the gradient x.grad. For example, the SGD optimizer performs: x += -lr * x.grad\n optimizer.step()\n\n super(CNN, self).eval()\n\n validation_segments, validation_labels = PPD.collate_data(validation_dataset)\n\n validation_loss = criterion(self(validation_segments.long()), validation_labels.float())\n\n f1_scores_validation = self.f1_score(self(validation_segments.long()), validation_labels.float(), threshold)[0]\n precisions_validation = self.f1_score(self(validation_segments.long()), validation_labels.float(), threshold)[1]\n recalls_validation = self.f1_score(self(validation_segments.long()), validation_labels.float(), threshold)[2]\n\n if (ceil(f1_scores_validation * 100) / 100) <= (ceil(best_f1score_validation * 100) / 100):\n patience = patience + 1\n else:\n best_f1score_validation = f1_scores_validation\n patience = 0\n\n\n end = time.time()\n\n remaining_time = remaining_time * 0.90 + ((end - start) * (epochs_num - epoch + 1) / (epoch + 1)) * 0.1\n\n remaining_time_corrected = remaining_time / (1 - (0.9 ** (epoch + 1)))\n\n epoch_str = \"last epoch finished: \" + str(epoch)\n\n progress_str = \"progress: \" + str((epoch + 1) * 100 / epochs_num) + \"%\"\n\n time_str = \"time: \" + str(remaining_time_corrected / 60) + \" mins\"\n\n sys.stdout.write(\"\\r\" + epoch_str + \" -- \" + progress_str + \" -- \" + time_str)\n\n sys.stdout.flush()\n\n train_losses.append(train_loss.item())\n\n validation_losses.append(validation_loss.item())\n\n f1_scores_validations.append(f1_scores_validation)\n precisions_validations.append(precisions_validation)\n recalls_validations.append(recalls_validation)\n\n epochs.append(epoch)\n # if patience == 15:\n # break\n\n print(\"\\n\" + \"Training completed. 
Total training time: \" + str(round((end - start) / 60, 2)) + \" mins\")\n\n return epochs, train_losses, validation_losses, f1_scores_validations, precisions_validations, recalls_validations", "def _optimize(self):\n # Retrieve all trainable variables\n train_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n # Compute the gradient (return a pair of variable and their respective gradient)\n grads = self.optimizer.compute_gradients(loss=self.loss, var_list=train_variables)\n self.train_dis = self.optimizer.apply_gradients(grads, global_step=self.global_step)", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### YOUR CODE HERE: forward propagation\n affine_1 = np.dot(data, W1) + b1\n sigmoid_1 = sigmoid(affine_1)\n affine_2 = np.dot(sigmoid_1, W2) + b2\n scores = sigmoid(affine_2)\n cost = - np.sum(np.multiply(np.log(softmax(scores)), labels))\n ### END YOUR CODE\n \n ### YOUR CODE HERE: backward propagation\n cross_entropy_grad_ = cross_entropy_grad(scores, labels)\n sigmoid_2_grads = sigmoid_input_grad(cross_entropy_grad_, scores)\n x_2_grad, gradW2, gradb2 = affine_grads(sigmoid_2_grads, sigmoid_1, W2, b2)\n sigmoid_1_grads = sigmoid_input_grad(x_2_grad, sigmoid_1)\n x_1_grad, gradW1, gradb1 = affine_grads(sigmoid_1_grads, data, W1, b1)\n ### END YOUR CODE\n \n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n return cost, grad", "def model_loss(inp, fake, real_label, fake_label):\n \n \n Dreal,realcls,R1 = gradpen(inp)\n [Dfake,fakecls] = D(fake)\n # 1. Adversarial loss\n \n glabel = tf.ones_like(Dfake)#tf.random.uniform((Dfake.shape), 1-LN, 1)\n dlabelr = tf.ones_like(Dreal)#tf.random.uniform((Dreal.shape), 1-LN, 1)\n dlabelf = tf.zeros_like(Dfake)#tf.random.uniform((Dfake.shape), 0, LN)\n \n \n \n # D has no sigmoid activation: \"from_logits=True\"\n real_loss = tf.keras.losses.binary_crossentropy(\n dlabelr, Dreal, from_logits=True)\n real_loss = tf.reduce_mean(real_loss)\n \n fake_loss = tf.keras.losses.binary_crossentropy(\n dlabelf, Dfake, from_logits=True)\n fake_loss = tf.reduce_mean(fake_loss)\n \n Dadv = 0.5*(real_loss+fake_loss)\n \n Gadv = tf.keras.losses.binary_crossentropy(\n glabel, Dfake, from_logits=True)\n Gadv = tf.reduce_mean(Gadv)\n \n # 2. Classification loss\n \n Dcls = tf.keras.losses.binary_crossentropy(real_label, realcls, from_logits=True)\n Dcls = tf.reduce_mean(Dcls)\n \n Gcls = tf.keras.losses.binary_crossentropy(fake_label, fakecls, from_logits=True)\n Gcls = tf.reduce_mean(Gcls)\n \n # 3. 
Total loss\n \n Dloss = Dadv + (GAMMA/2)*R1 + LAMBDA_CLS*Dcls\n \n Gloss = Gadv + LAMBDA_CLS*Gcls\n \n return (Dloss, Dadv, Dcls, R1), (Gloss, Gadv, Gcls)", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n #c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def backward_pass(total_loss):\n\n # Get the tensor that keeps track of step in this graph or create one if not there\n global_step = tf.train.get_or_create_global_step()\n\n # Print summary of total loss\n tf.summary.scalar('Total_Loss', total_loss)\n\n # Decay the learning rate\n dk_steps = int((FLAGS.epoch_size / FLAGS.batch_size) * 75)\n lr_decayed = tf.train.cosine_decay_restarts(FLAGS.learning_rate, global_step, dk_steps)\n\n # Compute the gradients. NAdam optimizer came in tensorflow 1.2\n opt = tf.contrib.opt.NadamOptimizer(learning_rate=lr_decayed, beta1=FLAGS.beta1,\n beta2=FLAGS.beta2, epsilon=0.1)\n\n # Compute the gradients\n gradients = opt.compute_gradients(total_loss)\n\n # Apply the gradients\n train_op = opt.apply_gradients(gradients, global_step, name='train')\n\n # Add histograms for the trainable variables. i.e. the collection of variables created with Trainable=True\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Maintain average weights to smooth out training\n variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_avg_decay, global_step)\n\n # Applies the average to the variables in the trainable ops collection\n variable_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([train_op, variable_averages_op]): # Wait until we apply the gradients\n dummy_op = tf.no_op(name='train') # Does nothing. 
placeholder to control the execution of the graph\n\n return dummy_op", "def D_loss_basic(self, netD, real, fake):\n # Real\n D_real = netD(real)\n D_real_loss = self.GANLoss(D_real, True, True)\n # fake\n D_fake = netD(fake)\n D_fake_loss = self.GANLoss(D_fake, False, True)\n # loss for discriminator\n D_loss = (D_real_loss + D_fake_loss) * 0.5\n # gradient penalty for wgan-gp\n if self.gan_mode == 'wgangp':\n gradient_penalty, gradients = base_function.cal_gradient_penalty(netD, real, fake)\n D_loss +=gradient_penalty\n\n D_loss = D_loss * self.loss_d_weight\n D_loss.backward()\n\n return D_loss", "def loss(self, X, y=None):\r\n mode = 'test' if y is None else 'train'\r\n\r\n if self.dropout_param is not None:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batchnorm:\r\n for bn_param in self.bn_params:\r\n bn_param[mode] = mode\r\n\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n gamma1, beta1 = self.params['gamma1'], self.params['beta1']\r\n gamma2, beta2 = self.params['gamma2'], self.params['beta2']\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': int((filter_size - 1) / 2)}\r\n\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n\r\n scores = None\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. #\r\n ############################################################################\r\n alpha = 0.1\r\n csrp1, csrp1_cache = conv_sbn_lrelu_pool_forward(X, W1, b1, gamma1, beta1, self.bn_params[0], conv_param, pool_param, alpha)\r\n abr1, abr1_cache = affine_bn_lrelu_forward(csrp1, W2, b2, gamma2, beta2, self.bn_params[1], alpha)\r\n scores, out_cache = affine_forward(abr1, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n loss, grads = 0, {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. Don't forget to add L2 regularization! 
#\r\n ############################################################################\r\n loss, dp = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * np.sum(\r\n np.sum(W1 ** 2) + np.sum(W2 ** 2) + np.sum(W3 ** 2)\r\n )\r\n dp, dw3, db3 = affine_backward(dp, out_cache)\r\n dp, dw2, db2, dgamma2, dbeta2 = affine_bn_lrelu_backward(dp, abr1_cache)\r\n dp, dw1, db1, dgamma1, dbeta1 = conv_sbn_lrelu_pool_backward(dp, csrp1_cache)\r\n grads['W1'] = dw1 + self.reg * W1\r\n grads['W2'] = dw2 + self.reg * W2\r\n grads['W3'] = dw3 + self.reg * W3\r\n grads['b1'] = db1\r\n grads['b2'] = db2\r\n grads['b3'] = db3\r\n grads['gamma2'] = dgamma2\r\n grads['gamma1'] = dgamma1\r\n grads['beta2'] = dbeta2\r\n grads['beta1'] = dbeta1\r\n \r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, conv_param2, pool_param)\n\t\tz3, cache3 = spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], 
grads['b2'] = conv_relu_pool_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads", "def update(self, loss=None, inputs=None, targets=None, outputs=None):\n\n # TODO: add gradient accumulation\n\n self.optimizer.zero_grad(set_to_none=self.none_grad)\n\n if self.grad_scaler:\n self.grad_scaler.scale(loss).backward()\n self.grad_scaler.step(self.optimizer)\n\n if self.clip_grad:\n self.grad_scaler.unscale_(self.optimizer)\n self.clip_grad(self.model.parameters())\n self.grad_scaler.update()\n else:\n loss.backward()\n\n if self.clip_grad:\n self.clip_grad(self.model.parameters())\n\n self.optimizer.step()", "def worker(D,graph=None):\n\n if graph ==None:\n graph = tf.Graph()\n # Build Tensorflow graph which computes gradients of the model with one mini-batch of examples\n with graph.as_default():\n \n # Get input and labels for learning from D\n inputs, labels = D\n logits = mdnn.CNN_model(inputs,graph)\n \n # Calculate loss.\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,logits=logits))\n \n optimizer = tf.train.GradientDescentOptimizer(0.1)\n grads = optimizer.compute_gradients(loss)\n with tf.variable_scope(\"\",reuse=True):\n grads_var = {var.op.name:tf.Variable(tf.zeros(var.get_shape()),trainable=False,name=var.op.name+\"_grad\",collections=[\"W_grad\"]) for _,var in grads}\n train_op = [grads_var[var.op.name].assign(grad) for grad,var in grads]\n \n # Build an initialization operation.\n init = tf.global_variables_initializer()\n\n \n # Tensorflow op to update parameters from PS\n get_W = df.get_w(graph,\"W_global\")\n\n\n with tf.Session() as sess:\n #Initialize the TF variables\n sess.run([init])\n tf.train.start_queue_runners(sess=sess)\n iteration = 0\n s = sck.socket(sck.AF_INET, sck.SOCK_STREAM)\n s.connect((FLAGS.ip_PS, FLAGS.port))\n \n while iteration < FLAGS.iter_max:\n #Get the parameters from the PS\n com.send_msg(s,\"\",\"GET_W\")\n cmd,data= com.recv_msg(s)\n iteration,W= com.decode_variables(data)\n s.close()\n \n #Update the parameters\n sess.run(get_W,{key+\"_delta:0\":value for key,value in W.items()})\n \n #Compute gradients stored in Tensorflow variables\n inp,log,lab,loss_values,_ =sess.run([inputs,logits,labels,loss,train_op])\n\n print \"Loss\",loss_values\n \n #Encode the update with the local timer (iteration)\n update = com.encode_variables(sess,\"W_grad\",iteration,compression=FLAGS.compression_rate)\n \n #Push the update to PS\n s = sck.socket(sck.AF_INET, sck.SOCK_STREAM)\n s.connect((FLAGS.ip_PS, FLAGS.port))\n \n com.send_msg(s,update,\"PUSH\")\n print \"Worker\",FLAGS.id_worker,\" is closed\"", "def fully_connected_model(input_size, num_labels, num_hidden_nodes,\n valid_dataset, test_dataset, batch_size,\n learning_rate, beta = 0.0, dropout_prob = 0.0,\n exp_decay = None, method = 'gd'):\n def create_model(weights, inputs, labels = None):\n hidden_units = inputs\n num_hidden_layers = len(weights) // 2 - 1\n regularisation_term = tf.zeros([1])\n\n for l in range(num_hidden_layers):\n cur_weights = weights[2*l]\n cur_biases = weights[2*l + 1]\n\n hidden_units = tf.nn.relu(tf.matmul(hidden_units, cur_weights) + cur_biases)\n if labels is not None:\n # If labels are specified, the graph will be used for training,\n # so we apply dropout.\n hidden_units = tf.nn.dropout(hidden_units, 1 - dropout_prob)\n\n regularisation_term = regularisation_term + tf.nn.l2_loss(cur_weights)\n\n # Output layer.\n cur_weights = weights[-2]\n cur_biases = weights[-1]\n 
out_logits = tf.matmul(hidden_units, cur_weights) + cur_biases\n out_prob = tf.nn.softmax(out_logits)\n regularisation_term = regularisation_term + tf.nn.l2_loss(cur_weights)\n\n if labels is not None:\n # Only when training.\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(out_logits, labels))\n loss = loss + beta * regularisation_term\n return out_prob, loss\n\n return out_prob\n\n graph = tf.Graph()\n with graph.as_default():\n tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, input_size))\n tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n tf_valid_dataset = tf.constant(valid_dataset)\n tf_test_dataset = tf.constant(test_dataset)\n\n # Variables.\n weights = create_fully_connected_weights(input_size, num_labels, num_hidden_nodes)\n\n # Training computation.\n train_prediction, loss = create_model(weights, tf_train_dataset, tf_train_labels)\n valid_prediction = create_model(weights, tf_valid_dataset)\n test_prediction = create_model(weights, tf_test_dataset)\n\n # Optimizer.\n global_step = tf.Variable(0)\n\n if exp_decay is not None:\n learning_rate = tf.train.exponential_decay(\n learning_rate, global_step,\n exp_decay['decay_steps'], exp_decay['decay_rate'], exp_decay['staircase'])\n\n optimizer = None\n if method == 'gd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(\n loss, global_step=global_step)\n elif method == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(\n loss, global_step=global_step)\n else:\n raise Exception('Unknown optimiser.')\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': tf_train_dataset,\n 'labels_ph': tf_train_labels }\n tf_predictions = [train_prediction, valid_prediction, test_prediction]\n\n return tf_graph, optimizer, loss, tf_predictions", "def loss(self, X, y=None):\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.use_dropout:\n self.dropout_param['mode'] = mode\n if self.normalization=='batchnorm':\n for bn_param in self.bn_params:\n bn_param['mode'] = mode\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the fully-connected net, computing #\n # the class scores for X and storing them in the scores variable. #\n # #\n # When using dropout, you'll need to pass self.dropout_param to each #\n # dropout forward pass. #\n # #\n # When using batch normalization, you'll need to pass self.bn_params[0] to #\n # the forward pass for the first batch normalization layer, pass #\n # self.bn_params[1] to the forward pass for the second batch normalization #\n # layer, etc. 
#\n ############################################################################\n if not self.use_dropout:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n cache, scores = self._AffRelu_Loss(X)\n elif self.normalization is \"batchnorm\":\n cache, scores = self._AffBatchRelu_Loss(X)\n elif self.normalization is \"layernorm\":\n cache, scores = self._AffLayerRelu_Loss(X)\n else:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n cache, scores = self._AffReluDrop_Loss(X)\n elif self.normalization is \"batchnorm\":\n cache, scores = self._AffBatchReluDrop_Loss(X)\n elif self.normalization is \"layernorm\":\n cache, scores = self._AffLayerReluDrop_Loss(X)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n \n ############################################################################\n # TODO: Implement the backward pass for the fully-connected net. Store the #\n # loss in the loss variable and gradients in the grads dictionary. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # When using batch/layer normalization, you don't need to regularize the scale #\n # and shift parameters. #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n loss, dscores = softmax_loss(scores, y)\n if not self.use_dropout:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n grads, l2_loss = self._AffRelu_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"batchnorm\":\n grads, l2_loss = self._AffBatchRelu_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"layernorm\":\n grads, l2_loss = self._AffLayerRelu_Backprop(dscores, cache)\n loss += l2_loss\n else:\n if self.normalization is None: # {affine-relu} X (L-1) - affine - softmax\n grads, l2_loss = self._AffReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"batchnorm\":\n grads, l2_loss = self._AffBatchReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n elif self.normalization is \"layernorm\":\n grads, l2_loss = self._AffLayerReluDrop_Backprop(dscores, cache)\n loss += l2_loss\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def __cnnNetFn(self, input, is_training):\n with tf.variable_scope('CNN'):\n conv1 = tf.layers.conv2d(input, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv1_bn = tf.layers.batch_normalization(conv1)\n conv2 = tf.layers.conv2d(conv1_bn, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv2_bn = tf.layers.batch_normalization(conv2)\n conv2_pool = tf.layers.max_pooling2d(conv2_bn, 2, 2, padding='SAME')\n conv2_drop = tf.layers.dropout(conv2_pool, rate=0.2, training=is_training)\n\n conv3 = 
tf.layers.conv2d(conv2_drop, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv3_bn = tf.layers.batch_normalization(conv3)\n conv4 = tf.layers.conv2d(conv3_bn, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv4_bn = tf.layers.batch_normalization(conv4)\n conv4_pool = tf.layers.max_pooling2d(conv4_bn, 2, 2, padding='SAME')\n conv4_drop = tf.layers.dropout(conv4_pool, rate=0.3, training=is_training)\n\n conv5 = tf.layers.conv2d(conv4_drop, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv5_bn = tf.layers.batch_normalization(conv5)\n conv6 = tf.layers.conv2d(conv5_bn, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv6_pool = tf.layers.max_pooling2d(conv6, 2, 2, padding='SAME')\n\n csnn_features = tf.stop_gradient(self.__csnn.getTrainOp(input))\n csnn_features = tf.identity(csnn_features)\n if self.__use_csnn:\n joint_features = tf.concat((conv6_pool, csnn_features), axis=3)\n else:\n joint_features = conv6_pool\n\n conv6_bn = tf.layers.batch_normalization(joint_features)\n\n conv7 = tf.layers.conv2d(conv6_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv7_bn = tf.layers.batch_normalization(conv7)\n conv8 = tf.layers.conv2d(conv7_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv8_bn = tf.layers.batch_normalization(conv8)\n conv8_pool = tf.layers.max_pooling2d(conv8_bn, 2, 2, padding='SAME')\n conv8_drop = tf.layers.dropout(conv8_pool, rate=0.4, training=is_training)\n\n flat = tf.contrib.layers.flatten(conv8_drop)\n logits = tf.layers.dense(flat, self.__num_classes)\n return logits, csnn_features", "def test_network_fine_tuning_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n final_flow, previous_flows = self.pwc_net.get_forward(input_image_a, input_image_b)\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n dummy_flow = np.ones(shape=[batch_size, height, width, 2], dtype=np.float32)\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(tf.reduce_mean(final_flow), trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the losses.\n gt_placeholder = tf.placeholder(shape=[None, height, width, 2], dtype=tf.float32)\n training_loss = self.pwc_net.get_fine_tuning_loss(previous_flows, gt_placeholder)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n self.assertNotAlmostEqual(loss_value[0], 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + 
[input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))", "def loss(self, X, y=None, reg=0.0):\n\n self.layers = []\n layers = self.layers\n layers.append(X)\n\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n mid = np.maximum(0, X.dot(W1) + b1.reshape(1, -1)) # activation\n scores = mid.dot(W2) + b2.reshape(1, -1)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. So that your results match ours, multiply the #\n # regularization loss by 0.5 #\n #############################################################################\n exp_score = np.exp(scores)\n exp_score_sum = exp_score.sum(axis=1)\n correct_score = exp_score[np.arange(N), y]\n probability = (correct_score / exp_score_sum).reshape(-1, 1)\n loss = -np.log(probability).sum()\n\n loss /= N\n loss += 0.5 * reg * (np.sum(W1 * W1) + np.sum(W2 * W2))\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. 
For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n des = np.tile((-correct_score / np.square(exp_score_sum)).reshape(-1, 1), (1, C))\n des[np.arange(N), y] += 1.0 / exp_score_sum\n dsoftmax = des * (-np.ones((mid.shape[0], 1)) / probability) * np.exp(scores)\n\n # W2\n grads['W2'] = mid.T.dot(dsoftmax)\n grads['W2'] /= N\n grads['W2'] += reg * W2\n\n # b2\n grads['b2'] = np.ones_like(b2.reshape(1, -1)) * dsoftmax\n grads['b2'] = np.mean(grads['b2'], axis=0).reshape(-1)\n\n # W1\n binary = np.zeros_like(mid)\n binary[mid > 0] = 1\n grads['W1'] = X.T.dot(binary * dsoftmax.dot(W2.T)) # chain rule, compute dmid/dW1 * dscore/dmid * dsoftmax\n grads['W1'] /= N\n grads['W1'] += reg * W1\n\n # b1\n grads['b1'] = np.ones_like(b1.reshape(1, -1)) * binary * dsoftmax.dot(W2.T)\n grads['b1'] = np.mean(grads['b1'], axis=0).reshape(-1)\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. 
It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def train(self, features, labels, optimizer, loss_scale=None):\n loss, gradients = self.compute_gradients(\n features,\n labels,\n optimizer,\n loss_scale=loss_scale,\n )\n optimizer.apply_gradients(list(zip(gradients, self.trainable_weights)))\n return loss", "def gradient(self, node, output_grad):\r\n return [output_grad]\r\n \"\"\"higher accuracy notice notice here\"\"\"", "def backward_val(self):\n self.loss_similarity = [NCC(warped_img, self.batch_fixed) for warped_img in self.warped_img_list]\n self.loss_similarity_mean = torch.mean(torch.stack(self.loss_similarity))\n self.loss_smooth = [GradNorm(disp_map) for disp_map in self.disp_list]\n self.loss_smooth_mean = torch.mean(torch.stack(self.loss_smooth))\n if len(self.strain_compensated_list) > 1:\n self.loss_consistency_strain = [NCC(self.strain_compensated_list[t-1][:,:,143:-143,:], self.strain_compensated_list[t][:,:,143:-143,:]) for t in range(1, len(self.strain_compensated_list))]\n self.loss_consistency_strain_mean = torch.mean(torch.stack(self.loss_consistency_strain))\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha + (1 - self.loss_consistency_strain_mean) * self.beta\n else:\n self.loss_total = 1 - self.loss_similarity_mean + self.loss_smooth_mean * self.alpha", "def _add_train_op(self):\n self._lr_rate = tf.maximum(\n self._hps.min_lr, # min_lr_rate.\n tf.train.exponential_decay(self._hps.lr, self.global_step, 30000, 0.98))\n \n \n # Take gradients of the trainable variables w.r.t. 
the loss function to minimize\n loss_to_minimize = self._total_loss if self._hps.coverage else self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)\n\n # Clip the gradients\n with tf.device(self._get_gpu(self._num_gpus-1)):\n grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n if self._hps.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(\n self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)\n\n elif self._hps.optimizer == 'adam': \n # Adam\n optimizer = tf.train.AdamOptimizer()\n \n elif self._hps.optimizer == 'sgd':\n # SGD\n optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)\n tf.summary.scalar('learning rate', self._lr_rate)\n \n else:\n raise Exception('Invalid optimizer: ', self._hps.optimizer)\n\n with tf.device(self._get_gpu(self._num_gpus-1)):\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars), global_step=self.global_step, name='train_step')", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def _batch_gradient_descent(self, X, y, lr, epochs):\n\n # Initialize the bias and weights.\n _, n = X.shape\n self.bias = 0\n self.weights = np.random.normal(size=n)\n\n for i in range(epochs):\n # Calculate and sum the gradient delta of each sample\n grad_bias, grad_weights = self._get_gradient(X, y)\n\n # Show the gradient of each epoch.\n grad = (grad_bias + grad_weights.mean()) / 2\n print(\"Epochs %d gradient %.3f\" % (i + 1, grad), flush=True)\n\n # Update the bias and weight by gradient of current epoch\n self.bias += lr * grad_bias\n self.weights += lr * grad_weights", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. \n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def loss(self, X, y=None, lambda_reg=0.0):\n \n # Unpack variables from the params dictionary\n N, D = X.shape\n\n # Compute the forward pass\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. 
#\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n scores, cache_list = self.network_forward(X)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute for the loss. This should include L2 regularization for #\n # the weights of each layer. #\n #############################################################################\n loss_softmax, dloss_softmax = self.softmax_cross_entropy_loss(scores, y)\n loss = loss_softmax\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n \n #############################################################################\n # TODO: Compute the derivatives of the weights and biases. Store the #\n # results in the grads dictionary. For example, grads['W1'] should store #\n # the gradient on the weights W of the first layer, and be a matrix of #\n # same size. #\n #############################################################################\n grads = self.network_backward(dloss_softmax, cache_list)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, grads", "def SoftEntropy(nn_last_layer, correct_label, learning_rate): \n \n loss = tf2.math.reduce_sum( tf2.nn.softmax_cross_entropy_with_logits(tf2.stop_gradient(correct_label), nn_last_layer))\n \n #obtain training operation\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate, epsilon = 1e-8) #Note default value of epsilon 1e-8 results in instability after few epochs\n \n #clip the gradients\n gvs = optimizer.compute_gradients(loss)\n #capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]\n training_operation = optimizer.apply_gradients(gvs)\n\n return training_operation, loss", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def run(self, arch : 'str'):\n if not hasattr(self, arch):\n print(\"Unrecognized neural net type %s\" % arch)\n sys.exit(1)\n\n # placeholders for feature vector and labels\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, 10])\n \n # dropout probability\n self.keep_prob = tf.placeholder(tf.float32)\n\n # reshape input image\n self.x_image = tf.reshape(x, [-1,28,28,1])\n \n # get the output node from the architecture-defining object\n obj = getattr(self, arch)()\n y_conv = obj.arch(self)\n\n # define the loss function here (TODO: parameterize?). 
We use cross-entropy\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))\n\n # define the gradient update method here (TODO: parameterize?)\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\n correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # summary-op for tensorboard\n summary_op = tf.scalar_summary(\"training accuracy\", accuracy)\n self.summary_writer.add_graph(self.sess.graph)\n\n # init tensorflow variables \n self.sess.run(tf.initialize_all_variables())\n\n # stochastic gradient descent (mini-batch training)\n # TODO: parameterize numbers used in here)\n for i in range(500):\n batch = self.data.train.next_batch(50)\n\n # gather summary and write, every 100 steps\n if i%100 == 0:\n\n summary_op_str = self.sess.run(summary_op, feed_dict={\n x:batch[0], y_: batch[1], self.keep_prob: 1.0})\n self.summary_writer.add_summary(summary_op_str, i)\n print(summary_op_str)\n\n self.sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], self.keep_prob: 0.5})\n\n # get test accuracy\n print(\"test accuracy %g\"%(self.sess.run(accuracy, feed_dict={\n x: self.data.test.images, y_: self.data.test.labels, self.keep_prob: 1.0})))\n\n self.sess.close()", "def backPropagate(self):\n\n # application of the chain rule to find derivative of the loss function with respect to weights2 and weights1\n d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))\n d_weights1 = np.dot(self.input.T, (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))\n\n # update the weights with the derivative (slope) of the loss function\n self.weights1 += d_weights1\n self.weights2 += d_weights2", "def policy_gradient(self, loader):\n\n net = nn.DataParallel(self.agent, device_ids=self.gpus)\n total_steps = len(loader)\n \n for step_idx, src in enumerate(tqdm(loader, desc='Calculating policy gradient...', leave=False)):\n\n # Decode fragments and smiles, and get loss\n frags, smiles, loss = self.getBatchOutputs(net, src)\n \n # Get rewards\n reward = self.env.getRewards(smiles, frags=frags)\n\n # Filter out molecules with multiple fragments by setting reward to 0\n if self.no_multifrag_smiles:\n reward = np.array([r if s.count('.') == 0 else [0] for s,r in zip(smiles, reward)])\n reward = torch.Tensor(reward).to(self.device)\n \n # Train model with policy gradient\n self.optim.zero_grad()\n loss = loss * ( reward - self.beta )\n loss = -loss.mean()\n loss.backward()\n self.optim.step()\n\n self.monitor.saveProgress(step_idx, None, total_steps, None, loss=loss.item())\n \n return loss.item()", "def compute_bp_update(self, loss, retain_graph=False):\n\n if self.bias is not None:\n grads = torch.autograd.grad(loss, [self.weights, self.bias],\n retain_graph=retain_graph)\n else:\n grads = torch.autograd.grad(loss, self.weights,\n retain_graph=retain_graph)\n\n return grads", "def verify_gradients(self):\n\n print 'WARNING: calling verify_gradients reinitializes the learner'\n\n rng = np.random.mtrand.RandomState(1234)\n\n self.seed = 1234\n self.sizes = [4, 5]\n self.initialize(20, 3)\n example = (rng.rand(20) < 0.5, 2)\n input, target = example\n epsilon = 1e-6\n self.lr = 0.1\n self.decrease_constant = 0\n\n self.fprop(input, target)\n self.bprop(input, target) # compute gradients\n\n import copy\n emp_grad_weights = copy.deepcopy(self.weights)\n\n for h in 
range(len(self.weights)):\n for i in range(self.weights[h].shape[0]):\n for j in range(self.weights[h].shape[1]):\n self.weights[h][i, j] += epsilon\n a = self.fprop(input, target)\n self.weights[h][i, j] -= epsilon\n\n self.weights[h][i, j] -= epsilon\n b = self.fprop(input, target)\n self.weights[h][i, j] += epsilon\n\n emp_grad_weights[h][i, j] = (a - b) / (2. * epsilon)\n\n print 'grad_weights[0] diff.:', np.sum(np.abs(self.grad_weights[0].ravel() - emp_grad_weights[0].ravel())) / \\\n self.weights[0].ravel().shape[0]\n print 'grad_weights[1] diff.:', np.sum(np.abs(self.grad_weights[1].ravel() - emp_grad_weights[1].ravel())) / \\\n self.weights[1].ravel().shape[0]\n print 'grad_weights[2] diff.:', np.sum(np.abs(self.grad_weights[2].ravel() - emp_grad_weights[2].ravel())) / \\\n self.weights[2].ravel().shape[0]\n\n emp_grad_biases = copy.deepcopy(self.biases)\n for h in range(len(self.biases)):\n for i in range(self.biases[h].shape[0]):\n self.biases[h][i] += epsilon\n a = self.fprop(input, target)\n self.biases[h][i] -= epsilon\n\n self.biases[h][i] -= epsilon\n b = self.fprop(input, target)\n self.biases[h][i] += epsilon\n\n emp_grad_biases[h][i] = (a - b) / (2. * epsilon)\n\n print 'grad_biases[0] diff.:', np.sum(np.abs(self.grad_biases[0].ravel() - emp_grad_biases[0].ravel())) / \\\n self.biases[0].ravel().shape[0]\n print 'grad_biases[1] diff.:', np.sum(np.abs(self.grad_biases[1].ravel() - emp_grad_biases[1].ravel())) / \\\n self.biases[1].ravel().shape[0]\n print 'grad_biases[2] diff.:', np.sum(np.abs(self.grad_biases[2].ravel() - emp_grad_biases[2].ravel())) / \\\n self.biases[2].ravel().shape[0]", "def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. 
+ self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def update(network: nn.Module, data: DataLoader, loss: nn.Module, \n opt: optim.Optimizer) -> list:\n error_list = []\n counter = 0\n for x, y in data:\n \n print(x.float()[0])\n pred = network(x.float())\n print(pred, y.float())\n\n le = loss(pred, y.float())\n error_list.append(le)\n opt.zero_grad()\n le.backward()\n opt.step()\n counter += 1\n \n print(\"Loss: \", torch.mean(torch.tensor(error_list)) )\n \n return torch.tensor(error_list)", "def train(self, num_epochs: int):\n learn_rate = 0.02\n\n images, labels = self._mn_data.load_training()\n indices = [i for i in range(len(images))]\n\n for epoch in range(num_epochs):\n random.shuffle(indices) # Avoids modifying the actual lists\n epoch_cost = 0\n i = 0\n\n # Go through the training data in batches\n while i < len(indices):\n print(i, \"---------------------------------------------------------\")\n\n if i >= 800:\n break\n\n start = i\n end = i + batch_size\n batch_indices = indices[start:end]\n\n dw = [[[0 for _ in range(perceptron.size_w())] for perceptron in layer] for layer in self._network]\n db = [[0 for _ in layer] for layer in self._network]\n\n # Take a single image from the batch\n for index in batch_indices:\n # print(\"ex\")\n result = self.feed_forward(images[index])\n epoch_cost += self.cost(result, labels[index]) # Creates self._desired_changes\n\n # Backpropagate starting from the last (output) layer\n for j in range(len(self._network)-1, -1, -1):\n layer = self._network[j]\n prev_act_values = self._layer_inputs[j]\n function_name = layer[0].get_activation().name()\n\n if j > 0:\n next_desired_changes = [0.0 for _ in self._network[j-1]]\n else:\n next_desired_changes = None\n\n if function_name == \"relu\":\n leakage = self._relu.get_leakage()\n\n # Look at each perceptron\n for k in range(len(layer)):\n perceptron = layer[k]\n dc_da = self._desired_changes[k]\n\n if function_name == \"sigmoid\":\n dc_da *= self._sigmoid(perceptron.z) * (1 - self._sigmoid(perceptron.z))\n # print(perceptron.z, sig_delta)\n # print(dc_da)\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n elif function_name == \"relu\":\n dc_da *= leakage if perceptron.z < 0 else 1\n db[j][k] -= dc_da * learn_rate\n\n # For each weight\n for l in range(len(perceptron.weights)):\n dw[j][k][l] -= dc_da * prev_act_values[l] * learn_rate\n\n if next_desired_changes:\n next_desired_changes[l] += dc_da * perceptron.weights[l]\n\n # print(\"dcda\", dc_da)\n\n if next_desired_changes:\n # print(\"nd\", 
next_desired_changes)\n self._desired_changes = next_desired_changes\n\n # End of sample image loop\n # print(dw[1:])\n # break\n\n # Update weights and biases\n for j in range(len(self._network)):\n layer = self._network[j]\n\n for k in range(len(layer)):\n perceptron = layer[k]\n\n perceptron.change_weights_and_bias(dw[j][k], db[j][k])\n\n # print(dw[1:])\n # print(db)\n\n i += batch_size\n\n print(\"Epoch {} completed out of {} with loss {}\".format(epoch + 1, num_epochs, epoch_cost))", "def update(self):\n\n # Update W (gradient should be up-to-date)\n _projected_step(self.W, self.gW, 1.0 / self.lipschitz_W())\n\n # Update H (need to recompute residuals since W was updated).\n self.cache_resids()\n self.cache_gH()\n _projected_step(self.H, self.gH, self.step_size)\n\n # Update residuals and gradient computation for W (for next iteration).\n self.cache_resids()\n self.cache_gW()\n\n # Return loss\n return self.loss", "def train(X_train, y_train, X_test, y_test, net):\n \n # convert X, y to tensors:\n X_train = torch.tensor(X_train, dtype=torch.float32)\n y_train = torch.tensor(y_train, dtype=torch.float32)\n \n X_test = torch.tensor(X_test, dtype=torch.float32)\n y_test = torch.tensor(y_test, dtype=torch.float32)\n\n # iterator:\n train_set = TensorDataset(X_train, y_train)\n train_loader = DataLoader(train_set, batch_size, shuffle=True)\n\n test_set = TensorDataset(X_test, y_test)\n test_loader = DataLoader(test_set, batch_size, shuffle=True)\n\n # optimizer:\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n loss = nn.MSELoss()\n\n # loss accumulator:\n time_line = []\n train_metric = []\n test_metric = []\n\n # loop:\n for epoch in range(epochs):\n # update parameters:\n for Xb, yb in train_loader:\n train_ls = loss(net(Xb), yb)\n optimizer.zero_grad()\n train_ls.backward()\n optimizer.step()\n # update train and test losses:\n with torch.no_grad():\n if not epoch % 50:\n time_line.append(epoch)\n metric = 0\n for Xb, yb in train_loader:\n metric += loss(net(Xb), yb) / batch_size\n train_metric.append(metric)\n metric = 0\n for Xb, yb in test_loader:\n metric += loss(net(Xb), yb) / batch_size\n test_metric.append(metric)\n # verbose:\n print('Epoch: ', epoch)\n\n # final report of the losses: \n print('Train loss.....{0:6.3f}'.format(train_metric[-1]))\n print('Test loss......{0:6.3f}'.format(test_metric[-1]))\n\n # plot losses with respect to epochs:\n plt.plot(time_line, train_metric, color='b')\n plt.plot(time_line, test_metric, color='r')\n plt.show()", "def run_code_for_training_with_CrossEntropy_and_BCE_Losses(self, net):\n filename_for_out1 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"label.txt\"\n filename_for_out2 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"regres.txt\"\n FILE1 = open(filename_for_out1, 'w')\n FILE2 = open(filename_for_out2, 'w')\n net = copy.deepcopy(net)\n net = net.to(self.dl_studio.device)\n criterion1 = nn.CrossEntropyLoss()\n# criterion2 = self.dl_studio.DetectAndLocalize.IOULoss(self.dl_studio.batch_size)\n criterion2 = nn.BCELoss()\n optimizer = optim.SGD(net.parameters(), \n lr=self.dl_studio.learning_rate, momentum=self.dl_studio.momentum)\n for epoch in range(self.dl_studio.epochs): \n running_loss_labeling = 0.0\n running_loss_regression = 0.0 \n for i, data in enumerate(self.train_dataloader):\n gt_too_small = False\n inputs, bbox_gt, labels = data['image'], data['bbox'], data['label']\n if self.dl_studio.debug_train and i % 1000 == 999:\n print(\"\\n\\n[iter=%d:] Ground Truth: \" % (i+1) + \n ' '.join('%5s' 
% self.dataserver_train.class_labels[labels[j].item()] for j in range(self.dl_studio.batch_size)))\n inputs = inputs.to(self.dl_studio.device)\n labels = labels.to(self.dl_studio.device)\n bbox_gt = bbox_gt.to(self.dl_studio.device)\n optimizer.zero_grad()\n outputs = net(inputs)\n outputs_label = outputs[0]\n bbox_pred = outputs[1]\n if self.dl_studio.debug_train and i % 500 == 499:\n inputs_copy = inputs.detach().clone()\n inputs_copy = inputs_copy.cpu()\n bbox_pc = bbox_pred.detach().clone()\n bbox_pc[bbox_pc<0] = 0\n bbox_pc[bbox_pc>31] = 31\n _, predicted = torch.max(outputs_label.data, 1)\n print(\"[iter=%d:] Predicted Labels: \" % (i+1) + \n ' '.join('%10s' % self.dataserver_train.class_labels[predicted[j].item()] \n for j in range(self.dl_studio.batch_size)))\n for idx in range(self.dl_studio.batch_size):\n i1 = int(bbox_gt[idx][1])\n i2 = int(bbox_gt[idx][3])\n j1 = int(bbox_gt[idx][0])\n j2 = int(bbox_gt[idx][2])\n k1 = int(bbox_pc[idx][1])\n k2 = int(bbox_pc[idx][3])\n l1 = int(bbox_pc[idx][0])\n l2 = int(bbox_pc[idx][2])\n print(\" gt_bb: [%d,%d,%d,%d]\"%(j1,i1,j2,i2))\n print(\" pred_bb: [%d,%d,%d,%d]\"%(l1,k1,l2,k2))\n inputs_copy[idx,0,i1:i2,j1] = 255\n inputs_copy[idx,0,i1:i2,j2] = 255\n inputs_copy[idx,0,i1,j1:j2] = 255\n inputs_copy[idx,0,i2,j1:j2] = 255\n inputs_copy[idx,2,k1:k2,l1] = 255 \n inputs_copy[idx,2,k1:k2,l2] = 255\n inputs_copy[idx,2,k1,l1:l2] = 255\n inputs_copy[idx,2,k2,l1:l2] = 255\n self.dl_studio.display_tensor_as_image(\n torchvision.utils.make_grid(inputs_copy, normalize=True),\n \"see terminal for TRAINING results at iter=%d\" % (i+1))\n mask_regress = torch.zeros(self.dl_studio.batch_size,32,32,requires_grad=False)\n mask_gt = torch.zeros(self.dl_studio.batch_size, 32,32)\n for k,out_regres in enumerate(bbox_pred):\n x1,y1,x2,y2 = bbox_pred[k].tolist()\n x1_gt,y1_gt,x2_gt,y2_gt = bbox_gt[k].tolist()\n x1,y1,x2,y2 = [int(item) if item >0 else 0 for item in (x1,y1,x2,y2)]\n x1_gt,y1_gt,x2_gt,y2_gt = [int(item) if item>0 else 0 for item in (x1_gt,y1_gt,x2_gt,y2_gt)]\n if abs(x1_gt - x2_gt)<5 or abs(y1_gt-y2_gt) < 5: gt_too_small = True\n mask_regress_np = np.zeros((32,32), dtype=bool)\n mask_gt_np = np.zeros((32,32), dtype=bool)\n mask_regress_np[y1:y2,x1:x2] = 1\n mask_gt_np[y1_gt:y2_gt, x1_gt:x2_gt] = 1\n mask_regress[k,:,:] = torch.from_numpy(mask_regress_np)\n mask_regress.reqiures_grad=True\n mask_gt[k,:,:] = torch.from_numpy(mask_gt_np)\n mask_gt.reqiures_grad=True \n loss_labeling = criterion1(outputs_label, labels)\n loss_labeling.backward(retain_graph=True) \n loss_regression = criterion2(mask_regress, mask_gt)\n loss_regression.requires_grad = True\n loss_regression.backward()\n optimizer.step()\n running_loss_labeling += loss_labeling.item() \n running_loss_regression += loss_regression.item() \n if i % 1000 == 999: \n avg_loss_labeling = running_loss_labeling / float(1000)\n avg_loss_regression = running_loss_regression / float(1000)\n print(\"[epoch:%d, batch:%5d] loss_labeling: %.3f loss_regression: %.3f \" % (epoch + 1, i + 1, avg_loss_labeling, avg_loss_regression))\n FILE1.write(\"%.3f\\n\" % avg_loss_labeling)\n FILE1.flush()\n FILE2.write(\"%.3f\\n\" % avg_loss_regression)\n FILE2.flush()\n running_loss_labeling = 0.0\n running_loss_regression = 0.0\n print(\"\\nFinished Training\\n\")\n self.save_model(net)" ]
[ "0.7155436", "0.6939661", "0.685114", "0.68466926", "0.68327856", "0.67759883", "0.6716115", "0.66500854", "0.662701", "0.66258705", "0.660631", "0.65530854", "0.65456945", "0.6514191", "0.6513278", "0.6499993", "0.6499993", "0.6494454", "0.6474171", "0.6471904", "0.64706767", "0.64699215", "0.6407085", "0.639998", "0.6371353", "0.6371325", "0.63648796", "0.6362781", "0.635086", "0.63299835", "0.63286054", "0.6321742", "0.6311118", "0.6303046", "0.6292093", "0.6288903", "0.62879723", "0.628514", "0.62725353", "0.6264534", "0.6262274", "0.6259101", "0.62562335", "0.6254723", "0.62225384", "0.62220407", "0.6217594", "0.6213984", "0.62075037", "0.6206612", "0.620414", "0.61957824", "0.6189516", "0.61873925", "0.6185612", "0.6185347", "0.6183471", "0.6176331", "0.6175878", "0.6172858", "0.61725134", "0.6169516", "0.616729", "0.6163433", "0.6163367", "0.61604106", "0.6155571", "0.61545783", "0.61443555", "0.6143194", "0.61431026", "0.6126707", "0.61259085", "0.61241734", "0.61206514", "0.6107361", "0.61025673", "0.60986465", "0.60931313", "0.6086483", "0.60749304", "0.60748893", "0.6069296", "0.60484076", "0.6039241", "0.6035911", "0.60268795", "0.60209024", "0.60203034", "0.60201365", "0.6020052", "0.6010823", "0.60090125", "0.60013086", "0.59933025", "0.59902334", "0.5988095", "0.5987372", "0.5984102", "0.5983399", "0.59820974" ]
0.0
-1
Initializes a moving window object.
def __init__( self, interp, comm, dt, ptcl, v, p_nz, time, ux_m=0., uy_m=0., uz_m=0., ux_th=0., uy_th=0., uz_th=0., gamma_boost=None ) : # Check that the boundaries are open if ((comm.rank == comm.size-1) and (comm.right_proc is not None)) \ or ((comm.rank == 0) and (comm.left_proc is not None)): raise ValueError('The simulation is using a moving window, but ' 'the boundaries are periodic.\n Please select open ' 'boundaries when initializing the Simulation object.') # Momenta parameters self.ux_m = ux_m self.uy_m = uy_m self.uz_m = uz_m self.ux_th = ux_th self.uy_th = uy_th self.uz_th = uz_th # When running the simulation in boosted frame, convert the arguments if gamma_boost is not None: boost = BoostConverter( gamma_boost ) self.uz_m, = boost.longitudinal_momentum([ self.uz_m ]) # Attach moving window speed and period self.v = v # Get the positions of the global physical domain zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax( local=False, with_damp=False, with_guard=False ) # Attach reference position of moving window (only for the first proc) # (Determines by how many cells the window should be moved) if comm.rank == 0: self.zmin = zmin_global_domain # Attach injection position and speed (only for the last proc) if comm.rank == comm.size-1: self.v_end_plasma = \ c * self.uz_m / np.sqrt(1 + ux_m**2 + uy_m**2 + self.uz_m**2) # Initialize plasma *ahead* of the right *physical* boundary of # the box so, after `exchange_period` iterations # (without adding new plasma), there will still be plasma # inside the physical domain. ( +3 takes into account that 3 more # cells need to be filled w.r.t the left edge of the physical box # such that the last cell inside the box is always correct for # 1st and 3rd order shape factor particles after the moving window # shifted by exchange_period cells. ) self.z_inject = zmax_global_domain + 3*comm.dz + \ comm.exchange_period * (v-self.v_end_plasma) * dt # Try to detect the position of the end of the plasma: # Find the maximal position of the particles which are # continously injected. self.z_end_plasma = None for species in ptcl: if species.continuous_injection and species.Ntot != 0: # Add half of the spacing between particles (the injection # function itself will add a half-spacing again) self.z_end_plasma = species.z.max() + 0.5*comm.dz/p_nz break # Default value in the absence of continuously-injected particles if self.z_end_plasma is None: self.z_end_plasma = zmax_global_domain self.nz_inject = 0 self.p_nz = p_nz # Attach time of last move self.t_last_move = time - dt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def __init__(self):\n\n # Window starting position\n x = 200\n y = 30\n os.environ[\"SDL_VIDEO_WINDOW_POS\"] = \"%d,%d\" % (x, y)\n\n pygame.init()\n # Init window\n self.window = Window()\n # Flag that defines if the program is running or not\n self.running = True\n if Settings.MENU_ENABLED:\n self.main_menu = MainMenu(self.window)\n self.main_loop()", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def init():\n try:\n WindowServer.hwnd = win32gui.FindWindow(None, \"Elder Scrolls Online\")\n rect = win32gui.GetWindowRect(WindowServer.hwnd)\n client_rect = win32gui.GetClientRect(WindowServer.hwnd)\n WindowServer.windowOffset = math.floor(((rect[2] - rect[0]) - client_rect[2]) / 2)\n WindowServer.titleOffset = ((rect[3] - rect[1]) - client_rect[3]) - WindowServer.windowOffset\n if config.get(\"borderless\"):\n WindowServer.titleOffset = 0\n WindowServer.status = Status.RUNNING\n except pywintypes.error:\n logging.error(\"Game window not found\")\n WindowServer.status = Status.CRASHED", "def Window(self, w):\r\n\r\n self.window = w\r\n return self", "def set_window_position(self, left, top, right, bottom, state, is_floating):\n self._set_window_position(left, top, right, bottom, state, is_floating)", "def init_curr_window(self):\n self.curr_window = np.random.randn(1, self.win_length) * 1e-9 * np.flip(self.asymwindow, axis=0)", "def __init__(self, y, x, height, width):\n\n self.window = curses.newwin(height, width, y, x)\n self.window.keypad(1)\n self.window.scrollok(True)\n self.scrolling = 0\n self.width = width\n self.height = height\n self.y = y\n self.x = x\n self.fg = \"W\"\n self.bg = None", "def __init__(self):\n super().__init__()\n\n self.geometry('800x480+500+300')\n if settings.touchscreen:\n self.attributes('-fullscreen', True)\n self.attributes('-topmost', 
True)\n self.update()\n self.attributes('-topmost', False)\n self.configure(background='#000000', cursor='none' if settings.touchscreen else None)\n\n self.grid_rowconfigure(0, weight=1)\n self.grid_columnconfigure(0, weight=1)\n self._current_screen = None\n self._switch_screen(HomeScreen(onnewplayer=self._onnewplayer, onexistingplayer=self._onexistingplayer,\n onhighscores=self._onhighscores, master=self))", "def __create_win(self):\r\n self.__calc_size()\r\n try:\r\n self.win = curses.newwin(self.height, self.width, self.posy, self.posx)\r\n self.panel = curses.panel.new_panel(self.win)\r\n self.win.scrollok(True)\r\n self.win.keypad(1)\r\n self.do_paint()\r\n except Exception:\r\n self.win = None\r\n self.panel = None", "def __init__(self, parent, rect, direction=0, center=False, useAero=False):\r\n\r\n wx.Window.__init__(self, parent, -1, rect.GetPosition(), rect.GetSize(), wx.NO_BORDER)\r\n\r\n self._direction = direction\r\n self._center = center\r\n self._valid = True\r\n self._useAero = useAero\r\n \r\n self._bmp_unfocus, self._bmp_focus = GetDockingImage(direction, useAero, center)\r\n \r\n self._currentImage = self._bmp_unfocus\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def init_window(self, size, screen=None):\n # enforce minimum size\n (mw, mh), (w, h) = config.minsize, size\n if w < mw or h < mh:\n size = mw, mh\n\n # init view surface and pass it to screen\n self.view = pygame.display.set_mode(size, pygame.RESIZABLE)\n self.view.fill((0, 0, 0))\n if screen is not None:\n screen.resize_view()", "def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def SetWindow(self, w):\r\n\r\n self.window = w", "def _prep_window(self, parent=None):\n self.toolkit.app.initialize()\n if not self.initialized:\n self.setup(parent)\n self.resize_to_initial()\n self.update_minimum_size()\n self.update_maximum_size()", "def __init__(self):\n self.defaultTheme = \"DarkAmber\"\n self.version = 1.4\n self.versionName = \"class update\"\n self.title = \"Lms GUI default window\"\n self.layout = [[sg.Text(\"This is the base window class layout.\")]]\n self.elementJustification = 'c'\n self.location=(500, 300)\n self.running = True\n self.window = None\n self.event = \"\"\n self.values = []\n self.nextAction = None", "def __init__(self, rot=0, y=0, x=0, window=None):\n\n\t\tself.rot = rot\n\t\tself.y = y\n\t\tself.x = x\n\t\tself.window = window", "def __init__( self, window_size=QSize( DEFAULT_H_SIZE, DEFAULT_V_SIZE ) ):\n super().__init__()\n\n self.centralWidget = None\n self.window_size = window_size\n\n self.create_models()\n self.create_widgets()\n self.create_layout()\n self.create_menus()\n self.set_state()", "def __init__(self, win):\n \n # draw the base shot of the launcher\n base = Circle(Point(0,0), 3)\n base.setFill(\"red\")\n 
base.setOutline(\"red\")\n base.draw(win)\n\n # save the window and create initial angle and velocity\n self.win = win\n self.angle = radians(45.0)\n self.vel = 40.0\n \n # create inital \"dummy\" arrow\n self.arrow = Line(Point(0,0), Point(0,0)).draw(win)\n # replace it with the correct arrow\n self.redraw()", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def InitializeWindow(self):\n \n win_height = 600\n win_width = 900\n \n # 'x' and 'y' coordinates place window in the center of the screen\n y = int((self.winfo_screenheight() / 2) - (win_height / 2))\n x = int((self.winfo_screenwidth() / 2) - (win_width / 2))\n self.geometry(f'{win_width}x{win_height}+{x}+{y}')\n self.resizable(False, False)\n self.title('Log In')\n \n # Initialize the background template frame and canvas\n self.main_frame = Widgets.CreateFrame(self)\n self.main_frame.pack(fill='both', expand='true')\n self.main_canvas = Widgets.CreateCanvas(self.main_frame)\n self.main_canvas.pack(fill='both', expand='true')\n \n # Create a window in the center of the screen to hold widgets\n top_left_x = win_width / 4\n top_left_y = win_height / 4\n bottom_right_x = win_width - top_left_x\n bottom_right_y = win_height - top_left_y\n self.main_canvas.create_rectangle(top_left_x, top_left_y,\n bottom_right_x, bottom_right_y,\n fill='#f8f8ff')\n self.canvas_window = self.main_canvas.create_window(win_width / 2,\n win_height / 2)\n \n # Function to save user data if the window is exited\n self.protocol('WM_DELETE_WINDOW', self.OnClose)", "def position_window(window):\n pos = QtGui.QCursor.pos()\n window.move(pos.x(), pos.y())", "def init(cls):\n\n cls.configs = yaml.load( file('../local/config.yaml') )\n cls.is_online = False\n cls.state = State.playing\n cls.classes = classes\n cls.guiclasses = guiclasses\n\n # set up pygame and init\n pygame.init()\n\n # Set up the window\n cls.screen = pygame.display.set_mode(\n tuple(cls.configs['options']['resolution']),\n 0,\n 32)\n classes.screen = cls.screen\n guiclasses.screen = cls.screen", "def __init__(self):\n self.size = width, height = pygame.display.Info().current_w, pygame.display.Info().current_h\n self.screen = pygame.display.set_mode(self.size)\n self.x = int((width - 910) / 2)\n self.y = int((height - 675) / 2)", "def startWindow():\n\n m = mainWindow()\n\n # Show Window\n m.show()\n\n # Return to stay alive\n return m", "def __window_moveTo(self, x, y):\n pass", "def initialise_window(self):\n self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)\n 
self.imageLabel.setScaledContents(True)\n self.scrollArea.setWidget(self.imageLabel)\n self.setCentralWidget(self.scrollArea)\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable horizontal scrollbar.\n self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable vertical scrollbar.\n self.setWindowTitle(\"Robot Map\") # Set title.\n self.showFullScreen() # Make fullscreen.", "def __init__(self):\n\n # Screen's settings\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n\n # Bluebee Settings\n self.bb_speed = 1.0\n\n # Moving test.\n self.counter = 0\n self.max_left = 400\n self.max_up = 300\n self.max_right = 400\n self.max_down = 300", "def move_window(windowid,PosX,PosY,Width,Height):\n PosX = int(PosX)\n PosY = int(PosY)\n \n logging.debug(\"moving window: %s to (%s,%s,%s,%s) \" % (windowid,PosX,PosY,Width,Height))\n \n if windowid == \":ACTIVE:\":\n\t\twindow = \"-r \"+windowid\n else:\n window = \"-i -r \"+windowid\n\n\t# NOTE: metacity doesn't like resizing and moving in the same step\n # unmaximize\n os.system(\"wmctrl \"+window+\" -b remove,maximized_vert,maximized_horz\")\n # resize\n command = \"wmctrl \" + window + \" -e 0,-1,-1,\" + str(Width) + \",\" + str(Height)\n os.system(command)\n # move\n command = \"wmctrl \" + window + \" -e 0,\" + str(max(PosX,0)) + \",\" + str(max(PosY,0))+ \",-1,-1\"\n os.system(command)\n # set properties\n command = \"wmctrl \" + window + \" -b remove,hidden,shaded\"\n os.system(command)", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def __init__(self, title='sdl2', x=lib.SDL_WINDOWPOS_CENTERED, y=lib.SDL_WINDOWPOS_CENTERED,\n w=640, h=480, flags=frozenset()):\n self._ptr = check_ptr_err(lib.SDL_CreateWindow(title.encode('utf-8'), x, y, w, h, enumtools.get_mask(flags)))", "def __init__(self, window_string):\n parts = window_string.split()\n parts.sort()\n if (len(parts) == 4 and parts[0].startswith('H=') and parts[1].startswith('W=') and\n parts[2].startswith('X=') and parts[3].startswith('Y=')):\n self.h = int(parts[0][2:])\n self.w = int(parts[1][2:])\n self.y = int(parts[2][2:])\n self.x = int(parts[3][2:])\n self.f=1\n if (self.h>=0 and self.w>=0):\n self.f = 1;\n else:\n self.f = 0;", "def create_window(self, xoff: int, yoff: int, width: int, height: int, name: str = None):\n return Window(xoff, yoff, width, height, name)", "def __init__(self, win):\n super().__init__()\n self.mouse = (0, 0)\n glfw.set_cursor_pos_callback(win, self.on_mouse_move)\n glfw.set_scroll_callback(win, self.on_scroll)", "def __init__(self, win):\n super().__init__()\n self.mouse = (0, 0)\n glfw.set_cursor_pos_callback(win, self.on_mouse_move)\n glfw.set_scroll_callback(win, self.on_scroll)", "def __init__ (self, width, height):\r\n\r\n self.screen_width = width\r\n self.screen_height = height\r\n self.clock = pygame.time.Clock()\r\n self.screen = pygame.display.set_mode((width, height), pygame.FULLSCREEN)\r\n self.initialise()", "def __init___(self, x, y, width, height):\n super(GRect, self).__init__()\n frameWidth = width\n frameHeight = height\n setLocation(x, y)", "def align_window(self):\n self.parent.update()\n\n # get screen info\n screen_width = self.parent.winfo_screenwidth()\n screen_height = self.parent.winfo_screenheight()\n\n # get window 
info\n window_width = self.parent.winfo_width()\n window_height = self.parent.winfo_height()\n\n # determine position of the window\n x = screen_width - window_width/2 - 120\n y = screen_height - window_height/2 - 60\n\n # move the window to determined position\n self.parent.geometry('+%d+%d' % (x, y))", "def _display_setup(self):\r\n display_file = \"{}/display.json\".format(self.settings_dir)\r\n with open(display_file) as json_file:\r\n win_settings = json.load(json_file)\r\n self.win = visual.Window(**win_settings)\r\n framerate = self.win.fps()\r\n self.frame_duration = 1.0/framerate\r\n self.mouse = event.Mouse(visible=False, win=self.win)", "def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode((DisplayConsts.SCREEN_WIDTH, DisplayConsts.SCREEN_HEIGHT))", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def init(self):\n\n pygame.init()\n pygame.joystick.init()\n self.controller = pygame.joystick.Joystick(0)\n self.controller.init()\n self.x=0\n self.y=0", "def set_window(window: Optional[\"Window\"]) -> None:\n global _window\n _window = window", "def __init__(self, win): \r\n\r\n # draw the base shot of the launcher \r\n base = Circle(Point(0,0), 3) \r\n base.setFill('red')\r\n base.setOutline('red')\r\n base.draw(win) \r\n\r\n # save the window and create initial angle and velocity\r\n self.win = win \r\n self.angle = radians(45.0)\r\n self.vel = 40.0 \r\n\r\n # create initial 'dummy' arrow \r\n self.arrow = Line(Point(0,0), Point(0, 0)).draw(win) \r\n # replace it with the correct arrow \r\n self.redraw()", "def __init__(self, height, width, yloc, xloc):\n AnsiWindow.__init__(self, height, width, yloc, xloc)\n self._position = self._position_last = 0\n self._quit = False\n self.content = list()\n self.keyset = VI_KEYSET.copy()\n self.init_keystrokes()", "def setup_score_window(self, score_window):\r\n self.score_window = score_window", "def __init__(self, max_width=500):\n self.max_width = max_width\n self.closed = False\n \n # Create OpenGL window\n super().__init__(visible=False, vsync=False, resizable=True)", "def setup_render_window(self):\n\n # Set camera\n self.__camera.SetPosition(0.0, 0.0, 20.0)\n self.__camera.SetFocalPoint(0.0, 0.0, 0.0)\n\n # Set renderer\n self.renderer.SetActiveCamera(self.__camera)\n self.renderer.SetBackground(0.6, 0.6, 0.6)\n\n # Set render window\n self.__render_window.AddRenderer(self.renderer)\n self.__render_window.SetSize(1000, 600)\n\n # Set render window interactor\n self.__render_window_interactor.SetRenderWindow(self.__render_window)\n self.__render_window_interactor.SetInteractorStyle(self.__interactor_style_trackball_camera)", "def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)", "def __init__(self, root):\n self.root = root\n w, h = root.winfo_screenwidth(), self.root.winfo_screenheight()\n self.root.geometry(\"%dx%d+0+0\" % (w, h))\n self.root.protocol(\"WM_DELETE_WINDOW\", self.end_program)\n self.program_running = True", "def initialize(self) -> None:\n self.simulation = self.initialize_simulation()\n width, height = get_window_resolution()\n display_dim = ((0, width), (0, height))\n self.coord_mapper = CoordinateMapper2D(*self.simulation.dim, *display_dim)\n self.simple_pygame.all_sprites.empty()\n self.initialize_visualization()", "def setupWidget(self):\r\n self.generateCoordinates()\r\n self.modifyCoordinateLists()\r\n 
self.settings.movementMatrix = self.movementMatrix\r\n self.settings.ghostIntersectionList = self.ghostIntersectionList\r\n self.createBodies()\r\n print(\"GameW set\")", "def __new__(cls):\n builder = get_builder('GwitterWindow')\n new_object = builder.get_object(\"gwitter_window\")\n new_object.finish_initializing(builder)\n return new_object", "def setWindowGeometry(x,y,width,height):\n dislin.window(x,y,width,height)", "def initialize_screen(self):\r\n\r\n pygame.init()\r\n pygame.display.set_caption('Minesweeper')\r\n\r\n screen_width = max(display_params.RECT_SIZE * self.cols + 2 * display_params.MARGIN_SIDE,\r\n display_params.MIN_SCREEN_WIDTH)\r\n screen_height = display_params.RECT_SIZE * self.rows + display_params.MARGIN_TOP + \\\r\n display_params.MARGIN_BOTTOM\r\n self.screen = pygame.display.set_mode((screen_width, screen_height))\r\n self.screen.fill(colors.NAVYBLUE)\r\n\r\n pygame.display.update()", "def __init__(self, len_x, len_y):\n self._gen_window(len_x, len_y)", "def __init__(self, len_x, len_y, win_func, *args, **kwargs):\n self._gen_window(len_x, len_y, win_func, *args, **kwargs)", "def __init__(self):\n self.app = qt.QApplication(sys.argv)\n self.window = qt.QMainWindow()\n self.screenSize = qt.QDesktopWidget().screenGeometry(-1)\n self.window.setGeometry(self.getDims()[1]/4, self.getDims()[0]/4, self.getDims()[1]/2, self.getDims()[0]/2)", "def __init__(self, inWindowTitleStr):\n super(MainWindow, self).__init__()\n self._mainWorkspace = None\n\n self.setWindowTitle(inWindowTitleStr)\n self.setGeometry(500, 100, 700, 900)\n\n self.mainWorkspace = workspace.WorkSpace(parent=self)", "def __init__(self):\n\n self.__main_window = None\n self.__main_display_table = None\n self.remote_stop = False\n\n self.__start_time = None\n self.__broadcast_entry = None\n self.__broadcast_label = None\n self.__broadcast_button = None\n self.__active_lines_stringvar = None\n self.__active_buses_stringvar = None\n self.__number_of_people_stringvar = None\n self.__session_time_stringvar = None\n self.__free_text_stringvars_dict = dict() #holds all the stringvars needed for the bus messages\n self.__font_name = \"Bahnschrift SemiBold SemiConden\"\n #coordinates for groups of icons on the screen\n self.__main_buttons_coords = {\"x\": 458, \"y\": 647}\n self.__statistics_coords = {\"x\": 348, \"y\": 690}\n self.__admin_controls_coords = {\"x\": 459, \"y\": 777}\n self.__broadcast_coords = {\"x\": 22, \"y\": 356}\n self.__messages_coords = {\"x\": 58, \"y\": 56}\n self.__table_coords = {\"x\": 448, \"y\": 16, \"width\": 620, \"height\": 566}", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60.0\n self.keys = pg.key.get_pressed()\n self.done = False\n self.player = Player((50,875), 4)\n self.level = pg.Surface((1000,1000)).convert()\n self.level_rect = self.level.get_rect()\n self.viewport = self.screen.get_rect(bottom=self.level_rect.bottom)\n self.win_text,self.win_rect = self.make_text()\n self.obstacles = self.make_obstacles()", "def __init__(self, parent, title=\"MazeBuilder Dialog\", lock_focus=True, x=None, y=None, populator=None, manager=None):\n Toplevel.__init__(self, parent)\n self.title(title)\n self.transient(parent)\n self._manager = manager\n if title:\n self._title = title\n self.parent = parent\n self.result = None\n body = Frame(self)\n if populator is not None:\n self.populate(populator)\n self.initial_focus = self.body(body)\n body.pack(padx=5, pady=5)\n 
self.buttonbox()\n\n # Needed to grab the window on Linux machines\n while True:\n try:\n self.grab_set()\n except TclError:\n continue\n else:\n break\n\n if not self.initial_focus:\n self.initial_focus = self\n\n self.protocol(\"WM_DELETE_WINDOW\", lambda: Debug.printi(\"Window Close Disabled\", Debug.Level.ERROR))\n if x is None and y is None:\n self.geometry(\"+%d+%d\" % (parent.winfo_rootx() + 50,\n parent.winfo_rooty() + 50))\n else:\n self.geometry(\"+%d+%d\" % (x + 1,\n y + 1))\n self.initial_focus.focus_set()\n\n if lock_focus:\n self.wait_window(self)", "def __init__(self, min_height=600, min_width=600):\n self.window = Tk()\n # set minimum size to which the window can be reduced\n self.window.minsize(min_width, min_height)\n self.canvas = None\n self.frames = {\n \"parameters\": None,\n \"canvas\": None\n }\n self.menubar = {\n \"menubar\": None,\n \"helpmenu\": None,\n \"filemenu\": None,\n \"editmenu\": None\n }\n self.combo_box = {\n \"class\": None,\n \"variable\": None\n }\n self.init_canvas_frame()\n self.init_parameters_frame()\n # self.init_menu_bar()\n self.classes = {\n \"parameters\": Parameters(self),\n \"fractal\": FastFractal(self)\n }\n self.init_parameter_combobox()", "def __init__(self, x1, y1, w, h, dal):\n self._dal = dal\n self._screen_size = Rect(x1, y1, w, h)\n\n self._facade = Facade.facade_layer()\n self._play_button = Rect(426,656,207,58)\n self._quit_button = Rect(686,662,207,58)", "def __init__(self, x, y, w, h, gamewin, x_ind, y_ind):\n\n super().__init__(x, y, w, h)\n\n self.gamewin = gamewin\n self.x_ind = x_ind\n self.y_ind = y_ind\n \n # Customize visuals\n self.box(FL_BORDER_BOX)\n self.color(FL_BLUE)\n self.clear_visible_focus()\n\n # Flags for drawing\n self.hit = False\n self.miss = False", "def __init__(self, window_type=WindowType.DEFAULT, dark=False):\n self._win_type = window_type\n self._dark = dark\n self._layer_items = {} # Dict of items for each visual \"layer\"\n\n self._overlay_items = [] # List of items for an overlay over the graph\n\n # Setup the window, and save some handles to it\n self._configureWindow()", "def move_window():\n\tif SLIDING_WINDOW:\n\t\t# get the chosen predicates\n\t\tpred = Predicate.objects.filter(pk__in=[p+1 for p in toggles.CHOSEN_PREDS])\n\n\t\t# handle window properties\n\t\tfor p in pred:\n\t\t\tp.move_window()", "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "def __init__(self, stdscr, config):\n # Make the cursor invisible\n curses.curs_set(0)\n # Make input non-blocking\n stdscr.nodelay(True)\n\n margin_x = (curses.COLS - config.arena_size[0] - 2) // 2\n margin_y = (curses.LINES - config.arena_size[1] - 2) // 2\n\n self.stdscr = stdscr\n self.config = config\n self.arena_win = curses.newwin(\n config.arena_size[1] + 2,\n config.arena_size[0] + 2,\n max(3, margin_y),\n max(0, margin_x))\n self.message_win = curses.newwin(\n 1,\n curses.COLS,\n max(margin_y + config.arena_size[1] + 1, (margin_y * 3 // 2) + config.arena_size[1]),\n 0)\n\n self.__draw_title()\n self.stdscr.refresh()", "def __init__(self, size: int):\n self.size = size\n self.window = []", "def __init__(self, tokens):\n super(Window, self).__init__(\"\") # because pylint complains otherwise\n self.tokens = tokens", "def __init__(self, window: QWidget, parent=None):\n QWidget.__init__(self, 
parent)\n\n self._window = window\n self._mousePressed = False", "def add_window(self, window):\n if not self.valid_window(window):\n return False\n self.windows.append(window)\n window.tiler = self\n if window not in self.start_positions.keys():\n self.start_positions[window] = window.display_size\n\n rules = config.GET_RULES(window.classname)\n if rules is not None and re.search(rules[\"regex\"], window.title) is not None:\n if \"floating\" in rules:\n window.set_floating(rules[\"floating\"])\n if \"decorated\" in rules:\n if bool(rules[\"decorated\"]):\n window.enable_decoration()\n else:\n window.disable_decoration()\n if \"position\" in rules:\n window.move_to(tuple(rules[\"position\"]))\n\n print(\"Added window: {0}\".format(window))\n window.print_window_styles()\n return True", "def on_activate(self, caller):\n self.window = GameWindow()\n self.add_window(self.window)", "def __init__(self, master, row, column, ystart, xleft, xright, nx, ny, checker):\n\n # A pair of windows needs 5 parameters to specify it:\n #\n # ystart -- first y value to be read, same for each window\n # xleft -- leftmost x pixel of lefthand window\n # xright -- leftmost x pixel of righthand window\n # nx -- X dimension, unbinned pixels\n # ny -- Y dimension, unbinned pixels\n\n self.ystart = RangedPosInt(master, ystart, 1, 1024, checker, width=4)\n self.ystart.grid(row=row,column=column)\n\n self.xleft = RangedPosInt(master, xleft, 1, 512, checker, width=4)\n self.xleft.grid(row=row,column=column+1)\n\n self.xright = RangedPosInt(master, xright, 513, 1024, checker, width=4)\n self.xright.grid(row=row,column=column+2)\n\n self.nx = RangedPosInt(master, nx, 1, 512, checker, width=4)\n self.nx.grid(row=row,column=column+3)\n\n self.ny = RangedPosInt(master, ny, 1, 1024, checker, width=4)\n self.ny.grid(row=row,column=column+4)", "def __init__(self):\n self.stdscr = curses.initscr()\n self.client = None\n self.max_y, self.max_x = self.stdscr.getmaxyx()\n self.chat_container = curses.newwin(self.max_y - 2, self.max_x, 1, 0)\n self.chat_win = self.chat_container.subwin(self.max_y - 3, self.max_x - 4, 2, 2)\n self.prompt_win = curses.newwin(1, self.max_x, self.max_y - 1, 0)\n self.setup()", "def _setup(self, width=turtle._CFG[\"width\"], height=turtle._CFG[\"height\"],\n startx=turtle._CFG[\"leftright\"], starty=turtle._CFG[\"topbottom\"]):\n if not hasattr(self._root, \"set_geometry\"):\n return\n \n sw = self._root.win_width()\n sh = self._root.win_height()\n if isinstance(width, float) and 0 <= width <= 1:\n width = sw*width\n if startx is None:\n startx = (sw - width) / 2\n if isinstance(height, float) and 0 <= height <= 1:\n height = sh*height\n if starty is None:\n starty = (sh - height) / 2\n self._root.set_geometry(width, height, startx, starty)\n self.update()", "def __init__(self, title, dimensions, num_windows, fg = (255, 255, 255), bg = (0, 0, 0)):\n self.fg = fg\n self.bg = bg\n self.offset = 0\n self.screens = []\n for i in range(num_windows):\n screen = Screen(title, i, dimensions)\n self.screens.append(screen)\n screen.window.show()", "def _setwin(self, win):\n\t\tself.win = win", "def start_render_window(self):\n\n # Initialize interactor\n self.__render_window_interactor.Initialize()\n\n # Start render window with interactor\n self.__render_window.Render()\n self.__render_window_interactor.Start()", "def __init__(self, master, **kwargs):\n GenericWindow.__init__(self, master, **kwargs)\n self.states = None\n self.master = master\n self.display()", "def __init__(self):\n self.screen_width = 1200\n 
self.screen_height = 800\n self.bg_color = (0, 230, 0)\n\n # Glove Settings\n self.glove_move_speed = 0.25\n self.glove_size = 100\n\n # Ball Settings\n self.ball_move_speed = 0.25\n self.ball_size = 40", "def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10", "def __init__(self, parent=None):\n self._window = None\n\n self.setup_ui()", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def __init__(self, settings, screen):\r\n super().__init__()\r\n self.screen = screen\r\n \r\n # Load the ship image and get its starting position\r\n self.image = pygame.image.load('player.png')\r\n self.rect = self.image.get_rect()\r\n self.screen_rect = screen.get_rect()\r\n self.settings = settings\r\n self.orientation = 'left'\r\n \r\n # Start at the bottom center of the screen.\r\n self.rect.centerx = self.screen_rect.width/4\r\n self.rect.bottom = self.screen_rect.height/2\r\n \r\n # Store a decimal value for the ship's center.\r\n self.centerx = float(self.rect.centerx)\r\n self.centery = float(self.rect.centery)\r\n \r\n # Movement flag\r\n self.moving_right = False\r\n self.moving_left = False\r\n self.moving_down = False\r\n self.moving_up = False", "def setupWindow(self, framerate, bgColor=\"black\", fgColor=\"green\"):\r\n self.framerate = framerate\r\n self.clock = pygame.time.Clock()\r\n \r\n pygame.init()\r\n self.screen = pygame.display.set_mode((self.screen_Width, self.screen_Height))\r\n\r\n self.bgColor = pygame.Color(bgColor)\r\n self.fgColor = pygame.Color(fgColor)", "def set_window(self, handle):\n pass", "def initialize_global_variables():\n\n # Screen Padding\n global BottomPadding, TopPadding, LeftPadding, RightPadding\n # Window Decoration\n global WinTitle, WinBorder\n # Grid Layout\n global CORNER_WIDTHS, CENTER_WIDTHS, Monitors, WidthAdjustment\n # Simple Layout\n global MwFactor\n # System Desktop and Screen Information\n global MaxWidth, MaxHeight, OrigX, OrigY, Desktop, WinList, OldWinList\n # Miscellaneous \n global TempFile, WindowFilter\n \n Config = initconfig()\n cfgSection=\"DEFAULT\"\n \n # use \"default\" for configurations written using the original stiler\n if Config.has_section(\"default\"):\n cfgSection=\"default\"\n\n BottomPadding = Config.getint(cfgSection,\"BottomPadding\")\n TopPadding = Config.getint(cfgSection,\"TopPadding\")\n LeftPadding = Config.getint(cfgSection,\"LeftPadding\")\n RightPadding = Config.getint(cfgSection,\"RightPadding\")\n WinTitle = Config.getint(cfgSection,\"WinTitle\")\n WinBorder = Config.getint(cfgSection,\"WinBorder\")\n MwFactor = Config.getfloat(cfgSection,\"MwFactor\")\n TempFile = Config.get(cfgSection,\"TempFile\")\n Monitors = Config.getint(cfgSection,\"Monitors\")\n WidthAdjustment = Config.getfloat(cfgSection,\"WidthAdjustment\")\n WindowFilter = Config.getboolean(cfgSection,\"WindowFilter\")\n CORNER_WIDTHS = map(lambda y:float(y),Config.get(cfgSection,\"GridWidths\").split(\",\"))\n\n # create the opposite section for each corner_width\n opposite_widths = []\n for width in CORNER_WIDTHS:\n opposite_widths.append(round(abs(1.0-width),2))\n\n # add the opposites\n CORNER_WIDTHS.extend(opposite_widths)\n\n CORNER_WIDTHS=list(set(CORNER_WIDTHS)) # filter out any duplicates\n CORNER_WIDTHS.sort()\n\n CENTER_WIDTHS = filter(lambda y: y < 0.5, CORNER_WIDTHS)\n CENTER_WIDTHS = map(lambda y:round(abs(y*2-1.0),2),CENTER_WIDTHS)\n CENTER_WIDTHS.append(1.0)\t\t\t\t # always allow 
max for centers\n CENTER_WIDTHS = list(set(CENTER_WIDTHS)) # filter dups\n CENTER_WIDTHS.sort()\n\n # Handle multiple monitors\n CORNER_WIDTHS=map(lambda y:round(y/Monitors,2)+WidthAdjustment,CORNER_WIDTHS)\n CENTER_WIDTHS=map(lambda y:round(y/Monitors,2)+WidthAdjustment,CENTER_WIDTHS)\n\n logging.debug(\"corner widths: %s\" % CORNER_WIDTHS)\n logging.debug(\"center widths: %s\" % CENTER_WIDTHS)\n\n (Desktop,OrigXstr,OrigYstr,MaxWidthStr,MaxHeightStr,WinList) = initialize()\n MaxWidth = int(MaxWidthStr) - LeftPadding - RightPadding\n MaxHeight = int(MaxHeightStr) - TopPadding - BottomPadding\n OrigX = int(OrigXstr) + LeftPadding\n OrigY = int(OrigYstr) + TopPadding \n OldWinList = retrieve(TempFile)", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def __init__(self, title, height, width, y, x):\n logging.info(\n \"Create Border {height},{width},{y},{x}\".format(\n height=height,\n width=width,\n y=y,\n x=x\n )\n )\n self.__border = curses.newwin(height, width, y, x)\n self.__win = self.__border.subwin(height - 2, width - 2, y + 1, x + 1)\n self.__title = title\n self.__win.scrollok(True)", "def positionWindow(window, windowSize, desktop, parentWidget, offset=30, border=10):\n logger = logging.getLogger(__name__)\n logger.debug(\"Positioning window\")\n \n # need cursor position on screen to decide where to open window\n cursor = QtGui.QCursor()\n cursor_pos = cursor.pos()\n logger.debug(\"Cursor pos: (%d, %d)\", cursor_pos.x(), cursor_pos.y())\n \n # first determine screen size, which screen, etc\n screenNumber = desktop.screenNumber(widget=parentWidget)\n logger.debug(\"Screen number: %d\", screenNumber)\n screenGeometry = desktop.availableGeometry(parentWidget)\n logger.debug(\"Screen geometry: (%d, %d, %d, %d)\", screenGeometry.left(), screenGeometry.top(), \n screenGeometry.width(), screenGeometry.height())\n \n # now window size\n windowWidth = windowSize.width()\n windowHeight = windowSize.height()\n logger.debug(\"Window size: %d x %d\", windowWidth, windowHeight)\n logger.debug(\"Cursor offset: %d\", offset)\n logger.debug(\"Screen border: %d\", border)\n \n # first determine x position: right, left or centre\n logger.debug(\"Checking right\")\n \n # fits right if: point_x + offset + window_width < screen_max_x - border\n window_x_max = cursor_pos.x() + offset + windowWidth\n screen_max_x = screenGeometry.left() + screenGeometry.width() - border\n logger.debug(\" Window/screen max x: %d < %d?\", window_x_max, screen_max_x)\n \n if window_x_max < screen_max_x:\n logger.debug(\"Window fits right\")\n \n new_x = cursor_pos.x() + offset\n \n else:\n logger.debug(\"Checking left\")\n \n # fits left if: point_x - offset - window_width > screen_min_x + border\n window_x_min = cursor_pos.x() - offset - windowWidth\n screen_min_x = screenGeometry.left() + border\n logger.debug(\" Window/screen min x: %d > %d?\", window_x_min, screen_min_x)\n \n if window_x_min > screen_min_x:\n logger.debug(\"Window fits left\")\n \n new_x = cursor_pos.x() - offset - windowWidth\n \n else:\n logger.debug(\"Centering window left to right\")\n \n new_x = screenGeometry.left() + (screenGeometry.width() - windowWidth) / 2.0\n \n # now determine y position: below, above or centre\n logger.debug(\"Checking fits below\")\n \n # fits below if: point_y - offset - window_height > screen_min_y + border\n window_y_max = cursor_pos.y() + offset + windowHeight\n screen_max_y = screenGeometry.top() + screenGeometry.height() - border\n logger.debug(\" Window/screen max y: %d < 
%d?\", window_y_max, screen_max_y)\n \n if window_y_max < screen_max_y:\n logger.debug(\"Window fits below\")\n \n new_y = cursor_pos.y() + offset\n \n else:\n logger.debug(\"Checking fits above\")\n \n # fits above if: point_y + offset + window_height < screen_max_x - border\n window_y_min = cursor_pos.y() - offset - windowHeight\n screen_min_y = screenGeometry.top() + border\n logger.debug(\" Window/screen min y: %d > %d?\", window_y_min, screen_min_y)\n \n if window_y_min > screen_min_y:\n logger.debug(\"Window fits above\")\n \n new_y = cursor_pos.y() - offset - windowHeight\n \n else:\n logger.debug(\"Centering window above to below\")\n \n new_y = screenGeometry.top() + (screenGeometry.height() - windowHeight) / 2.0\n \n # set position of window\n windowPoint = QtCore.QPoint(new_x, new_y)\n \n logger.debug(\"Setting window position: (%d, %d)\", new_x, new_y)\n \n window.setGeometry(QtCore.QRect(windowPoint, window.size()))", "def init():\n global screen_manager\n screen_manager = ScreenManager(transition=SlideTransition())", "def __init__(self):\n super(Pad, self).__init__()\n\n self.oldx, self.oldy = -1, -1\n self.width, self.height = -1, -1\n self.surface, self.cr = None, None\n\n self.add_events(gdk.BUTTON_PRESS_MASK\n | gdk.BUTTON_RELEASE_MASK\n | gdk.POINTER_MOTION_MASK\n | gdk.POINTER_MOTION_HINT_MASK)\n self.connect('button-press-event', self.button_press_cb)\n self.connect('button-release-event', self.button_release_cb)\n self.connect('configure-event', self.configure_cb)\n self.connect('expose-event', self.expose_cb)\n self.connect('motion_notify_event', self.motion_notify_cb)", "def move(self, window):\r\n self.save_pos = (self.center_x, self.center_y) # sauvegarde la position avant de bouger\r\n self.center_x = math.cos(self.angle) * self.velocity + self.center_x\r\n self.center_y = math.sin(self.angle) * self.velocity + self.center_y\r\n self.rectangle = pygame.draw.circle(window, self.color, (self.center_x, self.center_y), self.radius) # update le rectangle\r", "def newwin(self,name,sizeY,sizeX,offsetY,offsetX, border=False):\n\t\tself.windows[name]=Window(sizeY,sizeX,offsetY,offsetX,border)\n\t\treturn self.windows[name]" ]
[ "0.6633121", "0.65863085", "0.64811075", "0.64477473", "0.64404625", "0.64301646", "0.6363847", "0.629953", "0.6248835", "0.62308514", "0.62160885", "0.6184235", "0.61267334", "0.6118133", "0.61062235", "0.6091172", "0.6067022", "0.60565096", "0.6046603", "0.60445255", "0.6034738", "0.59444845", "0.59042513", "0.5900582", "0.5898525", "0.5890058", "0.58798033", "0.58659226", "0.58638436", "0.58453494", "0.5836676", "0.58326435", "0.5816649", "0.580539", "0.57448786", "0.5736643", "0.5734238", "0.57193357", "0.57128924", "0.57128924", "0.5702936", "0.568894", "0.56745124", "0.5664631", "0.5661777", "0.5659894", "0.56477296", "0.5644989", "0.5643311", "0.5642776", "0.56395274", "0.5639082", "0.56314653", "0.5622356", "0.56178147", "0.5607254", "0.5604613", "0.56013787", "0.55992436", "0.55987823", "0.5577477", "0.55759704", "0.5575621", "0.5573532", "0.55673474", "0.55653274", "0.5562908", "0.5549684", "0.554849", "0.5547511", "0.5539877", "0.553142", "0.55258006", "0.5516329", "0.5510469", "0.5510455", "0.5500843", "0.54988414", "0.5492266", "0.5491849", "0.5482221", "0.548147", "0.54797333", "0.54793215", "0.54627675", "0.5451788", "0.54486394", "0.5440929", "0.54375505", "0.5431378", "0.5427557", "0.54246104", "0.5421305", "0.54164654", "0.54127336", "0.54075795", "0.5406655", "0.54058826", "0.54055595", "0.5402889", "0.5395743" ]
0.0
-1
Calculate by how many cells the moving window should be moved. If this is nonzero, shift the fields on the interpolation grid, and add new particles.
def move_grids(self, fld, comm, time):
    # To avoid discrepancies between processors, only the first proc
    # decides whether to send the data, and broadcasts the information.
    dz = comm.dz
    if comm.rank==0:
        # Move the continuous position of the moving window object
        self.zmin += self.v * (time - self.t_last_move)
        # Find the number of cells by which the window should move
        zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(
            local=False, with_damp=False, with_guard=False )
        n_move = int( (self.zmin - zmin_global_domain)/dz )
    else:
        n_move = None
    # Broadcast the information to all proc
    if comm.size > 1:
        n_move = comm.mpi_comm.bcast( n_move )

    # Move the grids
    if n_move != 0:
        # Move the global domain
        comm.shift_global_domain_positions( n_move*dz )
        # Shift the fields
        Nm = len(fld.interp)
        for m in range(Nm):
            # Modify the values of the corresponding z's
            fld.interp[m].zmin += n_move*fld.interp[m].dz
            fld.interp[m].zmax += n_move*fld.interp[m].dz
            # Shift/move fields by n_move cells in spectral space
            self.shift_spect_grid( fld.spect[m], n_move )
        # Because the grids have just been shifted, there is a shift
        # in the cell indices that are used for the prefix sum.
        if fld.use_cuda:
            fld.prefix_sum_shift += n_move
            # This quantity is reset to 0 whenever prefix_sum is recalculated

    # Prepare the positions of injection for the particles
    # (The actual creation of particles is done when the routine
    # exchange_particles of boundary_communicator.py is called)
    if comm.rank == comm.size-1:
        # Move the injection position
        self.z_inject += self.v * (time - self.t_last_move)
        # Take into account the motion of the end of the plasma
        self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)
        # Increment the number of particle cells to add
        nz_new = int( (self.z_inject - self.z_end_plasma)/dz )
        self.nz_inject += nz_new
        # Increment the virtual position of the end of the plasma
        # (When `generate_particles` is called, then the plasma
        # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,
        # and afterwards nz_inject is set to 0.)
        self.z_end_plasma += nz_new*dz

    # Change the time of the last move
    self.t_last_move = time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 :\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n return (grid)", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "def moving_windows_num(self, size = None, overlap = None):\n if size==None:\n size=4096\n if overlap==None:\n overlap=0\n \n return (self.num_datapoints)/(size-overlap) #Not sure if it's right, need testing", "def _ignite_cells(self, istep, ip):\n particle = self.particles[ip] # get particle\n state, x, y = particle.get_from_keys([\"state\", \"x\", \"y\"])\n if state > STTHR:\n for i in range(self.grid.NX-1):\n if abs(x - self.grid.XCELL[i, 0]) < self.grid.DX/2:\n INDX = i\n for j in range(self.grid.NY-1):\n if abs(y - self.grid.YCELL[0, j]) < self.grid.DY/2:\n INDY = j\n cell = self.grid.CELLS[INDX, INDY]\n cell.BURNPROG += 1\n if (cell.QMAXTR > 0 or cell.QMAXBLD > 0) and cell.BURNSTAT == 0:\n cell.BURNSTAT = 1\n cell.CLOCK = self.TIME[istep]\n # elif cell.QMAXTR == 0 or cell.QMAXBLD == 0:\n # particle.update(state=0.0, factor=0.0)\n # if pType == 2:\n # particle.update(state=0.0)", "def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True", "def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)", "def increment(grid):\n height = len(grid)\n width = len(grid[0])\n for r in range(height):\n for c in range(width):\n grid[r][c] += 1\n if grid[r][c] == 10:\n grid [r][c] = 0", "def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return 
self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0", "def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0", "def calc(self):\n np = 0\n for cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np", "def _add_mines(self):\n num = 0\n while num < self._n:\n x = random.randint(0, self._dim - 1)\n y = random.randint(0, self._dim - 1)\n if self._board[x][y] != -1:\n self._board[x][y] = -1\n neighbors = self._get_neighbors((x, y))\n for neighbor in neighbors:\n if self._board[neighbor[0]][neighbor[1]] != -1:\n self._board[neighbor[0]][neighbor[1]] += 1\n num += 1", "def update_grid(self, x):\r\n\r\n # Append boundary rows and columns to matrix\r\n x = self.append_boundary(x) # the boundary is recomputed at each step\r\n y = np.copy(x)\r\n\r\n # For each cell within boundary, compute state according to rules.\r\n chg_0_1 = 0 # the number of cells that changed from state 0 to state 1\r\n chg_1_0 = 0 # the number of cells that changes from state 1 to state 0\r\n chg_none = 0 # the number of cells that did not change\r\n index = np.arange(1, x.shape[0] - 1)\r\n for i in index:\r\n for j in index:\r\n neighborhood = x[i - 1:i + 2:1, j - 1:j + 2:1] # 3x3 sub matrix centered at i, j\r\n y[i, j] = self.update_cell(neighborhood)\r\n change = int(y[i, j] - x[i, j])\r\n if change == -1:\r\n chg_1_0 += 1\r\n if change == 0:\r\n chg_none += 1\r\n if change == 1:\r\n chg_0_1 += 1\r\n\r\n # Compute statistics excluding boundary\r\n total = np.power(x[1:-1:1, 1:-1:1].shape[0] - 1, 2)\r\n start_1 = np.sum(x[1:-1:1, 1:-1:1])\r\n end_1 = np.sum(y[1:-1:1, 1:-1:1])\r\n stats = [total, start_1, end_1, chg_1_0, chg_none, chg_0_1]\r\n\r\n return y[1:-1:1, 1:-1:1], stats # remove the boundary\r", "def update_positions(self, grid):\r\n self.grid = grid", "def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n 
new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def push_up (grid):\r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0\r\n #joining like numbers \r\n for i in range(3): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i+1][j]=0\r\n #pafter adding the numbers continue to move them \r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0", "def _cal_grid_parameters_without_bsite(self, spacing, extra_buffer, nc_handle):\n assert spacing > 0 and extra_buffer > 0, \"spacing and extra_buffer must be positive\"\n self._set_grid_key_value(\"origin\", np.zeros( [3], dtype=float))\n \n self._set_grid_key_value(\"d0\", np.array([spacing, 0, 0], dtype=float))\n self._set_grid_key_value(\"d1\", np.array([0, spacing, 0], dtype=float))\n self._set_grid_key_value(\"d2\", np.array([0, 0, spacing], dtype=float))\n self._set_grid_key_value(\"spacing\", np.array([spacing]*3, dtype=float))\n \n lj_radius = np.array(self._prmtop[\"LJ_SIGMA\"]/2., dtype=float)\n dx = (self._crd[:,0] + lj_radius).max() - (self._crd[:,0] - lj_radius).min()\n dy = (self._crd[:,1] + lj_radius).max() - (self._crd[:,1] - lj_radius).min()\n dz = (self._crd[:,2] + lj_radius).max() - (self._crd[:,2] - lj_radius).min()\n\n print(\"Receptor enclosing box [%f, %f, %f]\"%(dx, dy, dz))\n print(\"extra_buffer: %f\"%extra_buffer)\n\n length = max([dx, dy, dz]) + 2.0*extra_buffer\n count = np.ceil(length / spacing) + 1\n \n self._set_grid_key_value(\"counts\", np.array([count]*3, dtype=int))\n print(\"counts \", self._grid[\"counts\"])\n print(\"Total box size %f\" %((count-1)*spacing))\n\n for key in [\"origin\", \"d0\", \"d1\", \"d2\", \"spacing\", \"counts\"]:\n self._write_to_nc(nc_handle, key, self._grid[key])\n return None", "def grid_inflation(self):\n for obs in self.obstacle_list:\n\n inflation_x1 = round((obs[0][0]-self._inflation_radius)/self.step_size)\n\n inflation_y2 = round((obs[0][1] + obs[2] +self._inflation_radius)/self.step_size)\n\n inflation_x2 = round((obs[0][0] + obs[1] +self._inflation_radius)/self.step_size)\n\n inflation_y1 = round((obs[0][1] -self._inflation_radius)/self.step_size)\n\n self.grid[1, inflation_x1:inflation_x2+1,\n inflation_y1:inflation_y2+1] = INFLATION_COST\n\n # border inflation\n self.grid[1, 0:self.gridwidth+1, 0:round(self._inflation_radius/self.step_size)+1] = INFLATION_COST\n self.grid[1, 0:self.gridwidth+1, self.gridheight-round(self._inflation_radius / self.step_size):self.gridheight+1] = INFLATION_COST\n self.grid[1, 0:round(self._inflation_radius/self.step_size)+1, 0:self.gridheight+1] = INFLATION_COST\n self.grid[1, self.gridwidth-round(self._inflation_radius/self.step_size):self.gridwidth+1, 0:self.gridheight+1] = INFLATION_COST\n\n # if NEED_DRAW_INFLATED_GRID:\n # for i in range(self.gridwidth):\n # plt.scatter(i,0)\n # plt.scatter(i,self.gridheight)\n # for j in range(self.gridheight):\n # plt.scatter(0,j)\n # plt.scatter(self.gridwidth,j)\n # if self.grid[i, j] != 0:\n # plt.scatter(i,j)\n # plt.show()\n\n return self.grid", "def update_pop_matrix(self):\n for row in self.unique_rows[1:-1]: # First and last cell is water\n for col in self.unique_cols[1:-1]: # First and last cell is water\n cell = 
self.landscape[(row, col)]\n if cell.is_mainland:\n # print(cell)\n self.herb_pop_matrix[row - 1][col - 1] = cell.herb_count\n self.carn_pop_matrix[row - 1][col - 1] = cell.carn_count", "def move(self):\r\n for index in range(self.size):\r\n self.values[index] = self.values[index] + self.velocities[index]\r\n \r\n # Adjust values to keep particle inside boundaries.\r\n if self.values[index] < Particle.MIN_VALUE:\r\n self.values[index] = (-self.values[index] % Particle.MAX_VALUE)\r\n elif self.values[index] > Particle.MAX_VALUE:\r\n self.values[index] = (self.values[index] % Particle.MAX_VALUE)", "def change_cell(self):\n # TODO: assess whether this may partly moved into the base class\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n # packets gets reflected\n\n self.x = self.cell_xl\n self.mu = -self.mu\n\n self.calculate_and_set_propagation_distances()\n\n else:\n # packet is transported into target cell\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def measurement_update(particles, measured_marker_list, grid):\n weight = []\n cnt = 0\n\n # no new sensor info\n if len(measured_marker_list) == 0:\n s = 1\n for p in particles:\n weight.append((p, 1/len(particles)))\n else:\n for p in particles:\n markers_visible_to_p = p.read_markers(grid)\n\n if p.x < 0 or p.x >= grid.width or p.y < 0 or p.y >= grid.height:\n weight.append((p, 0))\n continue\n if (p.x, p.y) in grid.occupied:\n weight.append((p, 0))\n continue\n\n match = []\n diff = int(math.fabs(len(measured_marker_list)-len(markers_visible_to_p)))\n\n for cm in measured_marker_list:\n if len(markers_visible_to_p) == 0:\n break\n cmx, cmy, cmh = add_marker_measurement_noise(cm, MARKER_TRANS_SIGMA, MARKER_ROT_SIGMA)\n\n # find minp, the closest marker out of markers_visible_to_particle\n minp = markers_visible_to_p[0]\n mind = grid_distance(cmx, cmy, minp[0], minp[1])\n\n for mvp in markers_visible_to_p:\n mvpx, mvpy, mvph = mvp[0], mvp[1], mvp[2]\n dist = grid_distance(cmx, cmy, mvpx, mvpy)\n if dist < mind:\n mind = dist\n minp = mvp\n\n # store the pairing [cm, m] for later calculations\n match.append((minp, cm))\n markers_visible_to_p.remove(minp)\n\n # use match to calculate weight of p\n prob = 1\n\n maxc1 = 0\n maxc2 = (45 ** 2) / (2*(MARKER_ROT_SIGMA ** 2))\n c1 = 2*(MARKER_TRANS_SIGMA ** 2)\n c2 = 2*(MARKER_ROT_SIGMA ** 2)\n\n for i, j in match:\n distBetweenMarkers = grid_distance(i[0], i[1], j[0], j[1])\n angleBetweenMarkers = diff_heading_deg(i[2], j[2])\n const1 = (distBetweenMarkers ** 2) / c1\n const2 = (angleBetweenMarkers ** 2) / c2\n maxc1 = max(maxc1, const1)\n prob *= 
np.exp(-const1-const2)\n\n for _ in range(diff):\n prob *= np.exp(-maxc1-maxc2)\n\n weight.append((p, prob))\n\n #normalize weight\n s = 0\n weight.sort(key=lambda x: x[1])\n delete = int(PARTICLE_COUNT/100)\n weight = weight[delete:]\n for i, j in weight:\n if j == 0:\n cnt+=1\n else:\n s += j\n weight = weight[cnt:]\n cnt += delete\n\n plist = []\n wlist = []\n\n for i, j in weight:\n newi = Particle(i.x, i.y, i.h)\n wlist.append(j/s)\n plist.append(newi)\n\n newplist = []\n\n if plist != []:\n newplist = np.random.choice(plist, size=len(plist), replace = True, p=wlist)\n\n measured_particles = Particle.create_random(cnt, grid)[:]\n\n for p in newplist:\n ph = add_gaussian_noise(p.h, ODOM_HEAD_SIGMA)\n px = add_gaussian_noise(p.x, ODOM_TRANS_SIGMA)\n py = add_gaussian_noise(p.y, ODOM_TRANS_SIGMA)\n newp = Particle(px, py, ph)\n measured_particles.append(newp)\n\n return measured_particles", "def _data_move_in_mc_on_w(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, h_size, w_size, w_offset = data_pos_info\n data_cnt_one_block = _get_elment_cnt_one_block(src.dtype)\n sub_w_block = _ceil_div(sub_w_size, data_cnt_one_block)\n sub_h_align_block_size = sub_h_size // data_cnt_one_block * data_cnt_one_block\n sub_h_left = sub_h_size % data_cnt_one_block\n is_not_w_block_align = w_size % data_cnt_one_block > 0\n is_h_size_smaller_one_block = h_size < data_cnt_one_block\n\n def _move_in_one_more_block():\n \"\"\"\n move in one more block of h when h > sub_h and sub_h is not block align\n \"\"\"\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)\n\n with tik_inst.if_scope(is_not_w_block_align):\n # sub_h is block align or h is not enough one block\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()\n\n with tik_inst.else_scope():\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n src_strides = w_size // data_cnt_one_block - sub_w_block\n # mte max strides value is 65535\n with tik_inst.if_scope(src_strides > MTE_STRIDES):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx_2:\n tik_inst.data_move(dst[sub_w_size * sub_h_idx_2],\n src[w_offset + w_size * sub_h_idx_2],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst, src[w_offset], 0, sub_h_size, sub_w_block, src_strides, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()", "def update(frame_num, mat, grid, N):\n\n new_grid = np.copy(grid)\n #print(\"grid size:\", grid.shape)\n for i in range(1, grid.shape[0]-1):\n for j in range(1, grid.shape[1]-1):\n neighbors = int(grid[i-1, j] + grid[i+1, j] + \\\n grid[i, j+1] + grid[i, j-1] + \\\n grid[i-1,j-1] + grid[i+1,j+1] + \\\n grid[i+1,j-1] + grid[i-1,j+1])\n if grid[i, j] == ON:\n if not (2 <= neighbors <= 3):\n new_grid[i, j] = OFF\n elif grid[i, j] == OFF and neighbors == 
3:\n # Grow a cell\n new_grid[i, j] = ON\n else:\n new_grid[i, j] = OFF\n\n ### Update new grid\n mat.set_data(new_grid)\n grid[:] = new_grid[:] # Brackets are important\n return mat", "def push_up (grid):\r\n for i in range (3):\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == 0:\r\n grid[row-1][col] = grid[row][col]\r\n grid[row][col] = 0\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == grid[row][col]:\r\n grid[row-1][col] = grid[row-1][col]*2\r\n grid[row][col]=0\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == 0:\r\n grid[row-1][col] = grid[row][col]\r\n grid[row][col] = 0\r\n \r\n return grid", "def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def update(self):\r\n if self.able_to_move:\r\n self.pix_pos += self.direction*self.speed\r\n if self.time_to_move():\r\n if self.stored_direction 
!= None:\r\n self.direction = self.stored_direction\r\n self.able_to_move = self.can_move()\r\n # calls to the next function in order to check that the player is within bounds \r\n\r\n self.grid_pos[0] = (self.pix_pos[0]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_width//2)//self.app.cell_width+1\r\n self.grid_pos[1] = (self.pix_pos[1]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_height//2)//self.app.cell_height+1\r\n # keep track of where the player is currently to the grid \r\n\r\n if self.on_coin():\r\n self.eat_coin()\r\n # removes the coin once the player is over the tile\r\n\r\n if self.on_fruit():\r\n self.eat_fruit()\r\n # removes the fruit once the player is over the tile\r", "def wander(self):\n \n has_new_pos = False\n while not has_new_pos:\n move = random.choice(self.moves)\n new_pos = add_lists(move, self.position)\n has_new_pos = check_bounds(new_pos, self.grid_size)\n return new_pos", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def update_poi (POIn, POInm1, new, current_cell_mask):\n row, col = cuda.grid(2)\n\n if row < POIn.shape[0] and col < POIn.shape[1]:\n POIn[row,col] = 0 \n if current_cell_mask[row,col] == True:\n POIn[row,col] = POInm1[row,col] + new[row,col]", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def update(self):\n if (self.j + self.step >= self.image.shape[0]) and (self.i + self.step >= self.image.shape[1]):\n self.no_more_crops = True\n elif self.i + self.step >= self.image.shape[1]:\n self.i = 0\n self.j += self.step\n else:\n self.i += self.step", "def regrid(old_grid):\n bins = np.floor((np.log10(old_grid) - l_min) / dl).astype(int)\n w = (bins >= 0) & (bins < nbins)\n\n return bins, w", "def resizeEvent(self, event):\n # gets nearest width/height divisible by cell length\n nearest_w = event.size().width() // Config.CELL_LENGTH\n nearest_h = event.size().height() // Config.CELL_LENGTH - 1\n \n Config.NUM_CELLS_X = nearest_w\n Config.NUM_CELLS_Y = nearest_h\n layout.scene.resize_update()", "def determine_number_of_packets(self):\n self.Ltot = 4. * np.pi * np.sum(self.eta * self.dV)\n self.L = self.Ltot / float(self.Npackets)\n\n self.npackets_cell = (4. 
* np.pi * self.eta * self.dV /\n self.L).astype(np.int)\n self.npackets_cell_cum_frac = (\n np.cumsum(self.npackets_cell).astype(np.float) /\n np.sum(self.npackets_cell))", "def get_nb_vals(i, pnts, dem, top_left_cor, cellsize, rows, cols):\n nb_x = np.zeros((5,5)) # this 5 by 5 max would contain the x coordinate of 16 neighbor pixels of a sample point\n nb_y = np.zeros((5,5)) # this 5 by 5 matrix would contain the y coordinate of 16 neighbor pixels of a sample point\n nb_z = np.zeros((5,5))\n # get index and value of cell in DEM containing current point\n (cell_X, cell_Y, cell_Z) = misc.getCellValue(pnts[i], \n dem, \n top_left_cor, \n cellsize)\n #Deal with sample points near boundary of the DEM\n point_within_dem = (cell_X-2) >=0 and (cell_Y-2>=0) and (cell_X+3)<=cols and (cell_Y+3)<=rows\n if point_within_dem:\n nb_z[0:5,0:5] = misc.RasterSubset(dem,(cell_Y-2),(cell_Y+3),(cell_X-2),(cell_X+3))\n else:\n #Get the part of moving window within the DEM domain\n in_data= misc.RasterSubset(dem,max((cell_Y-2),0),min((cell_Y+3),rows),max((cell_X-2),0),min((cell_X+3),cols))\n #in_data=dem[\"array\"][max((cell_Y-2),0):min((cell_Y+3),rows),max((cell_X-2),0):min((cell_X+3),cols)]\n nb_z[max((2-cell_Y),0):min((5-(cell_Y+3-rows)),5),max((2-cell_X),0):min((5-(cell_X+3-cols)),5)]=in_data[0:in_data.shape[0],0:in_data.shape[1]]\n in_data_avg=np.mean(in_data[in_data>-3.4e+10])\n nb_z[nb_z==0]=in_data_avg\n nb_z[nb_z<-3.4e+10]=in_data_avg\n\n\n \n # If there is missing data in the neighborhood of the sample point \n # use neighborhood average to replace the missing value \n has_missing_data = (nb_z>8848).sum()>0 or (nb_z<-413).sum()>0\n if has_missing_data:\n avgValue=np.mean(nb_z[np.where(np.logical_and(nb_z<8848, nb_z>-413))])\n nb_z[nb_z>8848]=avgValue\n nb_z[nb_z<-413]=avgValue\n \n # Obtain the coordinate of cell centroid of a 5*5 neighborhood around the sample point\n for ii in [0,1,2,3,4]:\n cor_y=ii-2\n dy = (cell_Y+cor_y+0.5) * cellsize[1]\n nb_y[ii,:] = top_left_cor[1] + dy\n for jj in [0,1,2,3,4]:\n cor_x=jj-2\n dx = (cell_X+cor_x+0.5) * cellsize[0]\n nb_x [:,jj] = top_left_cor[0] + dx\n return nb_x, nb_y, nb_z", "def update_potential_moves(self):\n\n board = self.get_board()\n\n for row_index, row in enumerate(board):\n\n for column_index, column in enumerate(row):\n\n if column is not None:\n \n position = self.reverse_position(column_index, row_index)\n game_piece_object = self.get_game_piece_object_at_position(position)\n game_piece_object.set_potential_moves(self.generate_moves(position))", "def heuristic_misplaced(self):\n misplaced = 0\n\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n if self.position[i][j] != self.PUZZLE_END_POSITION[i][j]:\n misplaced += 1\n\n return misplaced", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, 
map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == 
self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def column_move(plateau,num_col,sens):\n if check_room(plateau, 3, num_col) == False or (sens != 1 and sens != 0):\n return \"Erreur !\"\n if sens==1:\n for i in range(0,3):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i+1,num_col)and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i+1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i+1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n\n else:\n for i in range(3,0,-1):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i-1,num_col) and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n 
column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i-1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i-1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break", "def measurement_update(particles, measured_marker_list, grid):\n weights = []\n total_weight = 0\n \"\"\"\n Calculating weights\n \"\"\"\n xy_dst = scipy.stats.norm(0, MARKER_TRANS_SIGMA)\n rt_dst = scipy.stats.norm(0, MARKER_ROT_SIGMA)\n\n skip = 0\n not_skip = 0\n if len(measured_marker_list) == 0:\n return particles\n else:\n for particle in particles:\n if not grid.is_in(particle.x, particle.y):\n skip += 1\n weights.append(0)\n continue\n particle_marker_list = particle.read_markers(grid)\n\n weight = 1.0 / len(particles)\n if len(particle_marker_list) == 0:\n weight = 0\n elif len(particle_marker_list) == 1 and len(measured_marker_list) == 1:\n weight = (xy_dst.pdf(particle_marker_list[0][0] - measured_marker_list[0][0]) *\n xy_dst.pdf(particle_marker_list[0][1] - measured_marker_list[0][1]) *\n rt_dst.pdf(particle_marker_list[0][2] - measured_marker_list[0][2]))\n elif len(particle_marker_list) == 1 and len(measured_marker_list) == 2:\n weight = (xy_dst.pdf(particle_marker_list[0][0] - measured_marker_list[0][0]) *\n xy_dst.pdf(particle_marker_list[0][1] - measured_marker_list[0][1]) *\n rt_dst.pdf(particle_marker_list[0][2] - measured_marker_list[0][2])\n *\n xy_dst.pdf(particle_marker_list[0][0] - measured_marker_list[1][0]) *\n xy_dst.pdf(particle_marker_list[0][1] - measured_marker_list[1][1]) *\n rt_dst.pdf(particle_marker_list[0][2] - measured_marker_list[1][2]))\n elif len(particle_marker_list) == 2 and len(measured_marker_list) == 1:\n weight = (xy_dst.pdf(particle_marker_list[0][0] - measured_marker_list[0][0]) *\n xy_dst.pdf(particle_marker_list[0][1] - measured_marker_list[0][1]) *\n rt_dst.pdf(particle_marker_list[0][2] - measured_marker_list[0][2])\n +\n xy_dst.pdf(particle_marker_list[1][0] - measured_marker_list[0][0]) *\n xy_dst.pdf(particle_marker_list[1][1] - measured_marker_list[0][1]) *\n rt_dst.pdf(particle_marker_list[1][2] - measured_marker_list[0][2]))\n elif len(particle_marker_list) == 2 and len(measured_marker_list) == 2:\n weight = ((xy_dst.pdf(particle_marker_list[0][0] - measured_marker_list[0][0]) *\n xy_dst.pdf(particle_marker_list[0][1] - measured_marker_list[0][1]) *\n rt_dst.pdf(particle_marker_list[0][2] - measured_marker_list[0][2])\n +\n xy_dst.pdf(particle_marker_list[1][0] - measured_marker_list[0][0]) *\n xy_dst.pdf(particle_marker_list[1][1] - measured_marker_list[0][1]) *\n rt_dst.pdf(particle_marker_list[1][2] - measured_marker_list[0][2]))\n *\n (xy_dst.pdf(particle_marker_list[0][0] - measured_marker_list[1][0]) *\n xy_dst.pdf(particle_marker_list[0][1] - measured_marker_list[1][1]) *\n rt_dst.pdf(particle_marker_list[0][2] - measured_marker_list[1][2])\n +\n xy_dst.pdf(particle_marker_list[1][0] - measured_marker_list[1][0]) *\n xy_dst.pdf(particle_marker_list[1][1] - measured_marker_list[1][1]) *\n rt_dst.pdf(particle_marker_list[1][2] - measured_marker_list[1][2])))\n\n not_skip += 1\n weights.append(weight)\n total_weight += weight\n print(\"%d %d\" % (skip, not_skip))\n if total_weight == 0:\n weights = [1.0 / len(particles) for _ in particles]\n else:\n weights = [float(weight)/total_weight for weight in weights]\n particles = numpy.random.choice(particles, 
size=len(particles), replace=True, p=weights)\n measurement_particles = []\n for particle in particles:\n measurement_particles.append(Particle(particle.x, particle.y, particle.h))\n return measurement_particles", "def insert(self, n):\n # The distance from the ith cell to the jth probe.\n dij = n.XY.reshape((2,-1,1)) - self.points.reshape((2,1,-1))\n dij = (dij**2).sum(axis=0) / self.radius\n dij[dij < 1] = 1\n self.M = 1 / dij\n self.n = n", "def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,\n water_decay_grid):\n\n global water_counter\n global ignition_grid\n neighbourstates = np.array(neighbourstates)\n init_grid = initial_grid.astype(int)\n ig_grid = np.array(ignition_grid)\n windspeed_ignition_modifiers = wind_speed_rvalue(\"NE\", 10)\n new_ig_grid = []\n for i, row in enumerate(grid):\n new_ig_grid.append([\n ignite(cell, neighbourstates[:, i, j],\n windspeed_ignition_modifiers) for j, cell in enumerate(row)\n ])\n new_ig_grid = np.array(new_ig_grid)\n started_to_burn = []\n for i, row in enumerate(grid):\n started_to_burn.append([\n started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[started_to_burn] = START_BURN\n ig_grid = np.add(new_ig_grid, ig_grid)\n full_burn = []\n for i, row in enumerate(grid):\n full_burn.append([\n fully_burning(cell, ig_grid[i, j], decay_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[full_burn] = BURNING\n end_burning = []\n for i, row in enumerate(grid):\n end_burning.append([\n ending_burn(cell, decay_grid[i, j], decay_values[int(\n initial_grid[i, j])]) for j, cell in enumerate(row)\n ])\n grid[end_burning] = END_BURN\n decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1\n burnt_out = (decay_grid == 0) # find those which have decayed to 0\n grid[(decay_grid == 0\n )] = BURNT #set all that have decayed to zero to BURNT(7)\n water_counter += 1\n\n if (water_counter == 100):\n grid[120:160, 80:120] = initial_grid[120:160, 80:120]\n water_decay_grid[(grid != LAKE)] -= 1 # take one off their decay value\n grid[(water_decay_grid == 0)] = BURNT # switch their state to 5\n ignition_grid = ig_grid\n return grid", "def update_grid(self):\n # Check to see if we have moved squares\n _new_grid = self.calc_grid()\n if _new_grid == self._grid:\n return\n # Remove from old square and add to new square\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[_new_grid][self._type].add(self)\n # Update coordinates\n self._grid = _new_grid", "def timesGoBy(self):\n self.wcount += 1", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = 
Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = 
child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. 
Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def update_particle_cloud(self, scan):\n\n \"\"\"\n Initialise arrays for the new particle cloud,\n particle weights and cummulative weights\n \"\"\"\n newParticleCloud = []\n particleWeights = []\n \n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n\n sensorSigma=0.1 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n\n for p in self.particlecloud.poses:\n particleWeights.append(self.sensor_model.get_weight(scan, p))\n\n for i in range(len(self.particlecloud.poses)):\n randomSelection = numpy.random.random()\n csum = 0\n for p in self.particlecloud.poses:\n weight = self.sensor_model.get_weight(scan, p) / sum(particleWeights)\n csum += weight\n if csum >= randomSelection:\n newParticle = copy.deepcopy(p)\n newParticle.position.x = newParticle.position.x + (gaussianRandomNumX[i] * noise)\n newParticle.position.y = newParticle.position.y + (gaussianRandomNumY[i] * noise)\n newParticle.position.z = newParticle.position.z\n newParticleCloud.append(newParticle)\n break\n self.particlecloud.poses = newParticleCloud\n\n pass", "def __update_visible(self) -> None:\n for i in range(0, 8):\n visible_row = self.__row_position + Labyrinth.ALL_ROW_MOVE[i]\n visible_col = self.__col_position + Labyrinth.ALL_COL_MOVE[i]\n if 0 <= visible_row < self.__labyrinth.labyrinth_height and \\\n 0 <= visible_col < self.__labyrinth.labyrinth_width:\n self.__labyrinth.visible_cells[visible_row][visible_col] = 1", "def calcMovAvgs(self, windowLength):\n movingAvgs = []\n cumSum = [0]\n n = len(self.x)\n\n for i in range(n):\n cumSum.append(cumSum[i] + self.x[i])\n if i >= windowLength:\n temp = (cumSum[i] - cumSum[i-windowLength])/windowLength\n movingAvgs.append(temp)\n return [None]*windowLength + movingAvgs", "def push_up (grid):\r\n temp = [] #Empty list\r\n for i in range(4):\r\n for j in range(4):\r\n if grid[j][i] > 0:\r\n temp.append(grid[j][i]) #Extract non-zero values from column or row\r\n lentemp = len(temp)\r\n if lentemp > 0:\r\n for k in range(1, lentemp):\r\n if (temp[k] == temp[k-1]) or (temp[k-1] == 0): #Check if addition is applicable\r\n temp[k-1] = temp[k] + temp[k-1] #Do the relevant addition\r\n temp[k] = 0 #Replace shifted value with 0\r\n while 0 in temp:\r\n temp.remove(0) #Remove zeroes\r\n lentemp = len(temp)\r\n for k in range(4-lentemp):\r\n temp.append(0) #Add zeroes to the end\r\n for j in range(4):\r\n grid[j][i] = temp[j] #Replace the old values with the new\r\n temp = [] #Reset the list for the next row or column\r\n return grid", "def _create_shadow_cells(self, assignments):\n prev_assignments = assignments[:, 1]\n num_assigned = prev_assignments.shape[0]\n\n prev_set = {i for i in range(len(self._prev_cells))}\n assigned_set = set(prev_assignments)\n\n diff_set = prev_set - assigned_set\n\n new_assignments = np.zeros(shape=(self._prev_cells.shape[0],)).astype(\"int32\")\n new_assignments[:num_assigned] = prev_assignments\n\n new_curr_cells = np.zeros(shape=self._prev_cells.shape)\n new_curr_cells[:self._curr_cells.shape[0]] = self._curr_cells\n\n i = num_assigned\n\n for unassigned in diff_set:\n new_assignments[i] = unassigned\n new_curr_cells[i] = self._prev_cells[unassigned]\n\n # move shadow cell (pos += vel)\n new_curr_cells[i, bcell.BEG_POS_INDEX:bcell.END_POS_INDEX] += \\\n 
new_curr_cells[i, bcell.BEG_VEL_INDEX:bcell.END_VEL_INDEX]\n\n i += 1\n\n self._prev_cells = self._prev_cells[new_assignments]\n self._curr_cells = new_curr_cells\n self._particles_all = self._particles_all[new_assignments]", "def update(match):\r\n \r\n \r\n coordinates= match.board\r\n \r\n rows=len(match.board)\r\n column=len(match.board[0])\r\n for x in range(rows):\r\n for y in range(column):\r\n cell_up = match.board[wrapx(x)][wrapy(y+1)]\r\n cell_down = match.board[wrapx(x)][wrapy(y-1)]\r\n cell_right = match.board[wrapx(x+1)][wrapy(y)]\r\n cell_left = match.board[wrapx(x-1)][wrapy(y)]\r\n cell_diagupright = match.board[wrapx(x+1)][wrapy(y+1)]\r\n cell_diagupleft = match.board[wrapx(x-1)][wrapy(y+1)]\r\n cell_diagdownright = match.board[wrapx(x+1)][wrapy(y-1)] \r\n cell_diagdownleft = match.board[wrapx(x-1)][wrapy(y-1)]\r\n \r\n listofneightbours = [cell_up, cell_down, cell_right, cell_left, cell_diagupright, cell_diagupleft,\r\n cell_diagdownright, cell_diagdownleft]\r\n aliveneighbours = listofneighbours.count(1)\r\n \r\n if aliveneighbours < 2:\r\n x = 0\r\n elif aliveneighbours == 2:\r\n x = 1\r\n elif aliveneighbours == 3:\r\n x = 1\r\n else:\r\n x = 0", "def _move_receptor_to_grid_center(self):\n lower_receptor_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float)\n upper_receptor_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float)\n \n receptor_box_center = (upper_receptor_corner + lower_receptor_corner) / 2.\n grid_center = (self._origin_crd + self._uper_most_corner_crd) / 2.\n displacement = grid_center - receptor_box_center\n\n print(\"Receptor is translated by \", displacement)\n\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. 
If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def flag_cell(self, event):\n if self.mineboard.gamestate is None:\n x = (event.x-2) // CELLWIDTH\n y = (event.y-2) // CELLWIDTH\n self.mineboard.flag_cell(y, x)\n self.update_cells()\n mines_rem = self.mineboard.minecount - self.mineboard.flagcount\n # updates the mines_left label\n if mines_rem == 1:\n self.mines_left.set(f\"{mines_rem} mine left\")\n else:\n self.mines_left.set(f\"{mines_rem} mines left\")", "def _modify_updates(self, updates):\n\n if self.max_kernel_norm is not None:\n W, = self.transformer.get_params()\n if W in updates:\n updated_W = updates[W]\n row_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=(0, 1, 2)))\n desired_norms = T.clip(row_norms, 0, self.max_kernel_norm)\n scales = desired_norms / (1e-7 + row_norms)\n updates[W] = (updated_W * scales.dimshuffle('x', 'x', 'x', 0))", "def build(self):\n # Store current positions of all particles\n self.old_pos = []\n for p in self.sys.particles:\n self.old_pos.append(copy(p.r))\n \n # Set up the cell list\n self.cell_list.wipe()\n for p in self.sys.particles:\n self.cell_list.add_particle(p)\n\n # Build the list \n self.neighbours = []\n for p in self.sys.particles:\n neighbours = []\n for n in self.cell_list.get_neighbours(p):\n pn = self.sys.particles[n]\n if pn.id > p.id:\n dr = pn.r - p.r \n dr.apply_periodic(self.sys.box)\n if dr.length() < self.rcut + self.pad:\n neighbours.append(n)\n self.neighbours.append(neighbours)\n \n self.sys.has_nl = True", "def updateCells(cell_positions):\n # Build a set of canditates for live cells at the next generation, instead of looking through the whole grid\n # These will be dead neighbours of living cells\n possible_future_cells = set()\n # Make sets of cells to add and remove at the end of the check\n cells_remove = set()\n cells_add = set()\n for cell in cell_positions:\n # Get adjacent squares\n neighbours_dict = cellNeighbours(cell)\n number_live_neighbours = 0\n # Check which of these corresponds to another living cell\n for square in neighbours_dict.values():\n if square in cell_positions:\n number_live_neighbours+=1\n else:\n possible_future_cells.add(square)\n\n # Any live cell with fewer than two live neighbours dies, as if caused by under-population\n if number_live_neighbours<2:\n cells_remove.add(cell)\n # Any live cell with two or three live neighbours lives on to the next generation\n # do nothing\n # Any live cell with more than three live neighbours dies, as if by overcrowding\n elif number_live_neighbours>3:\n cells_remove.add(cell)\n # Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction\n for cell_candidate in possible_future_cells:\n cell_candidate_neighbours = cellNeighbours(cell_candidate).values()\n # Count number of live neighbours\n count = 0\n for square in cell_candidate_neighbours:\n if square in cell_positions:\n count+=1\n if count == 3:\n cells_add.add(cell_candidate)\n # Update cell_positions by removing dead cells and adding new-born cells\n for cell in cells_add:\n cell_positions.add(cell)\n for cell in cells_remove:\n cell_positions.remove(cell)\n # Return the update live 
cell list\n return cell_positions", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def project(self):\n # update positions compared to observer\n pos = self.pos.copy()\n\n # center coordinates around obs coords\n pos[:, 0] -= np.sin(self.theta) * self.V * self.time_elapsed\n pos[:, 2] -= np.cos(self.theta) * self.V * self.time_elapsed\n\n # wrap in a novel box around obs coords\n for i in range(3):\n pos[:, i] = self.bounds[2*i] + np.mod(pos[:, i], self.bounds[2*i + 1]-self.bounds[2*i])\n\n d = (pos**2).sum(axis=1)**.5\n # ind_visible = (pos[:, 2] > 0) * (self.d_min<d) * (d<self.d_max)\n ind_visible = (pos[:, 2] > self.d_min) * (d < self.d_max)\n N_visible = int(np.sum(ind_visible))\n\n # self.state = [X, Y, size]\n self.state = np.ones((N_visible, 7))\n for i in range(2):\n self.state[:, i] = self.mag * pos[ind_visible, i] / pos[ind_visible, 2]\n print(i, self.state[:, i].min(), self.state[:, i].max())\n self.state[:, 2] = self.size / d[ind_visible]\n\n # colors do not change\n self.state[:, 3:] = pos[ind_visible, 3:]\n\n # TODO: larger transparency at larger distance => too fancy :-)\n # self.state[:, 2] = self.size / d[ind_visible]\n\n # for i in range(3):\n # self.state[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n # self.state[:, i] -= self.bounds[2*i]", "def checkAmountOfNeighbors(self):\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)", "def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = 
new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def _move_particle(self, ip):\n # RANDOM WALK\n # ADVANCE ONLY THE PARTICLES THAT ARE \"ON\" (i.e. ABOVE STTHR).\n #\n particle = self.particles[ip] # get particle\n props = [\"state\", \"type\", \"x\", \"y\", \"ux\", \"vy\", \"factor\", \"tmem\"]\n state, pType, x, y, ux, vy, factor, tmem = particle.get_from_keys(props)\n if state > STTHR and pType == 1:\n DU = -(ux - UXM)*2.0*TFREQ*self.DT + CLANG*self.SQRTDT*normal()\n DV = -(vy - VYM)*2.0*TFREQ*self.DT + CLANG*self.SQRTDT*normal()\n UXP = ux + DU\n VYP = vy + DV\n XP = x + UXP*self.DT*factor\n YP = y + VYP*self.DT*factor\n STP = state*np.exp(-self.DT/tmem)\n particle.update(ux=UXP, vy=VYP, x=XP, y=YP, state=STP)\n elif (state > STTHR) and pType == 2:\n DU = ULAM*normal()\n DV = ULAM*normal()\n XP = x + DU*self.DT\n YP = y + DV*self.DT\n STP = state*np.exp(-self.DT/ TMEMRAD)\n particle.update(x=XP, y=YP, state=STP)\n if x > self.grid.XMAX - self.grid.DX:\n particle.update(x=self.grid.XMAX - self.grid.DX, state=0.)\n elif x < self.grid.XMIN + self.grid.DX:\n particle.update(x=self.grid.XMIN + self.grid.DX, state=0.)\n if y > self.grid.YMAX - self.grid.DY:\n particle.update(y=self.grid.YMAX - self.grid.DY, state=0.)\n elif y < self.grid.YMIN + self.grid.DY:\n particle.update(y=self.grid.YMIN + self.grid.DY, state=0.)", "def needs_rebuild(self):\n for p in self.sys.particles:\n dr = p.r - self.old_pos[p.id]\n dr.apply_periodic(self.sys.box)\n if dr.length() >= 0.5*self.pad:\n return True \n return False", "def calc_new_hill_poi(new_poi, params, x, above_idx):\n B_a = 0\n N_a = 1\n B_b = 2\n N_b = 3\n \n row, col = cuda.grid(2)\n if row < x.shape[0] and col < x.shape[1]:\n if above_idx[row,col] == True: \n new_poi[row, col] = \\\n params[B_a] * (x[row, col] ** params[N_a]) \\\n / (1 + x[row, col] ** params[N_a])\n else:\n new_poi[row, col] = \\\n params[B_b] * (x[row, col] ** params[N_b]) \\\n / (1 + x[row, col] ** params[N_b])", "def mover_bm_izquierda(self):\n self.nueva_posicion_posible_parte_superior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1]],\n [self.vertice_1[0] - self.velocidad,self.vertice_1[1]], \n [self.vertice_1[0] - 5 - 5, self.vertice_1[1]])\n self.nueva_posicion_posible_parte_inferior = self.mapa.consultar_casilla_por_movimiento([self.casilla[0] - 1,\n self.casilla[1] + 1],\n [self.vertice_3[0] - self.velocidad,self.vertice_3[1]],\n [self.vertice_3[0] - 5,self.vertice_3[1]]) \n if self.nueva_posicion_posible_parte_superior[0] != 1 and self.nueva_posicion_posible_parte_inferior[0] != 1:\n self.x -= self.velocidad * (self.x >= 15)\n self.posicion = [self.x,self.posicion[1]]\n self.casilla = [self.casilla[0] - self.nueva_posicion_posible_parte_superior[1] *(self.nueva_posicion_posible_parte_inferior[0] != 1) * (self.nueva_posicion_posible_parte_superior[0] != 1), self.casilla[1]]\n self.redefinir_vertices()", "def _update_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n if self.to_be_updated[row_number][col_number]:\n self.cells[row_number][col_number].update()", "def push_down (grid):\r\n \r\n #moves values down\r\n for row in range (3,0,-1):\r\n for column in range (4):\r\n if grid[row][column]==0 and grid[row-1][column]!=0:\r\n grid[row][column]=grid[row-1][column]\r\n grid[row-1][column]=0\r\n \r\n #moves values down\r\n for row in range (3,0,-1):\r\n for column in range (4):\r\n if 
grid[row][column]==0 and grid[row-1][column]!=0:\r\n grid[row][column]=grid[row-1][column]\r\n grid[row-1][column]=0 \r\n \r\n #checks for similar values and combines whilst ensuring values dont get added twice\r\n check_row=-1\r\n check_column=-1\r\n for row in range (0,3):\r\n for column in range (4):\r\n #check if values have been added already\r\n if row==check_row and column==check_column:\r\n continue\r\n \r\n elif grid[row][column]==grid[row+1][column]:\r\n grid[row+1][column]= 2*grid[row][column]\r\n grid[row][column]=0\r\n check_row=row+1\r\n check_column=column\r\n \r\n elif grid[row+1][column] == 0:\r\n grid[row+1][column]=grid[row][column]\r\n grid[row][column]=0\r\n \r\n \r\n for row in range (3,0,-1):\r\n for column in range (4):\r\n if grid[row][column]==0 and grid[row-1][column]!=0:\r\n grid[row][column]=grid[row-1][column]\r\n grid[row-1][column]=0 \r\n \r\n for row in range (3,0,-1):\r\n for column in range (4):\r\n if grid[row][column]==0 and grid[row-1][column]!=0:\r\n grid[row][column]=grid[row-1][column]\r\n grid[row-1][column]=0", "def __FreeTiles(self, grid, log=False):\n\n x_pos, _ = np.where(grid == 0)\n return len(x_pos)", "def move(self, row, col, player):\n value = (1.5 - player) * 2\n self.rows[row] += value\n self.colums[col] += value\n if row == col:\n self.diag[0] += value\n if row + col == self.n-1:\n self.diag[1] += value\n if abs(self.rows[row]) == self.n or abs(self.colums[col]) == self.n or abs(self.diag[0]) == self.n or abs(self.diag[1]) == self.n:\n return player\n return 0", "def update_points(self, *args):\n points = [Window.width / 2, Window.height / 2, .5, .5]\n i = 0\n while i < 2 * pi:\n i += 0.01 * pi\n points.extend([\n Window.width / 2 + cos(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n Window.height / 2 + sin(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n self.offset_x + sin(i),\n self.offset_y + cos(i)])\n\n self.mesh_points = points", "def getNumberPoints(self, move):\r\n (current_point_white, current_point_black) = self._board.get_nb_pieces()\r\n self._board.push(move)\r\n (new_point_white, new_point_black) = self._board.get_nb_pieces()\r\n self._board.pop()\r\n \r\n if(self._mycolor == 1): #black\r\n return (new_point_black-current_point_black) \r\n else:\r\n return (new_point_white-current_point_white)", "def update_pos(self, game_field, all_ghost_out, windowsize):\r\n\r\n # If Pac-Man wants to change the direction into a direction, that is not the same or the opposite of the current direction, it could possible be a pre- or postturn\r\n if self.direction != self.last_dir and find_opposite(self.last_dir) != self.direction and self.state != '':\r\n self.pre_or_post_turn(game_field, all_ghost_out)\r\n\r\n # If Pac-Man moves, update his position depending on his direction\r\n if self.state == 'm':\r\n fak = 1\r\n if self.direction == 'u':\r\n self.pos[1] -= fak * self.speed\r\n elif self.direction == 'd':\r\n self.pos[1] += fak * self.speed\r\n elif self.direction == 'l':\r\n self.pos[0] -= fak * self.speed\r\n elif self.direction == 'r':\r\n self.pos[0] += fak * self.speed\r\n\r\n ongrid = (self.pos[0] % self.grid_size == 0 and self.pos[1] % self.grid_size == 0)\r\n\r\n # When Pac-Man is on grid check the field type he's on and in front of him\r\n if ongrid :\r\n field = game_field.possible_way(self.pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n\r\n # When the next field is a wall of the maze, make Pac-Man stop moving, otherwise let him continue moving\r\n if 
field != None and field[0] == 'r':\r\n field2 = game_field.possible_way(self.pos, self.direction)\r\n self.cnt_points(field2, all_ghost_out)\r\n if field2 != None and field2[0] == 'r':\r\n self.state = ''\r\n else:\r\n self.state = 'm'\r\n\r\n # When the field in front of him is the end of a tunnel move Pac-Man to the other side\r\n if field == 'os':\r\n if self.direction == 'l':\r\n self.pos[0] = windowsize[0] \r\n elif self.direction == 'r':\r\n self.pos[0] = -self.grid_size\r\n\r\n # When the next field is a field Pac-Man can move on to, safe the latest direction in direction\r\n if (field == None or field[0] != 'r'):\r\n self.direction = self.last_dir[:]\r\n\r\n # Force Pacmans direction to drive through the tunnel, just to avoid graphical bugs\r\n if self.pos[0] < 0:\r\n self.direction = 'r'\r\n self.last_dir = 'r'\r\n elif self.pos[0] > windowsize[0] - self.grid_size:\r\n self.direction = 'l'\r\n self.last_dir = 'l'", "def periodic_kernel(rmax, kernel, pos, wts, log=null_log):\n if rmax>=0.5:\n raise Exception('Cannot have rmax greater than half the box size, could get periodic images')\n\n num_pts = len(pos)\n pos = array(pos)\n wts = array(wts)\n\n print('Finding optimal shift',file=log)\n pos = shift_pos_optimally(pos, rmax, log)\n print('Padding the unit cube', file=log)\n pad_idx, pad_pos = pad_unitcube(pos, rmax)\n\n print('Inserted {:,} ghost particles for periodicity'.format(len(pad_idx)),file=log)\n new_pts = concatenate((pos, pad_pos), axis=0)\n\n if sum(wts.shape)<=1:\n new_wts = empty(len(new_pts), dtype=wts.dtype)\n new_wts[:] = wts\n else:\n new_wts = concatenate((wts, wts[pad_idx]))\n\n # Scale everything to be in the new box\n scale_fac = 1.0 / (1+2*rmax) \n new_pts += rmax\n new_pts *= scale_fac\n\n pairs, sort_idx, pos, wts, accel = radial_kernel_evaluate(rmax*scale_fac, kernel, new_pts, new_wts, log=log, sort_data=True)\n\n # unsort only the real points\n unsort = empty_like(sort_idx)\n unsort[sort_idx] = arange(len(new_pts))\n unsort = unsort[:num_pts]\n\n accel = accel[unsort]\n\n # undo the scale factor (remember dx's were all shortened)\n accel *= 1.0/scale_fac\n\n return pairs, accel", "def pixel_shift_fun(self, i, points, image_shape):\n self.delta_0 = np.round(self.displacements[:, i, 0]).astype(int)\n self.delta_1 = np.round(self.displacements[:, i, 1]).astype(int)\n \n # Exlude the points that have displacement going outside of the image range\n out_of_range_it = np.logical_or(self.delta_0 + points[:, 0] > image_shape[0] - 1, self.delta_1 + points[:, 1] > image_shape[1] - 1)\n if np.any(out_of_range_it):\n self.delta_0[out_of_range_it] = 0\n self.delta_1[out_of_range_it] = 0\n self.valid_points[out_of_range_it] = False\n warnings.warn('Displacement is going outside of the image range! 
The valid points are saved in self.method.valid_points')\n self.displacements[~self.valid_points, i, :] = np.nan", "def update_before(self, x: int, y: float, w: float) -> None:\n old_value = 0\n if x < len(self.x):\n old_value = self.y[x]\n self.update(x + 1, y, w)\n while len(self.x) < x:\n self.x.append(len(self.x))\n self.y.append(y)\n self.w.append(w)\n if self.w[x] <= w:\n self.x[x] = x\n self.y[x] = y\n self.w[x] = w\n pl = x - 1\n while pl >= 0 and (self.y[pl] == 0 or (self.w[pl] <= w and self.y[pl] == old_value)):\n self.y[pl] = y\n self.w[pl] = w\n pl -= 1", "def local_update(self, increment):\r\n\r\n self.window_l += increment\r\n if self.window_l >= self.window_t: return\r\n self.connection.send_window_update(\r\n increment = self.window_o - self.window_l,\r\n stream = self.identifier\r\n )\r\n self.window_l = self.window_o", "def addNbr (self) :\n #we pick out the random number : 2 or 4\n if random.randint(1,10) == 1:\n randomNbr = 4\n else :\n randomNbr = 2\n\n #we pick a random position for the number\n emptyCounter = 0\n for k in range (4) :\n for i in range (4) :\n if self.grid[k,i] == 0 :\n emptyCounter += 1\n\n randomPosition = random.randint(0,emptyCounter-1)\n counter = 0\n for k in range (4) :\n for i in range (4) :\n if self.grid[k,i] == 0 :\n if (counter == randomPosition) :\n self.grid[k,i] = randomNbr\n return #we leave the function\n counter += 1", "def push_up (grid):\r\n \r\n #moves values up\r\n for row in range(3):\r\n for column in range(4):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row+1][column]\r\n grid[row+1][column]=0\r\n \r\n #moves values up\r\n for row in range(3):\r\n for column in range(4):\r\n if grid[row][column]==0:\r\n grid[row][column]=grid[row+1][column]\r\n grid[row+1][column]=0 \r\n \r\n #checks for similar values and combines\r\n for row in range(3):\r\n for column in range(4):\r\n if grid[row][column]==grid[row+1][column]: \r\n grid[row][column]=2*grid[row][column]\r\n grid[row+1][column]=0\r\n \r\n #moves remaining values up \r\n for row in range(3):\r\n for column in range(4):\r\n if grid[row][column]==0: \r\n grid[row][column]=grid[row+1][column]\r\n grid[row+1][column]=0", "def step(self):\n all_p = self.amount\n neighbors = self.model.grid.get_neighbors(self.pos, True)\n for n in neighbors:\n all_p += n.amount\n ave_p = all_p / (len(neighbors) + 1)\n\n self._nextAmount = (1 - self.model.evaporate) * \\\n (self.amount + (self.model.diffusion * \\\n (ave_p - self.amount)))\n\n if self._nextAmount < self.model.lowerbound:\n self._nextAmount = 0", "def move(self, action):\n ligne = self.location_[0] + self.actions_[action][0]\n column = self.location_[1] + self.actions_[action][1]\n newLocation = (ligne, column)\n self.location_ = newLocation\n newState = (self.location_[0] * self.width ) + self.location_[1]\n\n if self.location_[0] == 0 and self.location_[0] == 0:\n return 0\n\n return newState", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation 
hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def shift_cells(self, cells, direction):\n merge = False\n # sorted so cells within the same piece won't run into each other\n sorted_cells = TransformPiece.sort_cells(cells, direction)\n # if trying to shift past a wall, abort mission\n for cell in sorted_cells:\n value = self.get_cell_value(cell)\n if value < 1:\n continue\n adjacent_coords = TransformPiece.get_adjacent_coordinates(cell, direction)\n adjacent_value = self.get_cell_value(adjacent_coords)\n if adjacent_value == -1 and not self.is_in_buffer(adjacent_coords):\n return\n # do shift\n for cell in sorted_cells:\n value = self.get_cell_value(cell)\n if value < 1:\n continue\n adjacent_coords = TransformPiece.get_adjacent_coordinates(cell, direction)\n adjacent_value = self.get_cell_value(adjacent_coords)\n if adjacent_value < 1:\n # shift cell into empty space\n self.set_cell_value(adjacent_coords, value)\n self.clear_cell(cell)\n elif adjacent_value == value:\n # do merge\n self.set_cell_value(adjacent_coords, value * 2)\n self.clear_cell(cell)\n merge = True\n return merge", "def compute_positions(scores, layers, cells, direction):\n prior = 1/np.arange(3, layers+3)\n prior = prior/prior.sum()\n x = np.linspace(-5, 5, layers)[::-1] \n prior = 1/(1 + np.exp(-x))\n prior = prior/prior.sum()\n\n # Compute the probability depending on the direction in which we want to know where the change is\n if direction == \"lr\":\n col_scores = np.array([scores[np.arange(cells[0])*cells[1]+j, np.arange(cells[0])*cells[1]+j+1].sum() for j in range(layers)])\n elif direction == \"rl\":\n col_scores = np.array([scores[np.arange(cells[0])*cells[1]+cells[1]-1-j, np.arange(cells[0])*cells[1]+cells[1]-1-j-1].sum() for j in range(layers)])\n elif direction == \"tb\":\n col_scores = np.array([scores[np.arange(cells[1])+cells[1]*j, np.arange(cells[1])+cells[1]*(j+1)].sum() for j in range(layers)])\n elif direction == \"bt\":\n col_scores = np.array([scores[np.arange(cells[1])+cells[1]*(cells[0]-1-j), np.arange(cells[1])+cells[1]*(cells[0]-1-j-1)].sum() for j in range(layers)])\n \n # Apply softmax + multiply by prior -> Then get the most likely position\n col_scores = sm(col_scores)\n position = np.argmax(col_scores*prior)\n return position", "def _update_battle_position(self, new_cells=[], previous_cells=[]):\n if previous_cells:\n for previous_cell in previous_cells:\n self._battle_area.set_cell(previous_cell.get_name(), False)\n if new_cells:\n for new_cell in new_cells:\n self._battle_area.set_cell(new_cell.get_name(), self)", "def compute_controller(self):\n \n # here we implement an example for a consensus algorithm\n neig = self.get_neighbors()\n messages = self.get_messages()\n pos, rot = 
self.get_pos_and_orientation()\n \n #send message of positions to all neighbors indicating our position\n for n in neig:\n self.send_message(n, pos)\n \n # check if we received the position of our neighbors and compute desired change in position\n # as a function of the neighbors (message is composed of [neighbors id, position])\n dx = 0.\n dy = 0.\n if messages:\n for m in messages:\n dx += m[1][0] - pos[0]\n dy += m[1][1] - pos[1]\n # integrate\n des_pos_x = pos[0] + self.dt * dx\n des_pos_y = pos[1] + self.dt * dy\n \n #compute velocity change for the wheels\n vel_norm = np.linalg.norm([dx, dy]) #norm of desired velocity\n if vel_norm < 0.01:\n vel_norm = 0.01\n des_theta = np.arctan2(dy/vel_norm, dx/vel_norm)\n right_wheel = np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n left_wheel = -np.sin(des_theta-rot)*vel_norm + np.cos(des_theta-rot)*vel_norm\n self.set_wheel_velocity([left_wheel, right_wheel])", "def _shift_wall_index(self):\n if self.wall_index > 0:\n self.wall_index -= 1\n else:\n self._shift_wall_wind()\n self.wall_index = len(self.current_wall) - 1", "def delta(self):\n return (self.upper-self.lower) / float(self.num_cells)", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def ignite(cell, neighbours, wind):\n\n cell = int(cell)\n ignition_factor = 0\n if cell in [LAKE, BURNING, BURNT, END_BURN]: return ignition_factor\n neighbours = neighbours.astype(int)\n fully_burning_threshhold = [0.04, 0.01, 0, 0.1, 0, 0, 0.04]\n fully_burning_factor = 20\n start_burning_threshhold = [0.02, 0.005, 0, 0.05, 0, 0, 0.04]\n start_burning_factor = 10\n\n # add to cell ignition factor by multiplying\n # windspeed modifier and the cells burning threshhold\n # if a random number is less than the resulting number add\n # the burning factor multiplied by the wind speed modifier\n\n for index, neighbour in enumerate(neighbours):\n if neighbour == BURNING:\n if fully_burning_threshhold[cell] * wind[index] >= random.uniform(\n 0, 1):\n ignition_factor += int(\n math.floor(wind[index] * fully_burning_factor))\n if neighbour in [START_BURN,END_BURN] and \\\n start_burning_threshhold[cell] * wind[index] >= random.uniform(0,1):\n ignition_factor += int(\n math.floor(wind[index] * start_burning_factor))\n\n # if the cell is has already started to burn then a burning factor is\n # automatically applied\n\n if cell == START_BURN: ignition_factor += start_burning_factor\n return int(ignition_factor)", "def update_window(self, now=None):\n if now is None:\n now = datetime.datetime.now()\n\n window_age = now - self._window_start\n breakpoint = datetime.timedelta(days=self.WINDOW_SIZE)\n\n # Don't do anything to window if we're within the window.\n if window_age <= breakpoint:\n return\n\n # Shuffle the window half the window size if we're past the end.\n self._window_start += datetime.timedelta(days=self.WINDOW_SIZE / 2.0)\n\n # Work out what the deltas count would have been for this new\n # timespan. 
Take into account how far beyond the window end we are.\n delta_factor = 1 - ((self.WINDOW_SIZE / 2.0) / window_age.days)\n self._deltas = int(self._deltas * delta_factor)", "def build_grains(self):\n\t\ttime = datetime.datetime.now()\n\t\tif self.probability == 0:\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state != 0 :\n\t\t\t\t\tcontinue\n\t\t\t\telif self.check_empty_neighbours(cell):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\t\n\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\tgrains = [0 for i in range(self.grains)]\n\t\t\t\t\tfor i in range(1,self.grains+1):\n\t\t\t\t\t\tfor neighbour in neighbours:\n\t\t\t\t\t\t\tif neighbour.state == i and neighbour.timestamp < time:\n\t\t\t\t\t\t\t\tgrains[i] = grains[i] + 1\n\t\t\t\t\tif grains == [0 for i in range(self.grains)]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_grain = 0\n\t\t\t\t\tfor i in range(self.grains):\n\t\t\t\t\t\tif grains[i] >= new_grain:\n\t\t\t\t\t\t\tnew_grain = i\n\t\t\t\t\tcell.change_state(time, new_grain)\n\t\t\t\t\tself.empty_cells = self.empty_cells - 1\n\t\telse:\n\t\t\tfor cell in self.space.flat:\n\t\t\t\tif cell.state != 0 :\n\t\t\t\t\tcontinue\n\t\t\t\telif self.check_empty_neighbours(cell):\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\tif self.decide_changing(cell,neighbours,5, time):\n\t\t\t\t\t\tneighbours = self.get_nearest_neighbours(cell)\n\t\t\t\t\t\tif self.decide_changing(cell,neighbours,3, time):\n\t\t\t\t\t\t\tneighbours = self.get_further_neighbours(cell)\n\t\t\t\t\t\t\tif self.decide_changing(cell,neighbours,3, time):\n\t\t\t\t\t\t\t\tneighbours = self.get_neighbours(cell)\n\t\t\t\t\t\t\t\tgrains = [0 for i in range(self.grains)]\n\t\t\t\t\t\t\t\tfor i in range(1,self.grains+1):\n\t\t\t\t\t\t\t\t\tfor neighbour in neighbours:\n\t\t\t\t\t\t\t\t\t\tif neighbour.state == i and neighbour.timestamp < time:\n\t\t\t\t\t\t\t\t\t\t\tgrains[i] = grains[i] + 1\n\t\t\t\t\t\t\t\tif grains == [0 for i in range(self.grains)]:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tnew_grain = 0\n\t\t\t\t\t\t\t\tfor i in range(self.grains):\n\t\t\t\t\t\t\t\t\tif grains[i] >= new_grain:\n\t\t\t\t\t\t\t\t\t\tnew_grain = i\n\t\t\t\t\t\t\t\trandom_number = random.random() * 100\n\t\t\t\t\t\t\t\tif random_number <= self.probability:\n\t\t\t\t\t\t\t\t\tcell.change_state(time, new_grain)\n\t\t\t\t\t\t\t\t\tself.empty_cells = self.empty_cells - 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcontinue", "def update_wp_position(self, event):\n wp = -1\n cur_pos = np.array(\n [self.global_pose.latitude, self.global_pose.longitude])\n for idx, waypoint in enumerate(self.waypoints):\n temp = np.array([waypoint['lat'], waypoint['long']])\n alt_diff = abs(self._rel_alt[-1] - waypoint['rel_alt'])\n if idx == 0 and (np.linalg.norm(cur_pos - temp) < self._radius):\n wp = idx\n break\n elif (np.linalg.norm(cur_pos - temp) <\n self._radius) and (alt_diff < self._alt_radius):\n wp = idx\n break\n self._current_wp = wp" ]
[ "0.59834635", "0.5809151", "0.57765406", "0.56846297", "0.55949116", "0.5528766", "0.5480749", "0.54742026", "0.54698974", "0.54524153", "0.54521763", "0.54496145", "0.5448103", "0.5435209", "0.54142", "0.5403219", "0.53876007", "0.5387515", "0.5371499", "0.5368437", "0.53605324", "0.5331428", "0.5330667", "0.5313796", "0.52975523", "0.52965856", "0.52924114", "0.52881426", "0.52798", "0.5271108", "0.52578944", "0.525544", "0.5251035", "0.52303594", "0.52303594", "0.5230158", "0.5226065", "0.5220064", "0.5217016", "0.52124983", "0.5188116", "0.5187083", "0.5181989", "0.51730764", "0.5164199", "0.51625663", "0.51603955", "0.51454157", "0.5141329", "0.5139601", "0.51389724", "0.5129387", "0.5118499", "0.5117163", "0.51050353", "0.5096988", "0.5096282", "0.5095416", "0.5091108", "0.50905204", "0.5083797", "0.5077822", "0.5071231", "0.50707954", "0.5068251", "0.50668895", "0.5057067", "0.50543374", "0.5053862", "0.50532234", "0.50497186", "0.5046128", "0.5044838", "0.5040458", "0.5034806", "0.5029135", "0.50277317", "0.5026623", "0.5014758", "0.5013047", "0.5009474", "0.5006451", "0.5005367", "0.5002255", "0.49958438", "0.49944472", "0.4993969", "0.49911767", "0.49771136", "0.49743184", "0.497415", "0.49729", "0.49722165", "0.4971567", "0.49662042", "0.49636462", "0.49601567", "0.49596223", "0.49577296", "0.49552247" ]
0.67525464
0
Generate new particles at the right end of the plasma (i.e. between z_end_plasma - nz_inject*dz and z_end_plasma) Return them in the form of a particle buffer of shape (8, Nptcl)
def generate_particles( self, species, dz, time ) : # Shortcut for the number of integer quantities n_int = species.n_integer_quantities n_float = species.n_float_quantities # Create new particle cells if (self.nz_inject > 0) and (species.continuous_injection == True): # Create a temporary density function that takes into # account the fact that the plasma has moved if species.dens_func is not None: def dens_func( z, r ): return( species.dens_func( z-self.v_end_plasma*time, r ) ) else: dens_func = None # Create the particles that will be added zmax = self.z_end_plasma zmin = self.z_end_plasma - self.nz_inject*dz Npz = self.nz_inject * self.p_nz new_ptcl = Particles( species.q, species.m, species.n, Npz, zmin, zmax, species.Npr, species.rmin, species.rmax, species.Nptheta, species.dt, dens_func=dens_func, ux_m=self.ux_m, uy_m=self.uy_m, uz_m=self.uz_m, ux_th=self.ux_th, uy_th=self.uy_th, uz_th=self.uz_th) # Initialize ionization-relevant arrays if species is ionizable if species.ionizer is not None: new_ptcl.make_ionizable( element=species.ionizer.element, target_species=species.ionizer.target_species, level_start=species.ionizer.level_start, full_initialization=False ) # Convert them to a particle buffer # - Float buffer float_buffer = np.empty( (n_float, new_ptcl.Ntot), dtype=np.float64 ) float_buffer[0,:] = new_ptcl.x float_buffer[1,:] = new_ptcl.y float_buffer[2,:] = new_ptcl.z float_buffer[3,:] = new_ptcl.ux float_buffer[4,:] = new_ptcl.uy float_buffer[5,:] = new_ptcl.uz float_buffer[6,:] = new_ptcl.inv_gamma float_buffer[7,:] = new_ptcl.w if species.ionizer is not None: float_buffer[8,:] = new_ptcl.ionizer.w_times_level # - Integer buffer uint_buffer = np.empty( (n_int, new_ptcl.Ntot), dtype=np.uint64 ) i_int = 0 if species.tracker is not None: uint_buffer[i_int,:] = \ species.tracker.generate_new_ids(new_ptcl.Ntot) i_int += 1 if species.ionizer is not None: uint_buffer[i_int,:] = new_ptcl.ionizer.ionization_level else: # No new particles: initialize empty arrays float_buffer = np.empty( (n_float, 0), dtype=np.float64 ) uint_buffer = np.empty( (n_int, 0), dtype=np.uint64 ) return( float_buffer, uint_buffer )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_particles(self):\n # xf, yf = create_fluid_with_solid_cube()\n xf, yf = create_fluid()\n uf = np.zeros_like(xf)\n vf = np.zeros_like(xf)\n m = initialize_mass(xf, yf)\n rho = initialize_density_fluid(xf, yf)\n h = np.ones_like(xf) * self.hdx * self.dx\n fluid = get_particle_array_wcsph(x=xf, y=yf, h=h, m=m, rho=rho, u=uf,\n v=vf, name=\"fluid\")\n\n xt, yt = create_boundary(self.dx / 2.)\n ut = np.zeros_like(xt)\n vt = np.zeros_like(xt)\n m = np.ones_like(xt) * 1500 * self.dx * self.dx\n rho = np.ones_like(xt) * 1000\n h = np.ones_like(xt) * self.hdx * self.dx / 2.\n tank = get_particle_array_wcsph(x=xt, y=yt, h=h, m=m, rho=rho, u=ut,\n v=vt, name=\"tank\")\n\n return [fluid, tank]", "def generate_particle_distribution(self, max_loop = np.inf, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n F_max = np.max(self.DF.f) ; F_min = np.min(self.DF.f)\n\n n_particles = 0\n loop_counter = 0\n \n if self.optimize:\n relative_potential = self._interpolate_relative_potential\n else:\n relative_potential = self.DF.relative_potential\n \n \n \n # Continue until max number of particles chosen, or until max loop counter\n while ((n_particles < self.N_part) and (loop_counter < max_loop)):\n \n # choose random position, eval potential, choose velocity\n r = self._choose_position()\n \n Psi = relative_potential(r) \n v = self._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n\n # interpolate along DF to find f(E) of chosen particle\n f_E = self.DF.interpolate_f(E)\n\n # random number from 0 to F_max for accept reject\n #F = np.random.rand() * F_max\n \n # HOLY CRAP....Fmax - Fmin ... not Fmin - Fmax\n F = 10.0**( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n \n if F <= f_E: # accept particle\n\n \n # convert position to cartesian using random theta and phi\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n # save particle position\n self.pos[n_particles] = r * np.array([x,y,z])\n \n # repeat for velocity using new random numbers\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n # save particle velocity\n self.vel[n_particles] = v * np.array([vx,vy,vz])\n \n \n n_particles = n_particles + 1\n \n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. 
On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n \n if (not outfile == None):\n self.write_pd(outfile)\n \n return self.pos, self.vel", "def _init_particles(self):\n self.NPART = self.grid.get_npart()\n self.particles = np.empty(self.NPART, dtype=object)\n for i in range(self.NPART):\n tmem = TMEM\n ux = UXM + UPRIME*normal()*LANGFACTOR\n vy = VYM + UPRIME*normal()*LANGFACTOR\n self.particles[i] = Particle(tmem=tmem, ux=ux, vy=vy)\n #\n # PUT THE PARTICLES IN THE CELLS.\n # LOOP OVER CELLS AND DEFINE THEIR PARTICLES.\n # FOR NOW, ONLY POSITION DEPENDS ON SPACE HEIGHT & MEMORY DO NOT.\n # FIRST THE TREE PARTICLES, THEN THE BUILDING PARTICLES.\n #\n NX = self.grid.NX\n NY = self.grid.NY\n icounter = 0\n for i in range(NX - 1):\n for j in range(NY - 1):\n cell = self.grid.CELLS[i, j]\n x = self.grid.XCELL[i, j]\n y = self.grid.YCELL[i, j]\n for k in range(cell.NPARTTR):\n self.particles[k + icounter].update(x=x, y=y, type=1)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + icounter].update(x=x, y=y, type=2)\n icounter += cell.NPARTTR + cell.NPARTRAD", "def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. 
Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def build(self):\n # Store current positions of all particles\n self.old_pos = []\n for p in self.sys.particles:\n self.old_pos.append(copy(p.r))\n \n # Set up the cell list\n self.cell_list.wipe()\n for p in self.sys.particles:\n self.cell_list.add_particle(p)\n\n # Build the list \n self.neighbours = []\n for p in self.sys.particles:\n neighbours = []\n for n in self.cell_list.get_neighbours(p):\n pn = self.sys.particles[n]\n if pn.id > p.id:\n dr = pn.r - p.r \n dr.apply_periodic(self.sys.box)\n if dr.length() < self.rcut + self.pad:\n neighbours.append(n)\n self.neighbours.append(neighbours)\n \n self.sys.has_nl = True", "def distribute_KV(self):\n\n assert (self.emitx == self.emity), \"For a KV distribution, the planar emittances must be equal\"\n\n #total emittance of the K-V distribution is 4 times the planar emittance\n emit = 4.*self.emitx\n self.emit = emit\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n y0 = np.sqrt(self.emit)\n\n yMax = newton(self.whatsleft, y0)\n xMax = yMax\n\n # Generate particles by creating trials and finding particles with potential less than emittance,\n # then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n\n while ptclsMade < self.npart:\n #Note that the particle coordinates here are distributed in normal coordinates\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n if trialValue < self.emit:\n\n pMag = np.sqrt(2.*(self.emit - trialValue))\n pDir = 2.*np.pi * random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n\n #We want to provide the user with standard (non-normal) coordinates\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def x_add_particles():\n particle_count_list = np.zeros(7)", "def addParticles( screen, number, color ):\n\t\n\tparticles = []\n\t\n\tfor i in range( number ):\n\t\n\t\tradius = 5\n\t\tmass = 1\n\t\t\n\t\t#random position and velocity\n\t\tx, y = randint(-WINDOW_X + radius, 1), randint(-WINDOW_Y + radius, WINDOW_Y - radius)\n\t\tvx, vy = randrange(-1, 2, 2) * 100, randrange(-1, 2, 2) * 100\n\t\t\n\t\tparticles.append( Particle( screen, x, y, vx, vy, radius, mass, color ))\n\t\n\treturn particles", "def inject_planet(self,data, psf_library, c_ratio=[0.01, 0.1], x_bound=[4, 61], y_bound=[4, 61], no_blend=False):\n\n image = data.copy()\n pl_num = np.random.randint(1, high=4)\n pos_label = np.zeros([64, 64])\n used_xy = np.array([])\n c_prior = np.linspace(c_ratio[0], c_ratio[1], 100)\n if x_bound[0] < 4 or x_bound[0] > 61:\n raise Exception(\"current method only injects whole psf\")\n if y_bound[0] < 4 or y_bound[0] > 61:\n raise Exception(\"current method only injects whole psf\")\n\n for num in range(pl_num):\n while True:\n np.random.shuffle(c_prior)\n psf_idx = np.random.randint(0, high=psf_library.shape[0])\n Nx = np.random.randint(x_bound[0], high=x_bound[1])\n Ny = 
np.random.randint(y_bound[0], high=y_bound[1])\n if len(used_xy) == 0:\n pass\n else:\n if no_blend:\n if np.any(dist([Nx, Ny], used_xy) < 3):\n pass\n else:\n if np.any(np.array([Nx, Ny]) == used_xy):\n pass\n if dist([Nx, Ny], (32.5, 32.5)) < 4:\n pass\n else:\n planet_psf = psf_library[psf_idx]\n brightness_f = c_prior[0] * np.max(image) / np.max(planet_psf)\n image[Ny - 4:Ny + 3, Nx - 4:Nx + 3] += planet_psf * brightness_f\n used_xy = np.append(used_xy, [Nx, Ny]).reshape(-1, 2)\n pos_label[Ny - 4:Ny + 3, Nx - 4:Nx + 3] = 1\n break\n return image, pos_label", "def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V", "def new_star_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. 
\n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None", "def __init__(self, dim: tuple, count: int):\n self.surface = pygame.Surface(dim)\n # initialize\n self.particles = []\n # initialize\n for counter in range(count):\n pos = pygame.Vector2(random.randint(0, self.surface.get_width()), random.randint(0, self.surface.get_height()))\n direction = pygame.Vector2(10 * (random.random() - 0.5), 10 * (random.random() - 0.5))\n color = pygame.Color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 255)\n size = 5 + random.randint(0, 10)\n particle = Particle(self.surface, pos, direction, size, color)\n self.particles.append(particle)", "def new_marker_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def simulate_brownian(num_part, dt, time_steps, x0, y0, z0, sigma, drift = False):\n # Calculating drift components \n if drift == True: \n v_x = np.random.random() \n v_y = np.random.random() \n v_z = np.random.random() \n drift_x = v_x * dt \n drift_y = v_y * dt \n drift_z = v_z * dt \n else: \n drift_x = 0 \n drift_y = 0 \n drift_z = 0 \n\n # Generate Brownian increments \n increment_x = np.random.normal(loc = 0.0, scale = sigma, size = (num_part, time_steps - 1)) \n increment_y = np.random.normal(loc = 0.0, scale = sigma, size = (num_part, time_steps - 1)) \n increment_z = np.random.normal(loc = 0.0, scale = sigma, size = (num_part, time_steps - 1)) \n\n # Pre-allocation of memory for particle positions \n p_x = np.zeros(shape = (num_part, time_steps - 1)) \n p_y = np.zeros(shape = (num_part, time_steps - 1))\n p_z = np.zeros(shape = (num_part, time_steps - 1))\n\n # Generate initial position of particle(s) \n p_x[:, 0] = x0 + 20 * np.random.random(size = (1, num_part)) \n p_y[:, 0] = y0 + 20 * np.random.random(size = (1, num_part)) \n p_z[:, 0] = z0 + 20 * np.random.random(size = (1, num_part)) \n\n for p in np.arange(0, num_part, step = 1): \n for ti in np.arange(start = 1, stop = time_steps, step = 1): \n p_x[p, ti] = p_x[p, ti - 1] + increment_x[p, ti] + 10 * drift_x \n p_y[p, ti] = p_y[p, ti - 1] + increment_y[p, ti] + 10 * drift_y \n p_z[p, ti] = p_z[p, ti - 1] + increment_z[p, ti] + 10 * drift_z \n\n return p_x, p_y, p_z", "def new_gas_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', 
direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function", "def update_particle_cloud(self, scan):\n\n \"\"\"\n Initialise arrays for the new particle cloud,\n particle weights and cummulative weights\n \"\"\"\n newParticleCloud = []\n particleWeights = []\n \n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n\n sensorSigma=0.1 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n\n for p in self.particlecloud.poses:\n particleWeights.append(self.sensor_model.get_weight(scan, p))\n\n for i in range(len(self.particlecloud.poses)):\n randomSelection = numpy.random.random()\n csum = 0\n for p in self.particlecloud.poses:\n weight = self.sensor_model.get_weight(scan, p) / sum(particleWeights)\n csum += weight\n if csum >= randomSelection:\n newParticle = copy.deepcopy(p)\n newParticle.position.x = newParticle.position.x + (gaussianRandomNumX[i] * noise)\n newParticle.position.y = newParticle.position.y + (gaussianRandomNumY[i] * noise)\n newParticle.position.z = newParticle.position.z\n newParticleCloud.append(newParticle)\n break\n self.particlecloud.poses = newParticleCloud\n\n pass", "def simulate_fractionalbrownian(num_part, H, M, n, t, x0, y0, z0, gamma_H):\n # Generate zero mean and unit variance increments \n incx = np.random.normal(loc = 0.0, scale = 1.0, size = (num_part, t.shape[0])) \n incy = np.random.normal(loc = 0.0, scale = 1.0, size = (num_part, t.shape[0])) \n incz = np.random.normal(loc = 0.0, scale = 1.0, size = (num_part, t.shape[0])) \n\n # Pre-allocation of memory for particle positions \n p_x = np.zeros(shape = (num_part, t.shape[0])) \n p_y = np.zeros(shape = (num_part, t.shape[0])) \n p_z = np.zeros(shape = (num_part, t.shape[0])) \n\n # Generate initial position of particle(s)\n p_x[:, 0] = x0 + 10 * np.random.random(size = (1, num_part)) \n p_y[:, 0] = y0 + 10 * np.random.random(size = (1, num_part)) \n p_z[:, 0] = z0 + 10 * np.random.random(size = (1, num_part)) \n \n for p in np.arange(0, num_part, step = 1): \n for ti in np.arange(start = 1, stop = t.shape[0], step = 1): \n\n s1_x = np.array([ ((i ** (H - 0.5)) * incx[p, 1 + ti - i]) for i in range(1, n + 1)]).sum() \n s2_x = np.array([ (((n + i) ** (H - 0.5) - i ** (H - 0.5)) * incx[p, 1 + ti - n - i]) for i in range(1, 1 + n * (M - 1))]).sum() \n s1_y = np.array([ ((i ** (H - 0.5)) * incy[p, 1 + ti - i]) for i in range(1, n + 1)]).sum() \n s2_y = 
np.array([ (((n + i) ** (H - 0.5) - i ** (H - 0.5)) * incy[p, 1 + ti - n - i]) for i in range(1, 1 + n * (M - 1))]).sum() \n s1_z = np.array([ ((i ** (H - 0.5)) * incz[p, 1 + ti - i]) for i in range(1, n + 1)]).sum() \n s2_z = np.array([ (((n + i) ** (H - 0.5) - i ** (H - 0.5)) * incz[p, 1 + ti - n - i]) for i in range(1, 1 + n * (M - 1))]).sum() \n\n icx = gamma_H * (s1_x + s2_x) \n icy = gamma_H * (s1_y + s2_y) \n icz = gamma_H * (s1_z + s2_z) \n\n p_x[p, ti] = p_x[p, ti - 1] + icx \n p_y[p, ti] = p_y[p, ti - 1] + icy \n p_z[p, ti] = p_z[p, ti - 1] + icz \n return p_x, p_y, p_z", "def initializeParticles(self):\n import itertools\n import random\n #create a list of possible ghost permutations, where each of three ghosts can be on any of the legal positions in the boards.\n permutations = list(itertools.product(self.legalIntentions, repeat=self.numAgents))\n \n random.shuffle(permutations)\n p = len(permutations)\n n = self.numParticles\n self.particles = []\n #create the particles\n while n >= p:\n self.particles += permutations\n n -= p\n #add the remainder\n self.particles += permutations[0: n - 1]", "def create_particle(self,r,v=(0.0,0.0,0.0)):\n self.r[self.n] = r\n self.m[self.n] = self.m[self.n-1] \n self.v[self.n] = v\n self.n = self.n+1\n self.rebuild_lists()", "def monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, steppingMethod):\n totalGrid = np.zeros((BR.shape[0]//fluxGridCoarseness, BR.shape[1]//fluxGridCoarseness))\n trappedGrid = np.zeros((BR.shape[0]//fluxGridCoarseness, BR.shape[1]//fluxGridCoarseness))\n rReduced = np.linspace(np.min(r), np.max(r), len(r)//fluxGridCoarseness)\n rDelta = rReduced[1]-rReduced[0]\n rReduced += rDelta/2. # Use distance to cell centers to count particles\n zReduced = np.linspace(np.min(z), np.max(z), len(z)//fluxGridCoarseness)\n zDelta = zReduced[1]-zReduced[0]\n zReduced += zDelta/2. # Use distance to cell centers to count particles\n \n habitatCrossings = 0\n GDTcrossings = 0\n detectorCounts = np.zeros(14)\n \n gridStep = r[1]-r[0]\n \n numParticles = len(qms)\n for particleNumber in prange(numParticles):\n if particleNumber % (numParticles/10) == 0:\n print(particleNumber)\n \n qm = qms[particleNumber]\n v0 = vs[particleNumber]\n dt = (r[1]-r[0])/v0/2\n maxTime = rLim * 3 / v0\n maxSteps = int(maxTime / dt)\n particleGrid = np.zeros((BR.shape[0]//fluxGridCoarseness, BR.shape[1]//fluxGridCoarseness))\n crossedHabitat = 0\n crossedGDT = 0\n particleDetectorCounts = np.zeros(14)\n \n # Generate random point and direction\n point1 = startingPoints[particleNumber]\n direction = directions[particleNumber]\n noAccelStep = 0.99*gridStep*direction\n trapped = True\n \n x = point1.copy() # copy is important... 
\n v = direction*v0\n E = np.zeros(3)\n \n if steppingMethod == 2:\n x, _ = RKnext(x, v, qm, BR, BZ, r, z, dt/2)\n\n for i in range(maxSteps):\n # Count crossings\n particleR = (x[0]**2 + x[1]**2)**.5\n nearestR = nearestIndex(rReduced, particleR)\n nearestZ = nearestIndex(zReduced, x[2])\n particleGrid[nearestZ, nearestR] = 1\n if 9.7 < particleR < 12.3 and -1.3 < x[2] < 1.3:\n crossedHabitat = 1\n if -14 < x[2] < 14 and particleR < 5:\n crossedGDT = 1\n # Will's detectors\n # for det in range(14):\n # vd = (x[0] - det*1.4, x[1], x[2])\n # if (vd[0]**2+vd[1]**2+vd[2]**2)**.5 < 0.5:\n # particleDetectorCounts[det] = 1\n \n # Step\n if steppingMethod == 0:\n x += noAccelStep\n elif steppingMethod == 1:\n x, v = RKnext(x, v, qm, BR, BZ, r, z, dt)\n elif steppingMethod == 2:\n B = BxyzInterpolated(x, BR, BZ, r, z)\n x, v = BBnext(x, v, qm, B, E, dt)\n \n # Stop stepping if out of bounds\n if (particleR**2+x[2]**2)**.5 > rLim + .001: \n trapped = False\n break\n detectorCounts += particleDetectorCounts\n totalGrid += particleGrid\n if trapped:\n trappedGrid += particleGrid\n habitatCrossings += crossedHabitat\n GDTcrossings += crossedGDT\n \n print(\"Will's detectors:\", detectorCounts)\n \n # Divide cell counts by volume of cell\n totalGridUnscaled = totalGrid.copy()\n trappedGridUnscaled = trappedGrid.copy()\n for i in range(len(rReduced)):\n for j in range(len(zReduced)):\n volume = np.pi*((rReduced[i]+rDelta/2.)**2-(rReduced[i]-rDelta/2.)**2)*zDelta\n totalGrid[j, i] /= volume\n trappedGrid[j, i] /= volume\n \n return rReduced, zReduced, totalGrid, trappedGrid, habitatCrossings, GDTcrossings, totalGridUnscaled, trappedGridUnscaled", "def distribute_KV(self):\n\n assert (self.emitx == self.emity), \"For a KV distribution, the planar emittances must be equal\"\n\n #total emittance of the K-V distribution is equal to the planar emittance\n #this differs from the linear K-V distribution\n emit = self.emitx\n self.emit = emit\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n y0 = np.sqrt(self.emit)\n\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n\n # Generate particles by creating trials and finding particles with potential less than emittance,\n # then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n\n while ptclsMade < self.npart:\n #Note that the particle coordinates here are distributed in normal coordinates\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n if trialValue < self.emit:\n\n pMag = np.sqrt(2.*(self.emit - trialValue))\n pDir = 2.*np.pi * random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n\n #We want to provide the user with standard (non-normal) coordinates\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)", "def spring_particle(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n 
num_particles = NUM_PARTS\n collater = {}\n\n def diffeq_hyper(t, q, k, m, nparts):\n num_particles = nparts\n vels = q[2 * num_particles:]\n xs = q[:2 * num_particles]\n xs = xs.reshape(-1, 2)\n forces = np.zeros(xs.shape)\n new_k = np.repeat(k, num_particles) * np.tile(k, num_particles)\n new_k = np.repeat(new_k, 2).reshape(-1, 2)\n dx = np.repeat(xs, num_particles, axis=0) - np.tile(xs, (num_particles, 1))\n resu = -new_k * dx\n forces = np.add.reduceat(resu, np.arange(0, nparts * nparts, nparts)).ravel()\n\n return np.concatenate([vels / np.repeat(m, 2), forces]).ravel()\n\n def hamiltonian(vec, m, k, num_particles):\n num_particles = num_particles\n x = vec[:num_particles * 2]\n p = vec[2 * num_particles:]\n xs = x.reshape(-1, 2)\n ps = p.reshape(-1, 2)\n U1 = 0\n K = 0\n for i in range(num_particles):\n for j in range(i + 1, num_particles):\n U1 += .5 * k[i] * k[j] * ((xs[i] - xs[j]) ** 2).sum()\n K += 0.5 * ((ps[i] ** 2).sum()) / m[i]\n return K, U1\n\n theta = []\n dtheta = []\n energy = []\n mass_arr = []\n ks_arr = []\n lagrangian = []\n np.random.seed(seed)\n\n for traj in range(num_trajectories):\n ks = np.ones(NUM_PARTS)#np.random.uniform(.5, 1, size=(NUM_PARTS))\n positions = np.random.uniform(-1, 1, size=(NUM_PARTS, 2))\n velocities = np.random.uniform(-3, 3, size=(NUM_PARTS, 2))\n masses = np.ones(NUM_PARTS)#np.random.uniform(0.1, 1, size=NUM_PARTS)\n momentum = np.multiply(velocities, np.repeat(masses, 2).reshape(-1, 2))\n q = np.concatenate([positions, momentum]).ravel()\n qnrk = rk(lambda t, y: diffeq_hyper(t, y, ks, masses, num_particles), (0, T_max), q,\n t_eval=np.arange(0, T_max, dt),\n rtol=1e-12, atol=1e-12, method='DOP853')\n accum = qnrk.y.T\n ssr = int(sub_sample_rate / dt)\n accum = accum[::ssr]\n daccum = np.array([diffeq_hyper(0, accum[i], ks, masses, num_particles) for i in range(accum.shape[0])])\n energies = []\n lags = []\n for i in range(accum.shape[0]):\n ktmp, utmp = hamiltonian(accum[i], masses, ks, NUM_PARTS)\n energies.append(ktmp + utmp)\n lags.append(ktmp - utmp)\n\n accum += np.random.randn(*accum.shape) * noise_std\n daccum += np.random.randn(*daccum.shape) * noise_std\n\n theta.append(accum)\n dtheta.append(daccum)\n energy.append(energies)\n mass_arr.append(masses)\n ks_arr.append(ks)\n lagrangian.append(lags)\n\n collater['x'] = np.concatenate(theta)\n collater['dx'] = np.concatenate(dtheta)\n collater['energy'] = np.concatenate(energy)\n collater['lagrangian'] = np.concatenate(lagrangian)\n\n collater['mass'] = mass_arr\n collater['ks'] = ks_arr\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(collater, f)\n f.close()\n\n return collater", "def createParticles(self, type, style, *args):\n if not self.rank:\n logging.info('Creating particles {} with args'.format(type) + (' {}' * len(args)).format(*args))\n\n self.lmp.command('create_atoms {} {}'.format(type, style) + (' {}' * len(args)).format(*args))", "def pontos(self):\n \n self.sc = 1. 
\n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. + self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))", "def __init__(self, number_of_particles, restitution_coefficient, initial_positions, initial_velocities, masses,\n radii, pbc):\n self.N = number_of_particles # amount of particles\n self.restitution_coefficient = restitution_coefficient # coefficient determining the energy lost in collisions\n # initialize variables used in the class\n self.positions = np.zeros((self.N, 3)) # positions of particles\n self.initial_positions = np.zeros((self.N, 3)) # help variable to compute mean square displacement\n self.velocities = np.zeros((self.N, 3)) # velocities of 
particles\n self.masses = np.zeros(self.N) # mass of each particle\n self.radii = np.zeros(self.N) # radius of each particle\n self.collision_count_particles = np.zeros(self.N) # array keeping track of the number of collisions\n\n # set parameters equal to the input to the class. Use .copy() such that the parameters can be used in outer loop\n self.positions = initial_positions.copy()\n self.initial_positions = initial_positions.copy()\n self.velocities = initial_velocities.copy()\n self.masses = masses\n self.radii = radii\n # a priority queue / heap queue of tuples of (time_collision, collision_entities, collision_count when\n # computing the collision, box number of the particles). The collision count at computation is used to\n # ignore non-valid collisions due to the involved particles being in other collisions between computation and\n # collision. Box number is needed for the pbc.\n self.collision_queue = [] # heap queue needs list structure to work\n\n # In order to create 27 copies for pbc in three dimensions one need to known their relation to the original\n # box. These are given by offsets. Offsets is also used to correct positions of particles colliding in\n # different boxes (due to the pbc).\n self.offsets = [(-1, 1, 1), (0, 1, 1), (1, 1, 1), (-1, 0, 1), (0, 0, 1), (1, 0, 1), (-1, -1, 1), (0, -1, 1),\n (1, -1, 1), (-1, 1, 0), (0, 1, 0), (1, 1, 0), (-1, 0, 0), (0, 0, 0), (1, 0, 0), (-1, -1, 0),\n (0, -1, 0), (1, -1, 0), (-1, 1, -1), (0, 1, -1), (1, 1, -1), (-1, 0, -1), (0, 0, -1),\n (1, 0, -1), (-1, -1, -1), (0, -1, -1), (1, -1, -1)]\n # Crossings is used to compute current positions due to the periodic boundary conditions. It essentially get\n # updated every time a particle cross the edge in the x-, y- or z-direction.\n self.crossings = np.zeros((self.N, 3))\n\n self.pbc = pbc # periodic boundary conditions", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def iterator(self):\n print('Iterator running...')\n for i in range(self.num_itr):\n for j in range(self.part_num):\n # create r1,r2\n r1 = np.random.uniform(self.vmin, self.vmax, self.dim)\n r2 = np.random.uniform(self.vmin, self.vmax, self.dim)\n # Update\n self.particle[j].Vel = self.w * self.particle[j].Vel \\\n + self.c1 * r1 * (self.particle[j].Best_pos - self.particle[j].Pos) \\\n + self.c2 * r2 * (self.GlobalBest_Pos - self.particle[j].Pos)\n self.particle[j].Pos = self.particle[j].Pos + self.particle[j].Vel\n # Check whether position out of search space\n for x in range(len(self.particle[j].Pos)):\n if self.particle[j].Pos[x] > self.var_size[x][1]:\n self.particle[j].Pos[x] = self.var_size[x][1]\n if self.particle[j].Pos[x] < self.var_size[x][0]:\n self.particle[j].Pos[x] = self.var_size[x][0]\n assert self.var_size[x][1] >= self.particle[j].Pos[x] >= self.var_size[x][0]\n # self.particle[j].Pos[2] = int(self.particle[j].Pos[2])\n # Recalculate cost\n #print(self.particle[j].Pos)\n self.particle[j].Cost = self.objective(self.particle[j].Pos)\n print(\"Current cost=\", self.particle[j].Cost, \"With position:\", self.particle[j].Pos)\n if self.particle[j].Cost < self.particle[j].Best_cost:\n self.particle[j].Best_cost = self.particle[j].Cost\n self.particle[j].Best_pos = self.particle[j].Pos\n print(\"Find better personel best, Updating with pos:\", 
self.particle[j].Pos)\n if self.particle[j].Best_cost < self.GlobalBest_Cost:\n self.GlobalBest_Cost = self.particle[j].Best_cost\n self.GlobalBest_Pos = self.particle[j].Best_pos\n print(\"Find better global solution, Updating with pos:\", self.particle[j].Pos)\n else:\n print(\"Not better than previous global solution, dropping...\")\n else:\n print(\"Not better than previous personal best, dropping...\")\n self.Best_Cost.append(self.GlobalBest_Cost)\n self.w = self.w * 0.9\n print()\n print('iteration', i + 1, ': Cost=', self.GlobalBest_Cost)\n print_params(self.GlobalBest_Pos, self.candidate, net=self.net)", "def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)", "def _launch_particles(self, istep):\n for i in range(self.grid.NX-1):\n for j in range(self.grid.NY-1):\n INDX = i\n INDY = j\n cell = self.grid.CELLS[INDX, INDY]\n TLOCAL = self.TIME[istep] - cell.CLOCK\n TCRIT = cell.TIGNTR * (1 + RELT*normal())\n if cell.BURNSTAT == 1 and TLOCAL > TCRIT and cell.BURNSTAT2 == 1:\n LOCALF = LANGFACTOR\n indp = (INDX*(self.grid.NY - 1) + INDY)*2*Cell.NPARTMAX - 1\n for k in range(cell.NPARTTR):\n self.particles[k + indp].update(state=1.0, factor=LOCALF)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + indp].update(state=1.0, factor=LOCALF)\n cell.BURNSTAT2 = 0", "def initialise_particle_cloud(self, initialpose):\n # ----- Initialize the particle cloud as an empty array\n self.particlecloud = PoseArray()\n\n \"\"\"Create the noise to multiply by the random Gaussian number that will\n get added to each of the Poses, that are set to a random position\n and orientation around the initial pose\"\"\"\n sensorSigma=3 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n \"\"\"Create a range for the ammount of random Gaussian values to generate \"\"\"\n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n randomYawArray = []\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n x=random.randint(1,180)\n randomYaw=(math.pi/x)\n randomYawArray.append(randomYaw)\n\n iterator = 0\n\n \"\"\"\n\t Set the particles to a random position and orientation around the initial pose\n \"\"\"\n particleNumber = 10**2 # 10**3 # 10**4 # 10**5 experiment with different ammounts of particles\n\n while iterator < particleNumber:\n particle = Pose()\n particle.position.x = initialpose.pose.pose.position.x + (gaussianRandomNumX[iterator] * noise)\n particle.position.y = initialpose.pose.pose.position.y + (gaussianRandomNumY[iterator] * noise)\n particle.position.z = initialpose.pose.pose.position.z\n particle.orientation = rotateQuaternion(initialpose.pose.pose.orientation, randomYawArray[iterator])\n\n self.particlecloud.poses.append(particle)\n iterator += 1\n\n return self.particlecloud", "def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)", "def parallel_generate_particle_distribution(self, max_loop = np.inf, Ncore = 1, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = 
np.zeros((self.N_part, 3))\n \n \n # start running\n nmax = self.N_part / Ncore\n #pool = Pool(processes = Ncore)\n #pool.apply_async(_while_loop,)\n #result = pool.map(_while_loop, args=(self, nmax, max_loop,))\n #print result.get(timeout = 100)\n #p = Process(target=_while_loop, args=(nmax, max_loop,))\n jobs = []\n for i in np.arange(Ncore):\n p = multiprocessing.Process(target=_while_loop, args=(self, nmax, max_loop, \n Ncore, outfile,))\n jobs.append(p)\n p.start()\n \n for p in jobs:\n p.join()\n \n #results = [None]*self.N_part\n #results = [OUTPUT.get() for p in jobs]\n \n #results = np.array(results)\n \n #pos = results[:,0]\n #pos = pos.reshape(self.N_part,3)\n #self.pos = pos\n \n #vel = results[:,1]\n #vel = vel.reshape(self.N_part,3)\n #self.vel = vel\n \n \n #if (not outfile == None):\n # self.write_pd(outfile)\n # combine to a single output\n bash_command = \"cat \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n bash_command = bash_command + \"> \" + outfile\n os.system(bash_command)\n \n # now remove temporary files\n bash_command = \"rm \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n os.system(bash_command)\n \n bash_command = \"sed -i -e '1i#m x y z vx vy vz\\' \" + outfile\n os.system(bash_command)\n self.load_particle_ic(outfile)\n \n return self.pos, self.vel", "def generate_phi(self):\n self.phi = np.empty((100, self.K))\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n self.phi[i][j] = np.exp(-self.gamma * distance.euclidean(point, center) ** 2)\n self.phi = np.concatenate((self.phi, np.ones((100, 1))), axis=1)", "def pc_output_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_output_buffers_full(self, *args)", "def _calculateParticlesProducedDerivOpt(N, gluonDOF, momentaMagSquared, omegaFFT):\n # Where we will calculate dN/d^2k \n particleProduction = np.zeros((N,N))\n\n # # 2D Levi-Cevita symbol\n LCS = np.array([[0,1],[-1,0]])\n\n # # 2D Delta function\n KDF = np.array([[1,0],[0,1]])\n\n # Note that unlike in the rest of the code, i and j *do not* refer to the\n # spacial indices here: x and y do (too many indices... 
:/ )\n for y in range(N):\n for x in range(N):\n # To prevent any divide by zero errors\n if momentaMagSquared[y,x] == 0:\n continue\n \n # All of these 2s are for our two dimensions, x and y\n for i in range(2):\n for j in range(2):\n for l in range(2):\n for m in range(2):\n\n for a in range(gluonDOF):\n particleProduction[y,x] += np.real(2/(2*np.pi)**3 / momentaMagSquared[y,x] * (\n (KDF[i,j]*KDF[l,m] + LCS[i,j]*LCS[l,m])) * (\n omegaFFT[y,x,i,j,a] * np.conj(omegaFFT[y,x,l,m,a])))\n\n return particleProduction", "def generate_6D_Gaussian_bunch(\n self, n_macroparticles, intensity, epsn_x, epsn_y, sigma_z\n ):\n if self.longitudinal_mode == \"linear\":\n check_inside_bucket = lambda z, dp: np.array(len(z) * [True])\n Q_s = self.longitudinal_map.Q_s\n elif self.longitudinal_mode == \"non-linear\":\n bucket = self.longitudinal_map.get_bucket(\n gamma=self.gamma, mass=self.mass, charge=self.charge\n )\n check_inside_bucket = bucket.make_is_accepted(margin=0.05)\n Q_s = bucket.Q_s\n else:\n raise NotImplementedError(\"Something wrong with self.longitudinal_mode\")\n\n eta = self.longitudinal_map.alpha_array[0] - self.gamma ** -2\n beta_z = np.abs(eta) * self.circumference / 2.0 / np.pi / Q_s\n sigma_dp = sigma_z / beta_z\n epsx_geo = epsn_x / self.betagamma\n epsy_geo = epsn_y / self.betagamma\n\n injection_optics = self.transverse_map.get_injection_optics()\n\n bunch = generators.ParticleGenerator(\n macroparticlenumber=n_macroparticles,\n intensity=intensity,\n charge=self.charge,\n mass=self.mass,\n circumference=self.circumference,\n gamma=self.gamma,\n distribution_x=generators.gaussian2D(epsx_geo),\n alpha_x=injection_optics[\"alpha_x\"],\n beta_x=injection_optics[\"beta_x\"],\n D_x=injection_optics[\"D_x\"],\n distribution_y=generators.gaussian2D(epsy_geo),\n alpha_y=injection_optics[\"alpha_y\"],\n beta_y=injection_optics[\"beta_y\"],\n D_y=injection_optics[\"D_y\"],\n distribution_z=generators.cut_distribution(\n generators.gaussian2D_asymmetrical(sigma_u=sigma_z, sigma_up=sigma_dp),\n is_accepted=check_inside_bucket,\n ),\n ).generate()\n\n return bunch", "def gen_data(npt, typ, ndim, rstate=None):\n mid = .5 # i'm placing in unit cube\n if typ == 'ball':\n r0 = 0.5\n pts = genball(npt, ndim, rstate=rstate) * r0 + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n r0**ndim)\n elif typ == 'pin':\n w = 0.01\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, 1:] = genball(npt, ndim - 1, rstate=rstate) * w + mid\n pts[:, 0] = (rstate.uniform(size=npt) - 0.5) * a + mid\n volume = (np.pi**((ndim - 1) / 2) /\n scipy.special.gamma((ndim - 1) / 2 + 1) * w**(ndim - 1) * a)\n elif typ == 'torus':\n w = 0.01\n r0 = 0.45\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:,\n 2:] = (rstate.uniform(size=(npt, ndim - 2)) * 2 - 1) * w / 2 + mid\n volume = w**(ndim - 2) * np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'cylinder':\n w = 0.01\n r0 = 0.45\n a = 1\n pts = np.zeros((npt, ndim))\n pts[:, :2] = genshell(r0 - w / 2, r0 + w / 2, npt, 2,\n rstate=rstate) + mid\n pts[:, 2:] = rstate.uniform(size=(npt, ndim - 2)) * a\n volume = np.pi * ((r0 + w / 2)**2 - (r0 - w / 2)**2)\n elif typ == 'shell':\n r1 = 0.45\n r2 = 0.46\n pts = genshell(r1, r2, npt, ndim, rstate=rstate) + mid\n volume = (np.pi**(ndim / 2) / scipy.special.gamma(ndim / 2 + 1) *\n (r2**ndim - r1**ndim))\n else:\n raise RuntimeError('unknown', typ)\n return pts, volume", "def add_elec_bunch_gaussian( sim, sig_r, sig_z, n_emit, gamma0, 
sig_gamma,\n Q, N, tf=0., zf=0., boost=None,\n filter_currents=True, save_beam=None ):\n # Get Gaussian particle distribution in x,y,z\n x = np.random.normal(0., sig_r, N)\n y = np.random.normal(0., sig_r, N)\n z = np.random.normal(zf, sig_z, N) # with offset in z\n # Define sigma of ux and uy based on normalized emittance\n sig_ur = (n_emit/sig_r)\n # Get Gaussian distribution of transverse normalized momenta ux, uy\n ux = np.random.normal(0., sig_ur, N)\n uy = np.random.normal(0., sig_ur, N)\n # Now we imprint an energy spread on the gammas of each particle\n if sig_gamma > 0.:\n gamma = np.random.normal(gamma0, sig_gamma, N)\n else:\n # Or set it to zero\n gamma = np.full(N, gamma0)\n if sig_gamma < 0.:\n print(\"Warning: Negative energy spread sig_gamma detected.\"\n \" sig_gamma will be set to zero. \\n\")\n # Finally we calculate the uz of each particle\n # from the gamma and the transverse momenta ux, uy\n uz = np.sqrt((gamma**2-1) - ux**2 - uy**2)\n # Get inverse gamma\n inv_gamma = 1./gamma\n # Get weight of each particle\n w = -1. * Q / N * np.ones_like(x)\n\n # Propagate distribution to an out-of-focus position tf.\n # (without taking space charge effects into account)\n if tf != 0.:\n x = x - ux*inv_gamma*c*tf\n y = y - uy*inv_gamma*c*tf\n z = z - uz*inv_gamma*c*tf\n\n # Save beam distribution to an .npz file\n if save_beam is not None:\n np.savez(save_beam, x=x, y=y, z=z, ux=ux, uy=uy, uz=uz,\n inv_gamma=inv_gamma, w=w)\n\n # Add the electrons to the simulation\n add_elec_bunch_from_arrays( sim, x, y, z, ux, uy, uz, w,\n boost=boost, filter_currents=filter_currents )", "def metropolis_step_PBC(self, positions):\n \"\"\"with brute-force sampling of new positions.\"\"\"\n\n # r = random.random()*random.choice((-1, 1))\n # r is a random number drawn from the uniform prob. dist. in [0,1]\n r = np.zeros(self.num_d)\n for i in range(self.num_d):\n r[i] = np.random.uniform(-1, 1)\n # Pick a random particle\n random_index = np.random.randint(0, high=len(positions))\n new_positions = np.array(positions)\n new_random_position = new_positions[random_index, :]\n # Suggest a new move\n new_positions[random_index, :] = new_random_position + r*self.delta_R\n # Check boundarys, apply PBC if necessary\n pbc = self.periodic_boundary_conditions(new_positions, random_index)\n new_positions[random_index, :] = pbc\n\n test_wavefunction = self.w.wavefunction(new_positions)\n if test_wavefunction**2 <= 1e-14:\n pass\n else:\n acceptance_ratio = self.w.wavefunction_ratio(positions,\n new_positions)\n epsilon = np.random.sample()\n\n if acceptance_ratio > epsilon:\n positions = new_positions\n self.s.distances_update_PBC(positions, random_index)\n # print (self.s.distances)\n self.c += 1.0\n\n else:\n pass\n\n return positions", "def add_elec_bunch( sim, gamma0, n_e, p_zmin, p_zmax, p_rmin, p_rmax,\n p_nr=2, p_nz=2, p_nt=4, dens_func=None, boost=None,\n direction='forward', filter_currents=True ) :\n\n # Convert parameters to boosted frame\n if boost is not None:\n beta0 = np.sqrt( 1. 
- 1./gamma0**2 )\n p_zmin, p_zmax = boost.copropag_length(\n [ p_zmin, p_zmax ], beta_object=beta0 )\n n_e, = boost.copropag_density( [n_e], beta_object=beta0 )\n gamma0, = boost.gamma( [gamma0] )\n\n # Modify the input parameters p_zmin, p_zmax, r_zmin, r_zmax, so that\n # they fall exactly on the grid, and infer the number of particles\n p_zmin, p_zmax, Npz = adapt_to_grid( sim.fld.interp[0].z,\n p_zmin, p_zmax, p_nz )\n p_rmin, p_rmax, Npr = adapt_to_grid( sim.fld.interp[0].r,\n p_rmin, p_rmax, p_nr )\n\n # Create the electrons\n relat_elec = Particles( q=-e, m=m_e, n=n_e,\n Npz=Npz, zmin=p_zmin, zmax=p_zmax,\n Npr=Npr, rmin=p_rmin, rmax=p_rmax,\n Nptheta=p_nt, dt=sim.dt,\n continuous_injection=False,\n dens_func=dens_func, use_cuda=sim.use_cuda,\n grid_shape=sim.fld.interp[0].Ez.shape )\n\n # Give them the right velocity\n relat_elec.inv_gamma[:] = 1./gamma0\n relat_elec.uz[:] = np.sqrt( gamma0**2 -1.)\n\n # Electron beam moving in the background direction\n if direction == 'backward':\n relat_elec.uz[:] *= -1.\n\n # Add them to the particles of the simulation\n sim.ptcl.append( relat_elec )\n\n # Get the corresponding space-charge fields\n get_space_charge_fields( sim.fld, [relat_elec], gamma0,\n filter_currents, direction=direction)", "def partition_particles(self):\n\n nPartitions = self.nPartitions\n N, tau, dom_mins, dom_maxs = self.N, self.tau, self.dom_mins, self.dom_maxs\n\n # mark the ghosts\n self.particle_rdd = self._set_ghost_mask(self.particle_rdd)\n \n gl_to_loc_map = self.global_to_local_map\n gl_to_loc_map_b = self.sc.broadcast(gl_to_loc_map)\n\n def remap_partition(particles):\n \"\"\"Helper function to remap groups\"\"\"\n remap_gid_partition_cython(particles, gl_to_loc_map_b.value)\n return particles\n\n ghosts_rdd = (self._partition_rdd(self.particle_rdd, partition_ghosts)\n .filter(lambda (k,v): k in gl_to_loc_map_b.value)\n .map(lambda (k,v): (gl_to_loc_map_b.value[k],v))\n .partitionBy(nPartitions)\n .map(lambda (k,v): v, preservesPartitioning=True))\n \n part_rdd = self.particle_rdd\n\n partitioned_rdd = ghosts_rdd + part_rdd\n self._partitioned_rdd = partitioned_rdd\n\n return partitioned_rdd", "def partition_particles(self):\n\n nPartitions = self.nPartitions\n N, tau, dom_mins, dom_maxs = self.N, self.tau, self.dom_mins, self.dom_maxs\n\n # mark the ghosts\n self.particle_rdd = self._set_ghost_mask(self.particle_rdd)\n \n \n ghosts_rdd = (self._partition_rdd(self.particle_rdd, partition_ghosts)\n .partitionBy(nPartitions)\n .map(lambda (_,v): v, preservesPartitioning=True))\n\n part_rdd = self.particle_rdd\n partitioned_rdd = ghosts_rdd + part_rdd\n self._partitioned_rdd = partitioned_rdd\n\n return partitioned_rdd", "def get_planetesimals_disk(n_disk, r_in=20.0|units.AU, r_out=50.0|units.AU, m_star=1.0|units.MSun, \r\n alpha=None, m_disk=1.0e-15|units.MSun, seed = 42, disk_num = 1):\r\n numpy.random.seed(seed) # Mess with random seed\r\n\r\n for i in xrange(int(disk_num) + 4):\r\n planetesimals = Particles(n_disk)\r\n print \"Seed:\", seed\r\n print planetesimals.key[:10]\r\n planetesimals.mass = 0.0|units.MJupiter\r\n planetesimals.radius = 100.0|units.km\r\n planetesimals.collection_attributes.timestamp = 0.0 | units.yr\r\n \r\n if alpha is not None:\r\n converter = nbody_system.nbody_to_si(m_disk, r_in)\r\n power_disk = ProtoPlanetaryDisk(n_disk, convert_nbody=converter, densitypower=alpha, \r\n Rmin=1.0, Rmax=1.0*r_out/r_in, q_out=0.0, discfraction=1.0).result\r\n x = power_disk.x\r\n y = power_disk.y\r\n z = power_disk.z # <--- Mystery error?\r\n\r\n 
print \"X\"\r\n print x.value_in(units.AU)\r\n print \"Y\"\r\n print y.value_in(units.AU)\r\n print \"Z\"\r\n print z.value_in(units.AU)\r\n #z = 0\r\n\r\n print \"MASS\"\r\n print power_disk.mass\r\n\r\n #power_disk.mass = 0.0 * power_disk.mass ###### THIS WORKS!!!! (if you want to switch to this later)\r\n\r\n print power_disk.mass\r\n \r\n a = (x**2 + y**2)**0.5\r\n print \"SM-AXIS\"\r\n print a.value_in(units.AU)\r\n \r\n phi = numpy.arctan2(y.value_in(units.AU), x.value_in(units.AU))\r\n vc = (constants.G*m_star/a)**0.5\r\n vx = - vc * numpy.sin(phi)\r\n vy = vc * numpy.cos(phi)\r\n vz = 0.0 * vc\r\n # vz = - vc * numpy.sin(phi) # ???????????????????????????????????????????????????????????????? #\r\n\r\n print \"VX\"\r\n print vx.value_in(units.km / units.s)\r\n print \"VY\"\r\n print vy.value_in(units.km / units.s)\r\n print \"VZ\"\r\n print vz.value_in(units.km / units.s)\r\n\r\n print \"PLANAR VELOCITY VECTOR\"\r\n print ((vx**2 + vy**2)**(0.5)).value_in(units.km / units.s)\r\n\r\n #vx = power_disk.vx\r\n #vy = power_disk.vy\r\n #vz = power_disk.vz\r\n\r\n #print \"POWER DISK VX\"\r\n #print vx.value_in(units.km / units.s)\r\n #print \"POWER DISK VY\"\r\n #print vy.value_in(units.km / units.s)\r\n #print \"POWER DISK VZ\"\r\n #print vz.value_in(units.km / units.s)\r\n \r\n else:\r\n a = r_in + (r_out-r_in)*numpy.random.rand(n_disk)\r\n phi_rand = 2.0 * numpy.pi * numpy.random.rand(n_disk)\r\n \r\n x = a * numpy.cos(phi_rand)\r\n y = a * numpy.sin(phi_rand)\r\n z = 0.0 * a\r\n \r\n vc = (constants.G*m_star/a)**0.5\r\n vx = - vc * numpy.sin(phi_rand)\r\n vy = vc * numpy.cos(phi_rand)\r\n vz = 0.0 * vc\r\n \r\n planetesimals.x = x\r\n planetesimals.y = y\r\n planetesimals.z = z\r\n \r\n planetesimals.vx = vx\r\n planetesimals.vy = vy\r\n planetesimals.vz = vz\r\n \r\n return planetesimals", "def create_plasma(self) -> list:\n\n self.plasma = paramak.Plasma(\n major_radius=6.2e2,\n minor_radius=2e2,\n elongation=1.7,\n triangularity=0.33,\n vertical_displacement=5.7e1,\n configuration=\"single-null\",\n rotation_angle=self.rotation_angle,\n )\n\n return [self.plasma]", "def resampleParticles(self, gameState):\n self.particles = []\n for i in range(self.numParticles):\n self.particles.append(tuple(util.sample(self.uniformPrior) for _ in\n self.ghostIndices))", "def generate_synth_data(n):", "def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])", "def new_plummer_distribution(number_of_particles, \n total_mass = 1.0|nbody_system.mass, \n virial_radius = 1.0|nbody_system.length,\n mass_cutoff = 0.999,\n G = None,\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n particles = new_plummer_spatial_distribution(number_of_particles, total_mass=total_mass, \n virial_radius=virial_radius, **keyword_arguments)\n \n if G is None:\n G = nbody_system.G if generic_unit_system.is_generic_unit(total_mass.unit) else constants.G\n velocity_unit = (G*total_mass/virial_radius).sqrt().unit.base_unit()\n plummer_radius = 0.1875 * numpy.pi * virial_radius\n \n escape_velocity = (1 + particles.position.lengths_squared()/plummer_radius**2)**(-0.25) | velocity_unit\n velocity = escape_velocity * 
sample_from_velocity_distribution(number_of_particles)\n velocity *= numpy.sqrt((G*total_mass*number_of_particles) / (2*virial_radius*velocity.length_squared()))\n particles.velocity = velocity.reshape((-1,1)) * random_direction(number_of_particles)\n return particles", "def update(self, env, u, z, marker_id):\n # YOUR IMPLEMENTATION HERE\n\n new_particles_bar = np.zeros((self.num_particles, 3))\n importance_weights = np.ones(self.num_particles)\n ita = 0\n for m in range(self.num_particles):\n u_noisy = env.sample_noisy_action(u, self.alphas)\n xt = env.forward(self.particles[m,:].reshape(-1, 1), u_noisy)\n zt_hat = env.observe(xt, marker_id)\n importance_weights[m] = env.likelihood(minimized_angle(z - zt_hat), self.beta)\n new_particles_bar[m,:] = xt.reshape(1, -1)\n ita += importance_weights[m]\n \n importance_weights = importance_weights/ita\n\n self.particles, self.weights = self.resample(new_particles_bar, importance_weights)\n mean, cov = self.mean_and_variance(self.particles)\n return mean, cov", "def _create_shadow_cells(self, assignments):\n prev_assignments = assignments[:, 1]\n num_assigned = prev_assignments.shape[0]\n\n prev_set = {i for i in range(len(self._prev_cells))}\n assigned_set = set(prev_assignments)\n\n diff_set = prev_set - assigned_set\n\n new_assignments = np.zeros(shape=(self._prev_cells.shape[0],)).astype(\"int32\")\n new_assignments[:num_assigned] = prev_assignments\n\n new_curr_cells = np.zeros(shape=self._prev_cells.shape)\n new_curr_cells[:self._curr_cells.shape[0]] = self._curr_cells\n\n i = num_assigned\n\n for unassigned in diff_set:\n new_assignments[i] = unassigned\n new_curr_cells[i] = self._prev_cells[unassigned]\n\n # move shadow cell (pos += vel)\n new_curr_cells[i, bcell.BEG_POS_INDEX:bcell.END_POS_INDEX] += \\\n new_curr_cells[i, bcell.BEG_VEL_INDEX:bcell.END_VEL_INDEX]\n\n i += 1\n\n self._prev_cells = self._prev_cells[new_assignments]\n self._curr_cells = new_curr_cells\n self._particles_all = self._particles_all[new_assignments]", "def parse_chunks(self): \n result_particles = []\n (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()\n\n while chunk is not None:\n #\n # Discard the Flag record since it has already been processed.\n # We also need to check for this being the first record, \n # since an end of velocity record could result in a pattern match \n # with a Flag record if the size of the velocity records are \n # greater than or equal to the Flag record size.\n #\n if self._read_state[StateKey.FIRST_RECORD] and \\\n FLAG_RECORD_MATCHER.match(chunk):\n self._increment_state(FLAG_RECORD_SIZE)\n\n #\n # If we haven't reached the end of the Velocity record,\n # see if this next record is the last one (all zeroes).\n #\n elif not self._read_state[StateKey.VELOCITY_END]:\n velocity_end = self.velocity_end_record_matcher.match(chunk)\n self._increment_state(self.velocity_record_size)\n\n #\n # A velocity data record of all zeroes does not generate\n # a data particle.\n #\n if velocity_end:\n self._read_state[StateKey.VELOCITY_END] = True\n else:\n #\n # If the file is missing an end of velocity record,\n # meaning we'll exhaust the file and run off the end,\n # this test will catch it.\n #\n velocity_fields = self.parse_velocity_record(chunk)\n if velocity_fields:\n #\n # Generate a data particle for this record and add\n # it to the end of the particles collected so far.\n #\n timestamp = self.calculate_timestamp()\n ntp_time = ntplib.system_to_ntp_time(timestamp)\n\n particle = self._extract_sample(\n 
Vel3dKWfpStcVelocityDataParticle,\n None, velocity_fields, ntp_time)\n\n result_particles.append((particle,\n copy.copy(self._read_state)))\n\n #\n # Ran off the end of the file. Tell 'em the bad news.\n #\n else:\n log.warn(\"EOF reading velocity records\")\n raise SampleException(\"EOF reading velocity records\")\n\n #\n # If we have read the end of velocity data records,\n # the next record is the Time data record by definition.\n # Generate the data particle and\n # add it to the end of the particles collected so far.\n #\n else:\n #\n # Make sure there was enough data to comprise a Time record.\n # We can't verify the validity of the data,\n # only that we had enough data.\n #\n time_fields = self.parse_time_record(chunk)\n if time_fields:\n #\n # Convert the tuple to a list, add the number of\n # Velocity record received (not counting the end of\n # Velocity record, and convert back to a tuple.\n #\n time_list = list(time_fields)\n time_list.append(self.calculate_record_number() - 1)\n time_fields = tuple(time_list)\n ntp_time = ntplib.system_to_ntp_time(self.time_on)\n\n particle = self._extract_sample(\n Vel3dKWfpStcTimeDataParticle, \n None, time_fields, ntp_time)\n\n self._increment_state(TIME_RECORD_SIZE)\n result_particles.append((particle,\n copy.copy(self._read_state)))\n\n else:\n log.warn(\"EOF reading time record\")\n raise SampleException(\"EOF reading time record\")\n\n self._read_state[StateKey.FIRST_RECORD] = False\n\n (timestamp, chunk, start, \n end) = self._chunker.get_next_data_with_index()\n\n return result_particles", "def __init__(self,nparticles,initial_condition):\n self.nparticles = nparticles\n self.particles = np.array([Particle(mass,x,y) for x,y,mass in initial_condition])\n self.mass = np.array([self.particles[i].mass for i in range(len(self.particles))])\n self.position = np.array([self.particles[i].position for i in range(len(self.particles))])\n self.momentum = np.array([self.particles[i].momentum for i in range(len(self.particles))])", "def box_collision_info(self):\r\n position = np.zeros((self.Npart,3)) # antall part, dim, iterasjoner\r\n position[:,:] = np.random.uniform(0,1e-6, size = (self.Npart,3))\r\n velocity = np.zeros((self.Npart,3))\r\n velocity[:,:] = np.random.normal(0,self.sigma,size = (self.Npart,3))\r\n\r\n part_collided = 0\r\n part_escaped = 0\r\n momentum = 0\r\n\r\n print 'engine started'\r\n for i in xrange(1,self.n):\r\n #collision\r\n position += velocity*dt\r\n l_hole = position[:,0:2] > self.L/4\r\n h_hole = position[:,0:2] < (3*self.L)/4\r\n pos_xy = np.logical_and(l_hole, h_hole)\r\n pos_xy = np.logical_and(pos_xy[:,0], pos_xy[:,1])\r\n pos_z = position[:,2] < 0\r\n esc_part = np.logical_and(pos_z, pos_xy)\r\n\r\n #velocity[esc_part] = velocity[esc_part]\r\n part_escaped += np.sum(esc_part)\r\n\r\n for j in xrange(0,3):\r\n impact_wall_pos = np.logical_and(position[:,j] > 0,\r\n position[:,j] < self.L)\r\n velocity[np.logical_not(impact_wall_pos),j] = -velocity[\r\n np.logical_not(impact_wall_pos),j]\r\n\r\n\r\n if j == 0:\r\n part_collided += np.sum(np.logical_not(impact_wall_pos),j)\r\n momentum += np.sum(2*self.m*abs(velocity[np.logical_not(\r\n impact_wall_pos),j]))\r\n\r\n\r\n\r\n position[position < 0] = 0\r\n position[position >self.L] = self.L\r\n\r\n particle_collided = part_collided/2\r\n return position, velocity,part_escaped, impact_wall_pos, particle_collided, momentum", "def vec_repeat_at_end(x, p):\n n = x.shape[0]\n indices = jnp.arange(p) % n\n padding = x[indices]\n return jnp.concatenate((x, padding))", 
"def update_particles(self):\n for particle in self.particles:\n particle.update_coordinates(self.bounds)", "def assign_particle_extrema(data_dict, result_dict,\n mode='local_density',\n radius=1.5):\n\n # startpts = result_dict['particle_start_points'][:, COORD_COLS]\n # endpts = result_dict['particle_end_points'][:, COORD_COLS]\n particles = result_dict['particles']\n\n # assert len(startpts) == len(endpts)\n # assert len(startpts) == len(particles)\n \n for i, p in enumerate(particles):\n if p.semantic_type == 1:\n start_point = p.start_point\n end_point = p.end_point\n new_start_point, new_end_point = get_track_points(p.points,\n start_point,\n end_point, \n p.depositions,\n correction_mode=mode,\n r=radius)\n p.start_point = new_start_point\n p.end_point = new_end_point\n \n return {}", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] = 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def add_particles(P8gen, particles, data):\n for particle_id in particles:\n # Find particle in database (None: particle not found)\n particle = next((p for p in data['particles']\n if particle_id in [p['id'], p['name']]), None)\n if particle is None:\n raise ValueError(\"Could not find particle ID {0} in file {1}\"\n .format(particle, datafile))\n # Add the particle\n P8gen.SetParameters(particle['cmd'])", "def buffer_points_for_periodicBC(xy, PV, check=False):\n Epts = xy + PV[0]\n Npts = xy + PV[1]\n Wpts = xy - PV[0]\n Spts = xy - PV[1]\n NEpts = xy + PV[0] + PV[1]\n NWpts = xy - PV[0] + PV[1]\n SWpts = xy - PV[0] - PV[1]\n SEpts = xy + PV[0] - PV[1]\n xyout = np.vstack((xy, Epts, NEpts, Npts, NWpts, Wpts, SWpts, Spts, SEpts))\n if check:\n eps = 0.1\n plt.scatter(xy[:, 0], xy[:, 1], c='r', edgecolor='none')\n plt.scatter(Epts[:, 0] + eps, Epts[:, 1], c='y', edgecolor='none')\n plt.scatter(NEpts[:, 0] + eps, NEpts[:, 1] + eps, c='g', edgecolor='none')\n plt.scatter(Npts[:, 0], Npts[:, 1] + eps, c='b', edgecolor='none')\n plt.scatter(NWpts[:, 0] - eps, NWpts[:, 1] + eps, c='w')\n plt.scatter(Wpts[:, 0] - eps, Wpts[:, 1], c='m', 
edgecolor='none')\n plt.scatter(SWpts[:, 0] - eps, SWpts[:, 1] - eps, c='k', edgecolor='none')\n plt.scatter(Spts[:, 0], Spts[:, 1] - eps, c='lightgrey', edgecolor='none')\n plt.scatter(SEpts[:, 0] - eps, SEpts[:, 1] - eps, c='c', edgecolor='none')\n plt.show()\n return xyout", "def update_points(self, *args):\n points = [Window.width / 2, Window.height / 2, .5, .5]\n i = 0\n while i < 2 * pi:\n i += 0.01 * pi\n points.extend([\n Window.width / 2 + cos(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n Window.height / 2 + sin(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n self.offset_x + sin(i),\n self.offset_y + cos(i)])\n\n self.mesh_points = points", "def pc_output_buffers_full(self, *args):\n return _spacegrant_swig.DeNRZI_sptr_pc_output_buffers_full(self, *args)", "def _while_loop(pd, nmax, max_loop, ncore, outfile):\n \n # make sure the random seed is different for every processor\n random_number_seeds = [3456789, 7654321, 2435467, 8273645,\n 1085712, 4154712, 1248291, 8415917,\n 2345161, 5710916, 5718601, 7516234,\n 9235161, 4917519, 1111245, 8167834] \n \n # get the processor ID (1 - Ncore) and convert to single integer\n current = multiprocessing.current_process()\n pid = current._identity\n pid = pid[0]\n \n #\n # choose a different seed for each processor from the list so each processor has\n # a different randum number seed. Then, fiddle with each seed a little so \n # the seeds aren't the same every time the code is run\n seed = np.int(random_number_seeds[pid] * (np.random.rand()*(10.0 - 0.01) + 0.01))\n \n np.random.seed(seed)\n \n #print 'id and seed', pid, seed\n \n n_particles = 0\n loop_counter = 0\n fmin_scale = 1.0E-100 # -16 -> -32 -> -100\n F_max = np.max(pd.DF.f) #; F_min = np.min(pd.DF.f);\n F_min = np.min(pd.DF.f) * fmin_scale\n # F_max = np.max(pd.DF.f[:-1])#; F_max = 1.0E-88\n #print F_min, F_max\n if pd.optimize:\n relative_potential = pd._interpolate_relative_potential\n else:\n relative_potential = pd.DF.relative_potential\n \n \n pos = np.zeros((nmax, 3))\n vel = np.zeros((nmax, 3)) \n \n while (( n_particles < nmax) and (loop_counter < max_loop)):\n \n r = pd._choose_position()\n Psi = relative_potential(r) \n \n v = pd._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n \n f_E = pd.DF.interpolate_f(E)\n \n logF = ( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n # if choosing random F in log F, might be good to do the comparison in logspace as well\n #.... i.e log(F) <= log(f_E) \n #\n # 0 FOR F_E MEANS THAT E < E_min of the potential. THis happens when Psi - KE is smaller\n # than the value of the potential at large_r... should this be considered unbound \n # even though it isn't zero? Is this effectively zero? This has been adjusted in the velocity\n # picking routine but needs verification to make sure it works.... but regardless, I doubt\n # that this is the reason why the particles are failing for NFW but working for hernquist....\n #\n if np.abs(np.log10(f_E)) == np.inf:\n keep_particle = False\n _my_print('log value error... throwing out particle')\n else:\n keep_particle = (logF <= np.log10(f_E))\n\n \n if keep_particle: \n index = n_particles \n \n # \n # need to sample in cosine theta NOT theta!!!!! 
WTF!!!!!\n #\n #\n \n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n \n # convert position to cartesian using random theta and phi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n pos[index] = r * np.array([x,y,z])\n\n # repeat for velocity using new random numbersw\n theta = np.arccos( np.random.rand() * (2.0) - 1.0)\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n vel[index] = v * np.array([vx,vy,vz])\n \"\"\" \n #else: # do strict cartesian ... slower.... less attractive\n # \n # axis_list = [0, 1, 2]\n # random.shuffle(axis_list)\n \n # #axis_index = np.random.randint(3)\n # first_axis = axis_list[0]\n #\n # pos[index, first_axis] = np.random.rand() * (2.0*r) - r\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n # second_axis = axis_list[1]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2)\n \n pos[index, second_axis] = np.random.rand()*(2.0 * max_r) - max_r\n #del axis_list[axis_index]\n \n max_r = np.sqrt(r*r - pos[index,first_axis]**2 - pos[index,second_axis]**2)\n \n third_axis = axis_list[2]\n pos[index, third_axis] = np.random.rand() * (2.0 * max_r) - max_r\n \n if np.sqrt(pos[index,0]**2 + pos[index,1]**2 + pos[index,2]**2) > r:\n _my_print('R IS TOO LARGE')\n \n ###\n axis_list = [0, 1, 2]\n random.shuffle(axis_list)\n \n #axis_index = np.random.randint(3)\n first_axis = axis_list[0]\n \n vel[index, first_axis] = np.random.rand() * (2.0*v) - v\n #del axis_list[axis_index]\n \n #axis_index = np.random.randint(2)\n second_axis = axis_list[1]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2)\n \n vel[index, second_axis] = np.random.rand()*(2.0 * max_v) - max_v\n #del axis_list[axis_index]\n \n max_v = np.sqrt(v*v - vel[index,first_axis]**2 - vel[index,second_axis]**2)\n \n third_axis = axis_list[2]\n vel[index, third_axis] = np.random.rand() * (2.0 * max_v) - max_v \n \n \n \"\"\"\n n_particles = n_particles + 1\n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. 
On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n # now write out to a temporary file\n f = open(outfile + \"_%02i_\"%(pid) + \".temp\", 'w')\n fmt = \"%12.12E %12.12E %12.12E %12.12E %12.12E %12.12E %12.12E\\n\"\n \n \n for i in np.arange(nmax):\n f.write(fmt%(pd.M_part, pos[i,0], pos[i,1], pos[i,2], \n vel[i,0], vel[i,1], vel[i,2]))\n \n \n f.close() \n \n return pos, vel", "def start(self):\r\n for z in range(100):\r\n x = utilities.randint(0, self.width)\r\n y = utilities.randint(0, self.height)\r\n h = utilities.randint(2, 20)\r\n w = utilities.randint(2, 20) \r\n c = utilities.randRGBA()\r\n Planet(x, y, z, h, w, c)", "def start_points(n, world):\n world[0, 0] = 1\n world[n-1, n-1] = 1\n world[0, n-1] = 1\n world[n-1, 0] = 1\n world[np.round(n/2).astype(int)][np.round(n/2).astype(int)] = 1\n return world", "def placeParticle(self, x, y, z, count):\n self._sim.placeNumber(self, x, y, z, count)\n return self", "def gen_phantom(signal = (1.0, 1.0), s_res = .7, oversamp = 8, diam = (2, 4, 6, 8), n_frames = 20, offset=0):\n all_ves = []\n for d in diam:\n ves = gen_vessel(d, s_res/oversamp, oversamp, n_frames)\n all_ves.append(ves)\n\n total_width = 0\n max_height = 0\n for ves in all_ves:\n total_width += ves.shape[1]\n max_height = max(max_height, ves.shape[2])\n\n velv = np.zeros((n_frames, total_width, max_height))\n mask_crop = np.zeros((total_width, max_height))\n\n x_loc = 0\n ves_count = 1\n for ves in all_ves:\n velv[:, x_loc:x_loc+ves.shape[1], int(max_height/2 - ves.shape[2]/2):int(max_height/2 + ves.shape[2]/2)] = ves\n ves_mask = np.zeros((ves.shape[1], ves.shape[2]))\n ves_mask[np.abs(ves).sum(0) > 0] = ves_count\n mask_crop[x_loc:x_loc + ves.shape[1], int(max_height / 2 - ves.shape[2] / 2):int(max_height / 2 + ves.shape[2] / 2)] = ves_mask\n x_loc += ves.shape[1]\n ves_count += 1\n\n vel = np.zeros((n_frames, int(2*total_width), int(2*max_height)))\n mag = np.zeros(( int(2*total_width), int(2*max_height)))\n mask = np.zeros(( int(2*total_width), int(2*max_height)))\n\n d1 = velv.shape[1]//2\n d2 = velv.shape[2]//2\n do = offset\n vel[:,d1+do:-d1+do,d2+do:-d2+do] = velv\n mask[d1+do:-d1+do,d2+do:-d2+do] = mask_crop\n\n [xx, yy] = np.meshgrid(np.linspace(-1, 1, mag.shape[0]), np.linspace(-1, 1, mag.shape[1]), indexing='ij')\n rad = xx * xx + yy * yy\n mag[rad < .90] = signal[0]\n mag[np.abs(vel).sum(0) > 0] = signal[1]\n\n return mask, mag, vel", "def pc_output_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.randomsampler_sptr_pc_output_buffers_full(self, *args)", "def particle_forceV(R,N,sigma,epsilon,D):\n F = np.zeros((3,N))\n x = np.zeros(N-1)\n y = np.zeros(N-1)\n z = np.zeros(N-1)\n r = np.zeros(N-1)\n # loop over all particles\n for i in range(N):\n # Distances for x,y,z between particles\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n a = (c*4*(sigma/epsilon)*(12/r**14-6/r**8))\n F[:,i] = -np.sum(a,1)\n return F", "def genBCCParams(self, evol='faber', Q=-0.866):\n\n zmeans = ( self.zbins[1:] + self.zbins[:-1] ) / 2\n par = np.zeros((len(zmeans), 8))\n\n for i, z in enumerate(zmeans):\n par[i,:] = self.genDSGParams(z, evol=evol, Q=Q)\n\n return par", "def spawn_system(\n max_steps = 25,\n xmax = 10,\n YMAX = 10,\n ZMAX = 10 ,\n NDIM = 3,\n SIDE = (5,5,5),\n VMAX = 0.0,\n dt = 0.05,\n SPACING = 1.0,\n TEMPERATURE = 0.95,\n HLONG = 4.0,\n 
HSHORT = 2.0,\n RINIT = 'grid',\n ascl = 7.45e+04,\n bscl = 5.84e-01,\n kbscl = 3.29e+04,\n pmass = 1.386e-01,\n ofname = 'data/toybox.nc'\n ):\n\n NP = SIDE[0]*SIDE[1]*SIDE[2]\n cnt = 0\n fps = 0\n\n print \"Initialising\"\n p = particles.SmoothParticleSystem(\n NP,maxn=NP,\n d=3,\n rinit=RINIT,\n vmax=VMAX,\n side=SIDE,\n spacing=SPACING,\n xmax=xmax,\n ymax=YMAX,\n zmax=ZMAX,\n temperature=TEMPERATURE,\n hlong=HLONG,\n hshort=HSHORT,\n thermostat_temp=TEMPERATURE,\n thermostat=True,\n mass=pmass\n )\n nl = neighbour_list.VerletList(p,cutoff=HLONG)\n p.nlists.append(nl)\n p.nl_default = nl\n p.forces.append(\n spam_complete_force.SpamComplete(\n p,nl,adash=ascl,bdash=bscl,kbdash=kbscl))\n #p.forces.append(forces.FortranCollisionForce(p,nl,cutoff=0.5))\n tstart = time()\n nl.build()\n nl.separations()\n spam_properties(p,nl)\n print 'Built list and calc properties',time()-tstart\n cnt = 0\n attribs = {'creator':'Andrew', 'log':'functional test'}\n create_sph_ncfile(ofname,attribs,NP,NDIM)\n print \"STEP INT DERIV = PAIR + SPAM + FORCE \"\n tstartrun = time()\n for i in range(max_steps):\n tstart = time()\n p.update(dt)\n if np.isnan(p.r).any():\n print 'stopping due to nan'\n break\n if i % 10 == 0:\n write_step(ofname,p)\n print 'Step',i,'took',time()-tstart\n g = p.timing.keys()\n g.sort()\n for k in g:\n print k,p.timing[k]\n print 'Completed',i,'steps, in',time()-tstartrun\n return ofname", "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, 
fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template", "def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count", "def pc_output_buffers_full(self, *args):\n return _spacegrant_swig.NRZI_sptr_pc_output_buffers_full(self, *args)", "def setupParticles(self):\n\n for ss in self.pargs['species']:\n\n # Make sure we are setting up particles, not walls (so we check for id existence)\n if 'id' in ss and 'wall' not in ss:\n if not self.rank:\n logging.info('Setting up particles for group{id}'.format(**ss))\n\n randName = np.random.randint(10**5,10**8)\n pddName = 'pdd' + '{}'.format(np.random.randint(10**5,10**8))\n\n if 'vol_lim' not in ss:\n ss['vol_lim'] = 1e-20\n\n id = ss['id'] - 1\n self.lmp.command('group group{} type {}'.format(id, ss['id']))\n\n if 'args'in ss:\n args = ss['args']\n else:\n args = ()\n\n if 'radius' in ss:\n radius = ss['radius']\n\n if not isinstance(radius, tuple):\n radius = ('constant', radius)\n\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density} radius'.format(**ss) + (' {}' * len(radius)).format(*radius) \\\n + (' {}' * len(args)).format(*args))\n else:\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density}'.format(**ss) + (' {}' * len(args)).format(*args))\n \n self.lmp.command('fix {} '.format(pddName) + 'group{}'.format(id) + ' particledistribution/discrete 67867967 1'.format(**ss) + ' {} 1.0'.format(randName))\n\n if ss['style'] is 'multisphere':\n itype = ss['style']\n else:\n itype = 'nve/{style}'.format(**ss)\n\n #Do NOT unfix randName! 
Will cause a memory corruption error\n self.pddName.append(pddName)", "def particleFill(*args, closePacking: bool=True, doubleWalled: bool=True, maxX: float=0.0,\n maxY: float=0.0, maxZ: float=0.0, minX: float=0.0, minY: float=0.0, minZ:\n float=0.0, particleDensity: float=0.0, resolution: int=0, **kwargs)->None:\n pass", "def metropolis_step(self, positions):\n \"\"\"with brute-force sampling of new positions.\"\"\"\n\n # r = random.random()*random.choice((-1, 1))\n # r is a random number drawn from the uniform prob. dist. in [0,1]\n r = np.zeros(self.num_d)\n for i in range(self.num_d):\n r[i] = np.random.uniform(-1, 1)\n # Pick a random particle\n random_index = np.random.randint(0, high=len(positions))\n new_positions = np.array(positions)\n new_random_position = new_positions[random_index, :]\n # Suggest a new move\n new_positions[random_index, :] = new_random_position + r*self.delta_R\n # Old system and wavefunction\n wavefunction = self.w.wavefunction(positions)\n old_wavefunction_squared = wavefunction**2\n\n # Test the new position with a new system and wavefunction\n # sys_test = System(self.num_p, self.num_d)\n # sys_test.positions_distances(new_positions)\n # alpha = self.w.alpha\n # beta = self.w.beta\n # a = self.w.a\n # wave_test = Wavefunction(self.num_p, self.num_d, alpha, beta, a, sys_test)\n # test_wavefunction = wave_test.wavefunction(new_positions)\n test_wavefunction = self.w.wavefunction(new_positions)\n\n new_wavefunction_squared = test_wavefunction**2\n # print ('Old = ', positions)\n\n if new_wavefunction_squared <= 1e-14:\n pass\n else:\n # acceptance_ratio = self.w.wavefunction_ratio(positions,\n # new_positions)\n acceptance_ratio = new_wavefunction_squared/old_wavefunction_squared\n epsilon = np.random.sample()\n\n if acceptance_ratio > epsilon:\n positions = new_positions\n # print ('New = ', positions)\n # self.s.distances_update(positions, random_index)\n # self.s.positions_distances(new_positions)\n self.c += 1.0\n\n else:\n pass\n\n return positions", "def addnewbend(zs,ze,rc,ap=0.,ax=0.,ay=0.,ox=0.,oy=0.):\n # --- Make sure that at least some of the element is in the proper range,\n # --- z >= 0., and if zlatperi != 0, z <= zlatperi.\n assert (zs < ze),\"element start must be less than element end\"\n assert (top.zlatperi == 0.) or (ze > 0.),\"element end must be greater than zero if top.zlatperi is nonzero\"\n assert (top.zlatperi == 0.) or (zs < top.zlatperi),\"element start must be less than zlatperi if top.zlatperi is nonzero\"\n\n # --- Get a dict of the input arguments and their values.\n ldict = locals()\n\n # --- Setup the lattice arrays for the insertion of the new element. If\n # --- there are already bends, then find the place where the new one is to\n # --- be inserted and shift the existing data to open up a space.\n # --- Note that this uses that same check as in resetlat, that zs != ze to\n # --- determine whether or not a bend is defined.\n ie = 0\n # --- Find which element the new one goes before.\n while (ie <= top.nbend and top.bendzs[ie] <= zs and\n top.bendzs[ie] != top.bendze[ie]):\n ie = ie + 1\n\n # --- Increase the size of the arrays if the element will go past the end\n # --- or if the array is full (i.e. 
the last element is used).\n if ie > top.nbend or top.bendzs[-1] != top.bendze[-1]:\n top.nbend = top.nbend + 100\n gchange(\"Lattice\")\n\n # --- Setup dictionary relating lattice array with input argument names.\n # --- This is done here so that the references to the lattice arrays\n # --- refer to the updated memory locations after the gchange.\n edict={'zs':top.bendzs,'ze':top.bendze,'rc':top.bendrc,\n 'ap':top.bendap,'ax':top.bendax,'ay':top.benday,\n 'ox':top.bendox,'oy':top.bendoy}\n\n # --- Shift the existing data in the arrays to open up a space for the\n # --- new element.\n if ie <= top.nbend:\n for e in edict.itervalues():\n e[ie+1:] = e[ie:-1] + 0\n\n # --- Insert the new element. Note that edict correlates the lattice array\n # --- with the input arguments and ldict correlate the arguements with\n # --- their values.\n for (xx,e) in edict.iteritems():\n e[ie] = ldict[xx]\n\n # --- resetlat must be called before the data can be used\n top.lresetlat = true\n\n return ie", "def keplerian_ics(nparticles,zscale=0,vzscatter=0,radialprof='exponential',\n zprof='gaussian',centermass=1):\n from core import Particles,PointPotential\n \n if radialprof == 'exponential':\n r = -np.log(1-np.random.rand(nparticles))\n elif radialprof == 'gaussian':\n r = np.random.randn(nparticles)\n elif radialprof == 'ring':\n r = np.ones(nparticles)\n elif radialprof == 'uniform':\n r = np.random.rand(nparticles)\n elif radialprof.startswith('power'):\n alpha = float(radialprof[5:])\n r = ((1-alpha)*np.random.rand(nparticles))**(1/(1-alpha))\n else:\n raise ValueError('Invalid radial profile')\n \n phi = 2*np.pi*np.random.rand(nparticles)\n \n x = r*np.cos(phi)\n y = r*np.sin(phi)\n \n if zprof == 'exponential':\n z = -np.log(1-np.random.rand(nparticles))\n elif zprof == 'gaussian':\n z = np.random.randn(nparticles)\n elif zprof == 'sechsq':\n z = np.arctanh(np.random.rand(nparticles))\n elif zprof.startswith('power'):\n alpha = float(radialprof[5:])\n z = ((1-alpha)*np.random.rand(nparticles))**(1/(1-alpha))\n else:\n raise ValueError('Invalid z profile')\n z *= zscale\n \n vz = vzscatter*np.random.randn(nparticles)\n \n vcirc = (centermass/r)**0.5 \n vxy = vcirc - vz\n \n s = np.hypot(x,y)\n vx = y*vxy/s\n vy = -x*vxy/s\n \n ps = Particles(x,y,z,vx,vy,vz)\n pot = PointPotential(centermass)\n \n return ps,pot", "def applyPhotoZ (self,arr):\n print \"Applying Template SED PZs\"\n\n ztrue = arr['z']\n\n #select a template\n templates = ['El_B2004a.sed']+['Sbc_B2004a.sed','Scd_B2004a.sed']\n templates = templates +['Im_B2004a.sed','SB3_B2004a.sed','SB2_B2004a.sed','ssp_25Myr_z008.sed','ssp_5Myr_z008.sed']\n\n #read in f_mod files, interpolate, get values of f_mod_b\n ngals = len(ztrue)\n\n f_mod_o = np.zeros((self.nb, ngals))\n for z in range(ngals):\n #currently templates are randomly chosen but probably should be an input with true z\n templateno = np.random.choice(range(self.nt))\n for b in range(self.nb):\n spl = InterpolatedUnivariateSpline(self.z_grid, self.f_mod[:,templateno,b])\n f_mod_o[b][z] = spl(ztrue[z])\n\n #select sigma_b - 10% for now\n sigma = 0.1*f_mod_o\n #select observed fluxes f_obs_b = f_mod_b + sigma_b*rando\n f_obs = f_mod_o+ sigma * (np.random.normal(0.,1.,self.nb*ngals).reshape((self.nb,ngals)))\n # I don't seem to be able to find a more efficient way\n arrx=np.zeros(ngals,dtype=[('pz_f_obs',float,(self.nb,)),('pz_flux_sigma',float,(self.nb,))])\n arrx['pz_f_obs']=f_obs.T\n arrx['pz_flux_sigma']=sigma.T\n arr = recfunctions.merge_arrays((arr,arrx),flatten=True,usemask=False)\n 
return arr", "def getNewSkeletonPoints(self):\n xVec, yVec, zVec = self.XYZCoordinate\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n xis = xs / L\n\n self.SkeletonPoints = []\n for i in range(len(xis)):\n xi_value = xis[i]\n sX = xs[i]\n sY = self.u_xyPlane.evalf(subs={'xi': xi_value})\n sZ = self.u_xzPlane.evalf(subs={'xi': xi_value})\n self.SkeletonPoints.append(self.coorOrigin + sX*xVec + sY*yVec + sZ*zVec) #coordinate of the bar origin is added", "def _write_particle(self, momentum, mass, pdg):\n\n E, px, py, pz = momentum\n\n self.file.write(\n \" %2i 1 0 0 0 0 %13.6e %13.6e %13.6e %13.6e %13.6e 0.00000 0.00000\\n\"\n % (pdg, px, py, pz, E, mass)\n )", "def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume", "def plume_buffer(int_gdf,swrad):\r\n ni_pt = int_gdf.geometry\r\n s_pt = swrad.geometry\r\n new2d =[] \r\n for cood in s_pt: \r\n pt_2d = Point(cood.x,cood.y)\r\n new2d.append(pt_2d)\r\n s_pt = new2d\r\n downwind_line = []\r\n for gp in zip(s_pt,ni_pt):\r\n line = LineString([gp[0],gp[1]])\r\n downwind_line.append(line)\r\n\r\n Buffer = [] \r\n for dl in zip(downwind_line,ND_list):\r\n buf = dl[0].buffer(dl[1]*0.2)\r\n Buffer.append(buf) \r\n \r\n DB = list_to_gdf (Buffer)\r\n \r\n return DB", "def noise(self):\r\n if self.buffer_offset + self.frames_per_buffer - 1 > self.x_max:\r\n #relleno con ceros al final si es necesario\r\n xs = np.arange(self.buffer_offset, self.x_max)\r\n tmp = np.random.random_sample(len(xs)) #ruido\r\n out = np.append(tmp, np.zeros(self.frames_per_buffer-len(tmp)))\r\n else:\r\n xs = np.arange(self.buffer_offset,\r\n self.buffer_offset + self.frames_per_buffer)\r\n out = np.random.random_sample(len(xs))\r\n self.buffer_offset += self.frames_per_buffer\r\n return out", "def gen_phantom_parts(signal = (1.0, 1.0), s_res = .7, oversamp = 8, diam = (4, 6, 8, 10), n_frames = 20):\n all_ves = []\n for d in diam:\n ves = gen_vessel(d, s_res/oversamp, oversamp, n_frames)\n all_ves.append(ves)\n\n total_width = 0\n max_height = 0\n for ves in all_ves:\n total_width += ves.shape[1]\n max_height = max(max_height, ves.shape[2])\n\n velv = np.zeros((n_frames, total_width, max_height))\n mask_crop = np.zeros((total_width, max_height))\n\n x_loc = 0\n ves_count = 1\n for ves in all_ves:\n velv[:, x_loc:x_loc+ves.shape[1], int(max_height/2 - ves.shape[2]/2):int(max_height/2 + ves.shape[2]/2)] = ves\n ves_mask = np.zeros((ves.shape[1], ves.shape[2]))\n ves_mask[np.abs(ves).sum(0) > 0] = ves_count\n mask_crop[x_loc:x_loc + ves.shape[1], int(max_height / 2 - ves.shape[2] / 2):int(max_height / 2 + ves.shape[2] / 2)] = ves_mask\n x_loc += ves.shape[1]\n ves_count += 1\n\n mag = np.zeros(( int(2*total_width), int(2*max_height)))\n\n [xx, yy] = np.meshgrid(np.linspace(-1, 1, mag.shape[0]), np.linspace(-1, 1, mag.shape[1]), indexing='ij')\n rad = xx * xx + yy * yy\n mag[rad < .90] = 1.0\n\n return mask_crop, mag, velv", "def pr_worker(pr_id, star, sh_star, rows, settings, qu):\n\n # Initialization\n out_part_dir = settings.out_part_dir\n do_ang_prior = settings.do_ang_prior\n 
do_ang_rnd = settings.do_ang_rnd\n in_mask = settings.in_mask\n rln_star = copy.deepcopy(sh_star)\n mask = ps.disperse_io.load_tomo(in_mask, mmap=False)\n\n # print '\\tLoop for particles: '\n count, n_rows = 0, len(rows)\n for row in rows:\n\n # print '\\t\\t\\t+Reading the entry...'\n in_pick_tomo = star.get_element('_rlnImageName', row)\n in_rec_tomo = star.get_element('_rlnMicrographName', row)\n in_ctf = star.get_element('_rlnCtfImage', row)\n x_pick = star.get_element('_rlnCoordinateX', row)\n y_pick = star.get_element('_rlnCoordinateY', row)\n z_pick = star.get_element('_rlnCoordinateZ', row)\n try:\n shift_x = star.get_element('_rlnOriginX', row)\n except KeyError:\n shift_x = 0\n try:\n shift_y = star.get_element('_rlnOriginY', row)\n except KeyError:\n shift_y = 0\n try:\n shift_z = star.get_element('_rlnOriginZ', row)\n except KeyError:\n shift_z = 0\n rot = star.get_element('_rlnAngleRot', row)\n tilt = star.get_element('_rlnAngleTilt', row)\n psi = star.get_element('_rlnAnglePsi', row)\n rot_prior, tilt_prior, psi_prior = None, None, None\n if ANGLE_NAMES[0] in do_ang_prior:\n rot_prior = rot\n if ANGLE_NAMES[0] in do_ang_rnd:\n rot = 180. * random.random()\n if ANGLE_NAMES[1] in do_ang_prior:\n tilt_prior = tilt\n if ANGLE_NAMES[1] in do_ang_rnd:\n tilt = 180. * random.random()\n if ANGLE_NAMES[2] in do_ang_prior:\n psi_prior = psi\n if ANGLE_NAMES[2] in do_ang_rnd:\n psi = 180. * random.random()\n angs = np.asarray((rot, tilt, psi), dtype=float)\n\n # Sub-volumes post-processing\n svol = ps.disperse_io.load_tomo(in_pick_tomo, mmap=False)\n r3d = pyto.geometry.Rigid3D()\n r3d.q = r3d.make_r_euler(angles=np.radians(angs), mode='zyz_in_active')\n if (shift_x != 0) or (shift_y != 0) or (shift_z != 0):\n svol = tomo_shift(svol, (shift_y, shift_x, shift_z))\n svol_sp = np.asarray(svol.shape, dtype=int)\n svol_cent = np.asarray((int(.5 * svol_sp[0]), int(.5 * svol_sp[1]), int(.5 * svol_sp[2])), dtype=np.float32)\n svol = r3d.transformArray(svol, origin=svol_cent, order=3, prefilter=True)\n stat_vol = svol[mask > 0]\n mn, st = stat_vol.mean(), stat_vol.std()\n if st > 0:\n svol = (svol - mn) / st\n svol = ps.globals.randomize_voxel_mask(svol, mask, ref='fg')\n r3d_inv = pyto.geometry.Rigid3D()\n r3d_inv.q = r3d.make_r_euler(angles=np.radians(angs), mode='zyz_in_passive')\n svol = r3d_inv.transformArray(svol, origin=svol_cent, order=3, prefilter=True)\n if (shift_x != 0) or (shift_y != 0) or (shift_z != 0):\n svol = tomo_shift(svol, (-shift_y, -shift_x, -shift_z))\n\n # Adding entry to particles STAR file\n out_part = out_part_dir + '/' + os.path.splitext(os.path.split(in_pick_tomo)[1])[0] + '.mrc'\n ps.disperse_io.save_numpy(svol, out_part)\n\n # Writing in the shared object\n print('\\t\\t-Process[' + str(pr_id) + '], Particle [' + str(count) + '/' + str(n_rows) + ']: ' + out_part)\n part_row = {'_rlnMicrographName': in_rec_tomo,\n '_rlnCtfImage': in_ctf,\n '_rlnImageName': out_part,\n '_rlnCoordinateX': x_pick,\n '_rlnCoordinateY': y_pick,\n '_rlnCoordinateZ': z_pick,\n '_rlnOriginX': shift_x,\n '_rlnOriginY': shift_y,\n '_rlnOriginZ': shift_z}\n part_row['_rlnAngleRot'] = rot\n part_row['_rlnAngleTilt'] = tilt\n part_row['_rlnAnglePsi'] = psi\n if ANGLE_NAMES[0] in do_ang_prior:\n part_row['_rlnAngleRotPrior'] = rot_prior\n if ANGLE_NAMES[1] in do_ang_prior:\n part_row['_rlnAngleTiltPrior'] = tilt_prior\n if ANGLE_NAMES[2] in do_ang_prior:\n part_row['_rlnAnglePsiPrior'] = psi_prior\n rln_star.add_row(**part_row)\n\n count += 1\n\n # Finishing the process\n qu.put(rln_star)\n 
sys.exit(pr_id)", "def snapshot_gen(ICobj):\n \n # Constants\n G = SimArray(1.0,'G')\n kB = SimArray(1.0,'k')\n # ------------------------------------\n # Load in things from ICobj\n # ------------------------------------\n # snapshot file name\n snapshotName = ICobj.settings.filenames.snapshotName\n # particle positions\n theta = ICobj.pos.theta\n r = ICobj.pos.r\n x = ICobj.pos.x\n y = ICobj.pos.y\n z = ICobj.pos.z\n # Number of particles\n nParticles = ICobj.pos.nParticles\n # Temperature power law (used for pressure gradient)\n Tpower = ICobj.settings.physical.Tpower\n # molecular mass\n m = ICobj.settings.physical.m\n # star mass\n m_star = ICobj.settings.physical.M.copy()\n # disk mass\n m_disk = ICobj.sigma.m_disk.copy()\n m_disk = isaac.match_units(m_disk, m_star)[0]\n # mass of the gas particles\n m_particles = np.ones(nParticles) * m_disk / float(nParticles)\n # re-scale the particles (allows making of lo-mass disk)\n m_particles *= ICobj.settings.snapshot.mScale\n \n # ------------------------------------\n # Initial calculations\n # ------------------------------------\n # Find total mass interior to every particle\n N_interior = np.array(r.argsort().argsort())\n m_int = m_particles[[0]]*N_interior + m_star\n # Retrieve rho (density) at each position\n rho = ICobj.rho(z,r)\n # Retrieve radial derivative at each position\n drho_dr = ICobj.rho.drho_dr(z,r)\n # Get temperature at each position\n T = ICobj.T(r)\n \n # ------------------------------------\n # Calculate particle velocities\n # ------------------------------------\n # Find keperlerian velocity squared due to gravity\n v2grav = G*m_int/r\n # Find contribution from density gradient\n v2dens = (kB*T/m)*(r*drho_dr/rho)\n # ignore nans and infs\n v2dens[(np.isnan(v2dens)) | (np.isinf(v2dens))] = 0.0\n # Find contribution from temperature gradient\n v2temp = (kB*T/m)*Tpower\n # Now find velocity from all contributions\n v = np.sqrt(v2grav + v2dens + v2temp)\n # Sometimes, at large r, the velocities due to the pressure and temp\n # Gradients become negative. 
If this is the case, set them to 0\n nanind = np.isnan(v)\n v[nanind] = 0.0\n \n # -------------------------------------------------\n # Assign output\n # -------------------------------------------------\n # Get units all set up\n m_unit = m_star.units\n pos_unit = r.units\n # time units are sqrt(L^3/GM)\n t_unit = np.sqrt((pos_unit**3)*np.power((G*m_unit), -1)).units\n # velocity units are L/t\n v_unit = (pos_unit/t_unit).ratio('km s**-1')\n # Make it a unit\n v_unit = pynbody.units.Unit('{} km s**-1'.format(v_unit))\n x.convert_units(pos_unit)\n y.convert_units(pos_unit)\n z.convert_units(pos_unit)\n \n # 3-D velocity\n vel = SimArray(np.zeros([nParticles,3]),v_unit)\n vel[:,0] = -np.sin(theta)*v\n vel[:,1] = np.cos(theta)*v\n \n # Generate positions\n xyz = SimArray(np.zeros([nParticles,3]),pos_unit)\n xyz[:,0] = x\n xyz[:,1] = y\n xyz[:,2] = z\n \n # Other settings\n eps = ICobj.settings.snapshot.eps\n star_eps = eps\n eps *= SimArray(np.ones(nParticles), pos_unit)\n metals = ICobj.settings.snapshot.metals\n star_metals = metals\n metals *= SimArray(np.ones(nParticles))\n \n # Generate snapshot\n snapshot = pynbody.new(star=1,gas=nParticles)\n snapshot.gas['vel'] = vel\n snapshot.gas['pos'] = xyz\n snapshot.gas['temp'] = T\n snapshot.gas['mass'] = m_particles\n snapshot.gas['metals'] = metals\n snapshot.gas['eps'] = eps\n snapshot.gas['mu'].derived = False\n snapshot.gas['mu'] = float(m.in_units('m_p'))\n \n snapshot.star['pos'] = SimArray([[ 0., 0., 0.]],pos_unit)\n snapshot.star['vel'] = SimArray([[ 0., 0., 0.]], v_unit)\n snapshot.star['mass'] = m_star\n snapshot.star['metals'] = SimArray(star_metals)\n snapshot.star['eps'] = SimArray(star_eps, pos_unit)\n \n param = isaac.make_param(snapshot, snapshotName)\n \n return snapshot, param", "def pc_output_buffers_full(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.phasedarray_sptr_pc_output_buffers_full(self, *args)", "def generate_scatter(dm):\n # This section places something on the surface every few units -- good for testing out the get_height method\n scatter_offset = 128\n for x in range((map_center[0] - map_size[0] // 2) + scatter_offset, (map_center[0] + map_size[0] // 2), scatter_offset):\n for y in range((map_center[1] - map_size[1] // 2) + scatter_offset, (map_center[1] + map_size[1] // 2),\n scatter_offset):\n\n slope_angle = math.degrees(dm.get_slope(dm.get_relative_position((x,y))))\n h = ground.get_height((x, y))\n\n if water_height - 64 < h < water_height - 32 and random.randrange(20) == 0:\n # This spot is in shallow water, so spawn a boat. 
(spawn it slightly higher than water level, so that it can float)\n base.PropPhysics(m, origin=Origin(x, y, water_height + 8),\n angles=Origin(0, random.randrange(360), 0),\n model=\"models/props_canal/boat001{0}.mdl\".format((\"a\", \"b\")[random.randrange(2)]))\n elif water_height + 128 < h and random.randrange(15) == 0 and slope_angle < 30:\n # We have a flat area somewhat away from the shore, so we can put a tree here.\n base.PropStatic(m, origin=Origin(x, y, h - 3),\n model=\"models/props_foliage/tree_deciduous_0{0}a.mdl\".format(random.randrange(3) + 1),\n angles=Origin(0, random.randrange(360), 0), skin=1)", "def make_springs(x_offset = -0.5, y_offset = 0.5, z_offset = 2.5, z = np.linspace(0, 15, 250)):\n \n # Generate X and labs\n labs = np.repeat(np.array([1, 2]), len(z))\n x = np.r_[np.sin(z), np.sin(z) + x_offset]\n y = np.r_[np.cos(z), np.cos(z) + y_offset]\n z = np.r_[z, z + z_offset]\n X = np.c_[x, y, z] \n\n # Return results\n return X, labs", "def generate_interlacing_grids(npts_per_dim, period=1.0):\n\n dmin, dmax = 0.0, period\n\n dx = (dmax - dmin) / float(npts_per_dim)\n\n mesh1_points = generate_3d_regular_mesh(npts_per_dim, dmin=dmin, dmax=dmax)\n mesh2_points = mesh1_points + dx / 2.0\n\n return mesh1_points, mesh2_points", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")", "def test_generate_2d_perlin_noise(self) -> None:\n periods = 1\n while periods <= 128:\n perlin_noise = NoiseGenerator.generate2DPerlinNoise(self.ROWS, self.COLS, periods)\n visualize(perlin_noise, title=\"Perlin Noise Periods={}\".format(periods))\n periods <<= 1", "def pc_output_buffers_full_var(self, *args) -> \"std::vector< float,std::allocator< float > >\":\n return _beamforming_swig.doaesprit_sptr_pc_output_buffers_full_var(self, *args)", "def psogps(data_src, min_supp=MIN_SUPPORT, max_iteration=MAX_ITERATIONS, n_particles=N_PARTICLES,\n velocity=VELOCITY, coef_p=PERSONAL_COEFF, coef_g=GLOBAL_COEFF, return_gps=False):\n # Prepare data set\n d_set = DataGP(data_src, min_supp)\n d_set.init_attributes()\n # self.target = 1\n # self.target_error = 1e-6\n attr_keys = [GI(x[0], x[1].decode()).as_string() for x in d_set.valid_bins[:, 0]]\n\n if d_set.no_bins:\n return []\n\n it_count = 0\n eval_count = 0\n counter = 0\n var_min = 0\n var_max = int(''.join(['1'] * len(attr_keys)), 2)\n\n # Empty particle template\n empty_particle = structure()\n empty_particle.position = None\n empty_particle.fitness = None\n\n # Initialize Population\n particle_pop = empty_particle.repeat(n_particles)\n for i in range(n_particles):\n particle_pop[i].position = random.randrange(var_min, var_max)\n particle_pop[i].fitness = 1\n\n pbest_pop = particle_pop.copy()\n gbest_particle = pbest_pop[0]\n\n # Best particle (ever found)\n best_particle = empty_particle.deepcopy()\n 
best_particle.position = gbest_particle.position\n best_particle.fitness = costfxn(best_particle.position, attr_keys, d_set)\n\n velocity_vector = np.ones(n_particles)\n best_fitness_arr = np.empty(max_iteration)\n best_patterns = []\n str_best_gps = list()\n str_iter = ''\n str_eval = ''\n\n repeated = 0\n while counter < max_iteration:\n # while eval_count < max_evaluations:\n # while repeated < 1:\n for i in range(n_particles):\n # UPDATED\n if particle_pop[i].position < var_min or particle_pop[i].position > var_max:\n particle_pop[i].fitness = 1\n else:\n particle_pop[i].fitness = costfxn(particle_pop[i].position, attr_keys, d_set)\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, particle_pop[i].fitness)\n\n if pbest_pop[i].fitness > particle_pop[i].fitness:\n pbest_pop[i].fitness = particle_pop[i].fitness\n pbest_pop[i].position = particle_pop[i].position\n\n if gbest_particle.fitness > particle_pop[i].fitness:\n gbest_particle.fitness = particle_pop[i].fitness\n gbest_particle.position = particle_pop[i].position\n # if abs(gbest_fitness_value - self.target) < self.target_error:\n # break\n if best_particle.fitness > gbest_particle.fitness:\n best_particle = gbest_particle.deepcopy()\n\n for i in range(n_particles):\n new_velocity = (velocity * velocity_vector[i]) + \\\n (coef_p * random.random()) * (pbest_pop[i].position - particle_pop[i].position) + \\\n (coef_g * random.random()) * (gbest_particle.position - particle_pop[i].position)\n particle_pop[i].position = particle_pop[i].position + new_velocity\n\n best_gp = validategp(d_set, decodegp(attr_keys, best_particle.position))\n \"\"\":type best_gp: GP\"\"\"\n is_present = isduplicate(best_gp, best_patterns)\n is_sub = amcheck(best_patterns, best_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if best_gp.support >= min_supp:\n best_patterns.append(best_gp)\n str_best_gps.append(best_gp.print(d_set.titles))\n # else:\n # best_particle.fitness = 1\n\n try:\n # Show Iteration Information\n best_fitness_arr[it_count] = best_particle.fitness\n str_iter += \"{}: {} \\n\".format(it_count, best_particle.fitness)\n except IndexError:\n pass\n it_count += 1\n\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"PSO-GRAD\", \"Best Patterns\": str_best_gps, \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, best_patterns\n else:\n return out", "def advance_generation(self):\n # Todo: implement\n for particle in self.particles:\n if particle.value > particle.best_value:\n particle.best_position = particle.x\n particle.best_value = particle.value\n rp = random.uniform(0.0, 1.0)\n rg = random.uniform(0.0, 1.0)\n particle.v = self.w * particle.v + self.phip * rp * (particle.best_position - particle.x) + self.phig * rg * (self.get_best_position() - particle.x)\n particle.x = particle.x + particle.v\n particle.evaluated = False", "def make_particles_stable(P8gen, above_lifetime):\n # FIXME: find time unit and add it to the docstring\n p8 = P8gen.getPythiaInstance()\n n=1\n while n!=0:\n n = p8.particleData.nextId(n)\n p = p8.particleData.particleDataEntryPtr(n)\n if p.tau0() > above_lifetime:\n command = \"{}:mayDecay = false\".format(n)\n p8.readString(command)\n print(\"Pythia8 configuration: Made {} stable for Pythia, should decay in Geant4\".format(p.name()))", "def distribute_Gaussian(self):\n\n sigma_x = np.sqrt(self.emitx*self._betax)\n sigma_xp = np.sqrt(self.emitx*self._gammax)\n\n sigma_y = 
np.sqrt(self.emity*self._betay)\n sigma_yp = np.sqrt(self.emity*self._gammay)\n\n self.particles[:,0] = np.random.randn(self.npart)*sigma_x #set x-coordinates\n self.particles[:,1] = np.random.randn(self.npart)*sigma_xp #set xp-coordinates\n self.particles[:,2] = np.random.randn(self.npart)*sigma_y #set y-coordinates\n self.particles[:,3] = np.random.randn(self.npart)*sigma_yp #set yp-coordinates" ]
[ "0.612357", "0.6045421", "0.58844817", "0.5861245", "0.5850004", "0.57011443", "0.5697617", "0.56546646", "0.56274694", "0.5621595", "0.558431", "0.5581051", "0.5563471", "0.55546993", "0.5529091", "0.5474343", "0.5459851", "0.5450769", "0.54444504", "0.5442273", "0.54061013", "0.5380784", "0.53734106", "0.5352818", "0.5309615", "0.53055173", "0.53035873", "0.5256503", "0.5250319", "0.52325004", "0.5226785", "0.52202433", "0.5207419", "0.51946497", "0.51673704", "0.5163167", "0.51617044", "0.5149633", "0.51477766", "0.51338464", "0.5133069", "0.51325196", "0.51314086", "0.5094454", "0.50769365", "0.50694114", "0.5064995", "0.506301", "0.50595576", "0.50456315", "0.50404716", "0.5039141", "0.5038867", "0.5037132", "0.50366837", "0.50289947", "0.5022559", "0.5009087", "0.49972305", "0.4995842", "0.49891177", "0.49776196", "0.4975882", "0.4972564", "0.49706382", "0.4952289", "0.49519512", "0.49487284", "0.4948592", "0.4948181", "0.49468425", "0.4942238", "0.49393946", "0.4936511", "0.49362737", "0.4929815", "0.49252322", "0.4918907", "0.49185136", "0.49150974", "0.49142882", "0.4911741", "0.49091038", "0.4908294", "0.490668", "0.49052128", "0.49036494", "0.48991862", "0.48973918", "0.488653", "0.48857918", "0.48825917", "0.4880993", "0.48731697", "0.48710725", "0.4869299", "0.48669794", "0.48667282", "0.4865309", "0.48625824" ]
0.69258934
0
Shift the spectral fields by n_move cells (with respect to the spatial grid). Shifting is done on the GPU if use_cuda is True, and on the CPU otherwise. (Typically n_move is positive, and the fields are shifted backwards.)
def shift_spect_grid( self, grid, n_move, shift_rho=True, shift_currents=True ): if grid.use_cuda: shift = grid.d_field_shift # Get a 2D CUDA grid of the size of the grid tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] ) # Shift all the fields on the GPU shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move ) if shift_rho: shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move ) if shift_currents: shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move ) shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move ) else: shift = grid.field_shift # Shift all the fields on the CPU shift_spect_array_cpu( grid.Ep, shift, n_move ) shift_spect_array_cpu( grid.Em, shift, n_move ) shift_spect_array_cpu( grid.Ez, shift, n_move ) shift_spect_array_cpu( grid.Bp, shift, n_move ) shift_spect_array_cpu( grid.Bm, shift, n_move ) shift_spect_array_cpu( grid.Bz, shift, n_move ) if shift_rho: shift_spect_array_cpu( grid.rho_prev, shift, n_move ) if shift_currents: shift_spect_array_cpu( grid.Jp, shift, n_move ) shift_spect_array_cpu( grid.Jm, shift, n_move ) shift_spect_array_cpu( grid.Jz, shift, n_move )
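For orientation, the per-mode phase multiplication that the shift kernels apply can be sketched in plain NumPy; the helper name, array shapes and the example values below are illustrative assumptions, not part of this dataset row. Each longitudinal mode is multiplied by shift_factor raised to the power n_move (with the complex conjugate for negative n_move), where shift_factor is of the form e^{i k dz}, so the multiplication amounts to a translation of the field by n_move cells along z.

import numpy as np

def shift_spect_array_numpy(field_array, shift_factor, n_move):
    # Minimal sketch of the loop-based CPU/GPU kernels: raise the
    # per-mode factor to the power |n_move|, conjugate it for a
    # negative n_move, and broadcast it over the radial axis.
    power_shift = shift_factor ** abs(n_move)        # shape (Nz,)
    if n_move < 0:
        power_shift = power_shift.conjugate()
    return field_array * power_shift[:, np.newaxis]  # field_array has shape (Nz, Nr)

# Usage sketch: shift an (Nz, Nr) spectral field by 3 cells of size dz
Nz, Nr, dz = 8, 4, 0.1
kz = 2 * np.pi * np.fft.fftfreq(Nz, d=dz)
shift_factor = np.exp(1.j * kz * dz)                 # e^{i k dz}
field = np.random.rand(Nz, Nr) + 1.j * np.random.rand(Nz, Nr)
shifted = shift_spect_array_numpy(field, shift_factor, n_move=3)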
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_spect_array_gpu( field_array, shift_factor, n_move ):\n # Get a 2D CUDA grid\n iz, ir = cuda.grid(2)\n\n # Only access values that are actually in the array\n if ir < field_array.shape[1] and iz < field_array.shape[0]:\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift fields\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def shift_spect_array_cpu( field_array, shift_factor, n_move ):\n Nz, Nr = field_array.shape\n\n # Loop over the 2D array (in parallel over z if threading is enabled)\n for iz in prange( Nz ):\n power_shift = 1. 
+ 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift the fields\n for ir in range( Nr ):\n field_array[iz, ir] *= power_shift", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)", "def grid_shift(grid, advection, trim_edges=0, field_list=None):\n if trim_edges == 0:\n trim_slice = slice(None, None)\n else:\n trim_slice = slice(int(trim_edges), -int(trim_edges))\n\n shifted_grid = copy.deepcopy(grid)\n\n # grab the x and y axis and trim\n shifted_grid.x[\"data\"] = grid.x[\"data\"][trim_slice].copy()\n shifted_grid.y[\"data\"] = grid.y[\"data\"][trim_slice].copy()\n\n # shift each field.\n if field_list is None:\n field_list = grid.fields.keys()\n\n for field in field_list:\n # copy data and fill with nans\n data = grid.fields[field][\"data\"].copy()\n data = np.ma.filled(data, np.nan)\n\n # shift the data\n shifted_data = shift(data, [0, advection[0], advection[1]], prefilter=False)\n\n # mask invalid, trim and place into grid\n shifted_data = np.ma.fix_invalid(\n shifted_data, copy=False, fill_value=get_fillvalue()\n )\n shifted_data = shifted_data[:, trim_slice, trim_slice]\n shifted_grid.fields[field][\"data\"] = shifted_data\n\n return shifted_grid", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def _data_move_in_mc_on_w(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, h_size, w_size, w_offset = data_pos_info\n data_cnt_one_block = _get_elment_cnt_one_block(src.dtype)\n sub_w_block = _ceil_div(sub_w_size, data_cnt_one_block)\n sub_h_align_block_size = sub_h_size // data_cnt_one_block * data_cnt_one_block\n sub_h_left = sub_h_size % data_cnt_one_block\n is_not_w_block_align = w_size % data_cnt_one_block > 0\n is_h_size_smaller_one_block = h_size < data_cnt_one_block\n\n def _move_in_one_more_block():\n \"\"\"\n move in one more block of h when h > sub_h and sub_h is not block align\n \"\"\"\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)\n\n with tik_inst.if_scope(is_not_w_block_align):\n # sub_h is block align or h is not enough one block\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n with 
tik_inst.else_scope():\n _move_in_one_more_block()\n\n with tik_inst.else_scope():\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n src_strides = w_size // data_cnt_one_block - sub_w_block\n # mte max strides value is 65535\n with tik_inst.if_scope(src_strides > MTE_STRIDES):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx_2:\n tik_inst.data_move(dst[sub_w_size * sub_h_idx_2],\n src[w_offset + w_size * sub_h_idx_2],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst, src[w_offset], 0, sub_h_size, sub_w_block, src_strides, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx_0:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx_0],\n src[in_offset + sub_h_idx_0 * w_size],\n 0, 1, sub_w_block, 0, 0)\n # move in one more block of h\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(\n dst[sub_w_block * data_cnt_one_block * (sub_h_align_block_size + sub_h_idx_1)],\n src[in_offset + (sub_h_idx_1 + sub_h_size - data_cnt_one_block) * w_size],\n 0, 1, sub_w_block, 0, 0)", "def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()", "def _data_move_out_mc_on_w(tik_inst, dst, src, data_pos_info):\n\n # sub_h_size is the original value without any change\n sub_h_size, sub_w_size, h_size, w_size, out_offset = data_pos_info\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n\n def _sub_h_not_block_align_bigger_one_block():\n \"\"\"\n sub_h_size is not block align, sub_h_size is bigger than one 
block\n \"\"\"\n\n sub_h_block = sub_h_size // data_size_one_block\n with tik_inst.for_range(0, sub_w_size) as sub_w_idx_2:\n with tik_inst.if_scope(sub_h_block > 0):\n tik_inst.data_move(\n dst[out_offset + sub_w_idx_2 * h_size],\n src[sub_w_idx_2 * (sub_h_block + 1) * data_size_one_block],\n 0, 1, sub_h_block, 0, 0)\n # move in one more block for this case\n tik_inst.data_move(\n dst[out_offset + sub_w_idx_2 * h_size + sub_h_size - data_size_one_block],\n src[sub_w_idx_2 * (sub_h_block + 1) * data_size_one_block +\n sub_h_block * data_size_one_block],\n 0, 1, 1, 0, 0)\n\n with tik_inst.if_scope(sub_h_size == h_size):\n # the data order in ub is the expected order\n sub_hw_size = sub_h_size * sub_w_size\n with tik_inst.if_scope(h_size % data_size_one_block == 0):\n tik_inst.data_move(dst[out_offset],\n src,\n 0, 1, sub_hw_size // data_size_one_block, 0, 0)\n with tik_inst.else_scope():\n # sub_h_size is smaller than one block\n with tik_inst.if_scope(h_size < data_size_one_block):\n # the data_move will move 1 block at least\n with tik_inst.if_scope(sub_hw_size < data_size_one_block):\n tik_inst.data_move(dst[out_offset],\n src,\n 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n sub_hw_block = sub_hw_size // data_size_one_block\n tik_inst.data_move(dst[out_offset],\n src,\n 0, 1, sub_hw_block, 0, 0)\n # in order to avoid dirty data\n with tik_inst.new_stmt_scope():\n temp_reg = [tik_inst.Scalar(src.dtype)\n for i in ADDR_IDX_LIST[:data_size_one_block]]\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n temp_reg[idx].set_as(src[sub_hw_size - data_size_one_block + idx])\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n src[idx].set_as(temp_reg[idx])\n tik_inst.data_move(dst[out_offset + sub_hw_size - data_size_one_block],\n src, 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n # sub_h_size is not block align, sub_h_size is bigger than one block\n _sub_h_not_block_align_bigger_one_block()\n\n with tik_inst.else_scope():\n # h_size > sub_h_size, h_size is block align\n stride_cnt = (h_size - sub_h_size) // data_size_one_block\n with tik_inst.if_scope(tik.all(h_size % data_size_one_block == 0,\n stride_cnt <= MTE_STRIDES)):\n tik_inst.data_move(dst[out_offset],\n src,\n 0, sub_w_size, sub_h_size // data_size_one_block, 0, stride_cnt)\n with tik_inst.else_scope():\n # h_size is not block align, sub_h_size is block align\n with tik_inst.if_scope(sub_h_size % data_size_one_block == 0):\n with tik_inst.for_range(0, sub_w_size) as sub_w_idx:\n tik_inst.data_move(dst[out_offset + sub_w_idx * h_size],\n src[sub_w_idx * sub_h_size],\n 0, 1, sub_h_size // data_size_one_block, 0, 0)\n with tik_inst.else_scope():\n _sub_h_not_block_align_bigger_one_block()", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def moving(filtertype, S0, n):\n print('-------------------------- moving')\n \n # Constants:\n S = S0.copy() # Avoid overwritting data:\n S_new = np.zeros(len(S))\n nzero = 
np.zeros(2*n+1)\n \n # Moving median filter:\n if filtertype=='median':\n print 'Moving median filter'\n # Interval: d[n, 1+n, ... , N-1, N-n]\n for i in range(len(S)-2*n): \n S_new[n+i] = np.median(S[range((n+i)-n, (n+i)+n+1)])\n for i in range(n):\n # Interval: d[-n, -(n-1), ... , n-1, n] - Low end of data\n low = nzero\n low[range(n-i)] = S[0]*np.ones(n-i)\n low[-(n+1+i):] = S[range(0, n+1+i)]\n S_new[i] = np.median(low)\n # Interval: d[N-n, N-(n-1), ... , N+(n-1), N+n] - High end of data\n high = nzero\n high[range(n+1+i)] = S[range(len(S)-(n+i+1), len(S))]\n high[-(n-i):] = S[-1]*np.ones(n-i)\n S_new[len(S)-1-i] = np.median(high)\n\n # Moving mean filter:\n if filtertype=='mean':\n print 'Moving mean filter'\n # Interval: d[n, 1+n, ... , N-1, N-n]\n for i in range(len(S)-2*n): \n S_new[n+i] = np.mean(S[range((n+i)-n, (n+i)+n+1)])\n for i in range(n):\n # Interval: d[-n, -(n-1), ... , n-1, n] - Low end of data\n low = nzero\n low[range(n-i)] = S[0]*np.ones(n-i)\n low[-(n+1+i):] = S[range(0, n+1+i)]\n S_new[i] = np.mean(low)\n # Interval: d[N-n, N-(n-1), ... , N+(n-1), N+n] - High end of data\n high = nzero\n high[range(n+1+i)] = S[range(len(S)-(n+1+i), len(S))]\n high[-(n-i):] = S[-1]*np.ones(n-i)\n S_new[len(S)-1-i] = np.mean(high)\n\n # Output:\n return S_new", "def _shift_amplitudes(qc, n, inplace=False):\n if not inplace:\n qc = qc.copy()\n for q_reg in qc.qregs:\n # Unitary gate representing the shift operation on n qubits\n shift_matrix = np.roll(np.eye(2**q_reg.size), n, axis=1)\n # Add the gate to the circuit\n qc.append(UnitaryGate(shift_matrix), q_reg)\n return qc", "def pixelMove(*args, **kwargs)->None:\n pass", "def shift(self):\n \"\"\"\n shift cluster randomly within bounds of im\n \"\"\"\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.im_size - self.mid_pixel - r - 10\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n im_shift = np.roll(self.im,shift=y,axis=0)\n self.im = np.roll(im_shift,shift=x,axis=1)\n \n return", "def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0", "def move(self, direction):\r\n # replace with your code\r\n row_increment = OFFSETS[direction][0]\r\n col_increment = OFFSETS[direction][1]\r\n changed = False\r\n for header in self._grid_headers[direction]:\r\n row_header = header[0]\r\n col_header = header[1]\r\n source_line = []\r\n # get the source line first\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n source_line.append(self.get_tile(row_header, col_header))\r\n row_header += row_increment\r\n col_header += col_increment\r\n # merge\r\n result_line = merge(source_line)\r\n # write the result back\r\n row_header = header[0]\r\n col_header = header[1]\r\n result_line_index = 0\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n self.set_tile(row_header, col_header, result_line[result_line_index])\r\n if result_line[result_line_index] != source_line[result_line_index]:\r\n changed = True\r\n result_line_index += 1\r\n row_header += row_increment\r\n col_header += col_increment\r\n if changed:\r\n self.new_tile()", "def memmove(self, grid):\n self.moveList.append((self.x, self.y))\n self.moveList2.append((self.x, self.y))\n waysList = []\n for f in range(4):\n if self.test(grid, f):\n if 
(self.x+SPEED_X[f], self.y+SPEED_Y[f]) not in self.moveList:\n waysList.append(f)\n if len(waysList) == 1:\n self.flag = waysList[0]\n self.move(grid)\n return\n elif len(waysList) == 4:\n self.flag = 0\n self.move(grid)\n return\n elif len(waysList) > 1:\n for f in waysList:\n self.mem.append((self.x, self.y, f))\n self.x, self.y, self.flag = self.mem[-1]\n elif len(waysList) == 0:\n self.x, self.y, self.flag = self.mem[-1]\n for i in range(len(self.moveList2)):\n if self.moveList2[i][0] == self.x and self.moveList2[i][1] == self.y:\n del self.moveList2[i+1:]\n break\n self.move(grid)\n self.mem.pop()", "def fun_no_cut(self, reg_x_len, n_size, block_index, n_loop):\n data_input_ub = self.tik_instance.Tensor(self.dtype_x,\n self.shape_v,\n name=\"data_input_ub\",\n scope=tik.scope_ubuf)\n input_indices_ub = self.tik_instance.Tensor(self.dtype_indices, (8,),\n name=\"input_indices_ub\",\n scope=tik.scope_ubuf)\n self.tik_instance.data_move(input_indices_ub[0],\n self.input_indices_gm[0], 0, 1, 1, 0, 0)\n reg_start = self.tik_instance.Scalar(dtype=\"int32\")\n reg_start.set_as(input_indices_ub[0])\n reg_burst = self.tik_instance.Scalar(dtype=\"int32\")\n if self.dtype_x in (\"float32\", \"int32\"):\n reg_burst.set_as(reg_x_len // 8)\n else:\n reg_burst.set_as(reg_x_len // 16)\n\n with self.tik_instance.for_range(0, n_loop) as n_index:\n with self.tik_instance.if_scope(\n block_index * n_size + n_index != reg_start):\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_x_gm[(block_index * n_size + n_index) *\n reg_x_len], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(data_input_ub[0],\n self.input_v_gm[0], 0, 1, reg_burst,\n 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[(block_index * n_size + n_index) * reg_x_len],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)", "def piecewise_transform(image, numcols=5, numrows=5, warp_left_right=10, warp_up_down=10, order=1):\n\n rows, cols = image.shape[0], image.shape[1]\n\n numcols = numcols\n numrows = numrows\n\n src_cols = np.linspace(0, cols, numcols, dtype=int)\n src_rows = np.linspace(0, rows, numrows, dtype=int)\n src_rows, src_cols = np.meshgrid(src_rows, src_cols)\n src = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n src_rows_new = np.ndarray.transpose(src_rows)\n src_cols_new = np.ndarray.transpose(src_cols)\n # src_new = np.dstack([src_cols_new.flat, src_rows_new.flat])[0]\n\n dst_cols = np.ndarray(src_cols.shape)\n dst_rows = np.ndarray(src_rows.shape)\n for i in range(0, numcols):\n for j in range(0, numrows):\n if src_cols[i, j] == 0 or src_cols[i, j] == cols:\n dst_cols[i, j] = src_cols[i, j]\n else:\n dst_cols[i, j] = src_cols[i, j] + np.random.uniform(-1, 1) * warp_left_right\n\n if src_rows[i, j] == 0 or src_rows[i, j] == rows:\n dst_rows[i, j] = src_rows[i, j]\n else:\n dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down\n\n dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]\n\n # dst_rows_new = np.ndarray.transpose(dst_rows)\n # dst_cols_new = np.ndarray.transpose(dst_cols)\n # dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]\n\n tform = transform.PiecewiseAffineTransform()\n tform.estimate(src, dst)\n\n img_new = transform.warp(image, tform, output_shape=(rows, cols), order=order, preserve_range=True)\n img_new = img_new.astype(image.dtype)\n \n return img_new", "def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self", "def move(self):\n 
self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def hanoi(n, source, target, helper):\n if n > 0:\n hanoi(n-1, source, helper, target)\n print(\"move disk from\", source, \"to\", target)\n hanoi(n-1, helper, target, source)", "def warp(x, flo):\n x=torch.squeeze(x,2)\n flo=torch.squeeze(flo,2)\n B, C, H, W = x.size()\n # mesh grid \n xx = torch.arange(0, W).view(1,-1).repeat(H,1)\n yy = torch.arange(0, H).view(-1,1).repeat(1,W)\n xx = xx.view(1,1,H,W).repeat(B,1,1,1)\n yy = yy.view(1,1,H,W).repeat(B,1,1,1)\n grid = torch.cat((xx,yy),1).float()\n\n #if x.is_cuda:\n # grid = grid.cuda()\n vgrid = torch.Tensor(grid).cuda() - flo.cuda()\n\n # scale grid to [-1,1] \n vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone() / max(W-1,1)-1.0\n vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone() / max(H-1,1)-1.0\n\n vgrid = vgrid.permute(0,2,3,1) \n #x=x.cuda()\n output = nn.functional.grid_sample(x, vgrid,mode='bilinear')\n mask = torch.Tensor(torch.ones(x.size())).cuda()\n mask = nn.functional.grid_sample(mask, vgrid,mode='bilinear')\n\n # if W==128:\n # np.save('mask.npy', mask.cpu().data.numpy())\n # np.save('warp.npy', output.cpu().data.numpy())\n \n mask[mask<0.9999] = 0\n mask[mask>0] = 1\n return torch.unsqueeze(output,2),torch.unsqueeze(mask,2)", "def shift(shape, stride, anchors):\n shift_x = (keras.backend.arange(0, shape[1], dtype=keras.backend.floatx()) + keras.backend.constant(0.5,\n dtype=keras.backend.floatx())) * stride\n shift_y = (keras.backend.arange(0, shape[0], dtype=keras.backend.floatx()) + keras.backend.constant(0.5,\n dtype=keras.backend.floatx())) * stride\n\n shift_x, shift_y = meshgrid(shift_x, shift_y)\n shift_x = keras.backend.reshape(shift_x, [-1])\n shift_y = keras.backend.reshape(shift_y, [-1])\n\n shifts = keras.backend.stack([\n shift_x,\n shift_y,\n shift_x,\n shift_y\n ], axis=0)\n\n shifts = keras.backend.transpose(shifts)\n number_of_anchors = keras.backend.shape(anchors)[0]\n\n k = keras.backend.shape(shifts)[0] # number of base points = feat_h * feat_w\n\n shifted_anchors = keras.backend.reshape(anchors, [1, number_of_anchors, 4]) + keras.backend.cast(\n keras.backend.reshape(shifts, [k, 1, 4]), keras.backend.floatx())\n shifted_anchors = keras.backend.reshape(shifted_anchors, [k * number_of_anchors, 4])\n\n return shifted_anchors", "def flow_to_warp(flow):\n batch, _, ht, wd = flow.shape\n coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))\n coords = torch.stack(coords[::-1], dim=0).float()\n coords = coords[None].repeat(batch, 1, 1, 1)\n return coords + flow", "def _schedule_winograd(cfg, s, op):\n # get ops and tensors\n output = op.output(0)\n\n Y = op.input_tensors[0]\n M, A = s[Y].op.input_tensors\n U, V = s[M].op.input_tensors\n d, B = s[V].op.input_tensors\n data_pad = s[d].op.input_tensors[0]\n\n # padding\n s[data_pad].compute_inline()\n\n # transform kernel\n if isinstance(U.op, tvm.te.ComputeOp):\n kernel, G = s[U].op.input_tensors\n s[G].compute_inline()\n (eps, nu, co, ci, vco) = s[U].op.axis\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n r_kh, r_kw = s[U].op.reduce_axis\n s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)\n _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]\n s[U].vectorize(vco)\n tile_and_bind(s, U, co, ci, 1, 256)\n\n # dilation\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # transform image\n 
s[B].compute_inline()\n VL = s.cache_write(V, \"local\")\n\n eps, nu, p, ci, vp = s[V].op.axis\n s[V].reorder(p, ci, eps, nu, vp)\n for axis in [eps, nu]:\n s[V].unroll(axis)\n s[V].vectorize(vp)\n fused = s[V].fuse(p, ci)\n\n bb, tt = cfg[\"tile_t1\"].apply(s, V, fused)\n s[V].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[V].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n eps, nu, p, ci, vp = s[VL].op.axis\n r_a, r_b = s[VL].op.reduce_axis\n for axis in [eps, nu, r_a, r_b]:\n s[VL].unroll(axis)\n s[VL].vectorize(vp)\n s[d].compute_at(s[V], tt)\n s[VL].compute_at(s[V], tt)\n\n # batch gemm\n bna = cfg[\"tile_bna\"].val\n bnb = cfg[\"tile_bnb\"].val\n\n eps, nu, k, b = s[M].op.axis\n alpha = eps.dom.extent\n c = s[M].op.reduce_axis[0]\n yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)\n c, c_unroll = cfg[\"c_unroll\"].apply(s, M, c)\n s[M].reorder(yo, xo, c, c_unroll, yi, xi)\n s[M].unroll(c_unroll)\n s[M].unroll(yi)\n s[M].vectorize(xi)\n z = s[M].fuse(eps, nu)\n tile_and_bind3d(s, M, z, yo, xo, 1, cfg[\"yt\"].val, 1)\n\n # inverse transform\n s[A].compute_inline()\n k, b, vh, vw = s[Y].op.axis\n r_a, r_b = s[Y].op.reduce_axis\n for axis in [vh, vw, r_a, r_b]:\n s[Y].unroll(axis)\n\n # schedule output and fusion\n if output.op not in s.outputs:\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n m = alpha - 3 + 1\n h, w, hi, wi = s[output].tile(h, w, m, m)\n s[output].unroll(hi)\n s[output].unroll(wi)\n fused = s[output].fuse(n, co, h, w)\n bb, tt = cfg[\"tile_t2\"].apply(s, output, fused)\n s[output].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n s[Y].compute_at(s[output], tt)", "def init_shiftind(self, n_t):\n i = np.arange(n_t * n_t)\n i2 = np.arange(n_t).repeat(n_t)\n ik = np.arange(n_t).repeat(n_t)\n ii = np.arange(n_t)[np.newaxis].repeat(n_t, 0).flatten()\n\n si = ik * n_t + (ik + ii) % n_t\n self.shiftinds_fwd = np.roll(si.reshape((n_t, n_t)), int((n_t - 1) / 2), 1)[:, ::-1].flatten()\n\n si = ik * n_t + (ii - ik) % n_t\n self.shiftinds_back = np.roll(np.arange(n_t * n_t).reshape((n_t, n_t))[:, ::-1], -int((n_t - 1) / 2), 1).flatten()[si]\n\n self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int)\n # self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()", "def move_raw(self, pos):\n return self.put_par(\"raw_drive\", pos)", "def shift(self):\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.size - self.mid_pixel - r\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n image_shift = np.roll(self.image,shift=x,axis=0)\n self.image = np.roll(image_shift,shift=y,axis=1)\n \n return", "def _move(self, pos):\n self.put_par(\"drive\", pos)", "def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])", "def update(frame_num, mat, grid, N):\n\n new_grid = np.copy(grid)\n #print(\"grid size:\", grid.shape)\n for i in 
range(1, grid.shape[0]-1):\n for j in range(1, grid.shape[1]-1):\n neighbors = int(grid[i-1, j] + grid[i+1, j] + \\\n grid[i, j+1] + grid[i, j-1] + \\\n grid[i-1,j-1] + grid[i+1,j+1] + \\\n grid[i+1,j-1] + grid[i-1,j+1])\n if grid[i, j] == ON:\n if not (2 <= neighbors <= 3):\n new_grid[i, j] = OFF\n elif grid[i, j] == OFF and neighbors == 3:\n # Grow a cell\n new_grid[i, j] = ON\n else:\n new_grid[i, j] = OFF\n\n ### Update new grid\n mat.set_data(new_grid)\n grid[:] = new_grid[:] # Brackets are important\n return mat", "def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n\tfactor = 2**(1.0 * n / 12.0)\n\tstretched = stretch(snd_array, 1.0/factor, window_size, h)\n\treturn speedx(stretched[window_size:], factor)", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def test_coreg_shift(self):\r\n with gw.open(l8_224077_20200518_B2) as target, gw.open(\r\n l8_224077_20200518_B4\r\n ) as reference:\r\n with tempfile.TemporaryDirectory() as tmp:\r\n # Shift by 1 pixel in each direction\r\n target_shifted = shift(target, x=1, y=1)\r\n tmp_file = Path(tmp) / '_tmp_shift.tif'\r\n target_shifted.gw.save(tmp_file, overwrite=True)\r\n with gw.open(tmp_file) as target_shifted:\r\n # Co-register the shifted data\r\n shifted = gw.coregister(\r\n target=target_shifted,\r\n reference=reference,\r\n ws=(256, 256),\r\n r_b4match=1,\r\n s_b4match=1,\r\n max_shift=5,\r\n resamp_alg_deshift='nearest',\r\n resamp_alg_calc='cubic',\r\n out_gsd=[target_shifted.gw.celly, reference.gw.celly],\r\n q=True,\r\n nodata=(0, 0),\r\n CPUs=1,\r\n )\r\n self.assertFalse(\r\n np.allclose(\r\n target.values[:, :-1, :-1],\r\n target_shifted.values[:, :-1, :-1],\r\n )\r\n )\r\n # Check if the corrected data match the original (unshifted) target\r\n self.assertTrue(\r\n np.allclose(\r\n target.values[:, :-1, :-1],\r\n shifted.values[:, :-1, :-1],\r\n )\r\n )", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def move_coarse(self, direction, count=1):\n if self._direction != direction and self.simulate_backlash:\n self._direction = direction\n backlash_offset = randint(-maximum_backlash, maximum_backlash)\n self._move(direction, 1, 8 + backlash_offset)\n self._move(direction, count - 1, 8)\n self.backlash_count += 1\n else:\n self._direction = direction\n self._move(direction, count, 8)", "def movingWindow(rawData, n):\n data = 
np.array([rawData[i:i+n] for i in range(rawData.shape[0] - (n-1))])\n return data", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def move(self, is_forward):\n wh, lh = self.get_heading\n self.w += wh\n self.l += lh\n if self.get_pos() == blocks['wall']:\n self.w -= wh\n self.l -= lh", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def shifts_projection(sc, clean):\n def shifts_projected(clean, axis):\n projected = clean.map(lambda x: x.mean(axis=axis)[:, :, np.newaxis])\n target = getTarget(projected, 30, 1)\n shifts = registerByPlane(sc, projected, target[:, :, np.newaxis], 10, False)\n return shifts[:, :, 0]\n\n # shifts_xy = shifts_projected(clean, 2)\n shifts_xz = shifts_projected(clean, 1)\n shifts_yz = shifts_projected(clean, 0)\n\n # x_shifts = np.mean(np.stack((shifts_xz[:, 0], shifts_xy[:, 0])), axis=0)\n z_shifts = 
np.mean(np.stack((shifts_xz[:, 1], shifts_yz[:, 1])), axis=0)\n # y_shifts = np.mean(np.stack((shifts_yz[:, 0], shifts_xy[:, 1])), axis=0)\n plt.figure()\n plt.plot(shifts_xz[:, 1])\n plt.plot(shifts_yz[:, 1])\n plt.plot(z_shifts)\n plt.title('Z')\n # plt.figure()\n # plt.plot(shifts_xz[:, 0])\n # plt.plot(shifts_xy[:, 0])\n # plt.plot(x_shifts)\n # plt.title('X')\n # plt.figure()\n # plt.plot(shifts_yz[:, 0])\n # plt.plot(shifts_xy[:, 1])\n # plt.plot(y_shifts)\n # plt.title('Y')\n # shifts_all = np.stack((x_shifts, y_shifts, z_shifts))\n\n def initReg(kv):\n from scipy.ndimage.interpolation import shift\n index, volume = kv\n current_shift = (0, 0, -1 * z_shifts[int(index[0])])\n shifted = shift(volume, current_shift)\n return shifted.astype(np.int16)\n\n reg = clean.map(initReg, with_keys=True, value_shape=clean.shape[1:], dtype=np.int16)\n reg.cache()\n reg.count()\n return reg", "def ShiftFrame(Frame, PixShift):\n \n import numpy as np\n \n F, R, C = Frame.shape\n \n if F > 1:\n msg = f\"'Frame' must be a 2D frame with shape (1, R, C) but has shape\"\\\n + f\" ({F}, {R}, {C}).\"\n \n raise Exception(msg)\n \n # Initialise ShiftedFrame:\n ShiftedFrame = np.zeros((1, R, C), dtype='uint')\n #ShiftedFrame = np.empty_like(Frame, dtype='uint') # this creates 42,932\n # unique values for some reason!\n \n #unique = UniqueItems(Nda=Frame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in Frame')\n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the initialised',\n # f'ShiftedFrame: {unique[:11]}...')\n \n di, dj, dk = PixShift\n \n ##ShiftedFrame[0, dj:, di:] = Frame[0, :-(1+dj), :-(1+di)]\n ##ShiftedFrame[0, :-(1+dj), :-(1+di)] = Frame[0, dj:, di:]\n #ShiftedFrame[0, :R-dj, :C-di] = Frame[0, dj:, di:]\n \n if di > 0 and dj > 0:\n ShiftedFrame[0, dj:, di:] = Frame[0, :-dj, :-di]\n \n elif di < 0 and dj < 0:\n ShiftedFrame[0, :dj, :di] = Frame[0, -dj:, -di:]\n \n elif di > 0 and dj < 0:\n ShiftedFrame[0, :dj, di:] = Frame[0, -dj:, :-di]\n \n elif di < 0 and dj > 0:\n ShiftedFrame[0, dj:, :di] = Frame[0, :-dj, -di:]\n \n elif di == 0 and dj > 0:\n ShiftedFrame[0, dj:, :] = Frame[0, :-dj, :]\n \n elif di == 0 and dj < 0:\n ShiftedFrame[0, :dj, :] = Frame[0, -dj:, :]\n \n elif di > 0 and dj == 0:\n ShiftedFrame[0, :, di:] = Frame[0, :, :-di]\n \n elif di < 0 and dj == 0:\n ShiftedFrame[0, :, :di] = Frame[0, :, -di:]\n \n elif di == 0 and dj == 0:\n ShiftedFrame[0] = Frame[0]\n \n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the ShiftedFrame',\n # 'after shifting.')\n \n return ShiftedFrame", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def rgbArray_move(self, rgbList, delay):\n # res\n\n res = self.rgbArrayOfs_move(0,rgbList,delay)\n return res", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def move(self, move):\n raise NotImplementedError()", "def 
move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def move(self, n: int) -> \"Linked[T]\":\n out = self\n if n >= 0:\n for _ in range(n):\n out = out.forward\n else:\n for _ in range(-n):\n out = out.backward\n return out", "def _data_move_in_last_dim_be_one_block(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, h_size, w_size, in_offset = data_pos_info\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n is_w_block_align = w_size % data_size_one_block == 0\n sub_w_block = sub_w_size // data_size_one_block\n sub_w_left = sub_w_size % data_size_one_block\n w_block = w_size // data_size_one_block\n stride_len = w_block - sub_w_block\n\n with tik_inst.if_scope(is_w_block_align):\n with tik_inst.if_scope(tik.any(stride_len > MTE_STRIDES, sub_h_size > REPEAT_LIMIT_MTE)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_h_idx * sub_w_size],\n src[sub_h_idx * w_size + in_offset],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n with tik_inst.if_scope(stride_len > 0):\n tik_inst.data_move(dst, src[in_offset], 0, sub_h_size, sub_w_block, stride_len, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst, src[in_offset], 0, 1, sub_h_size * sub_w_block, 0, 0)\n\n with tik_inst.else_scope():\n # suppose w_size > data_size_one_block\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n with tik_inst.if_scope(sub_w_block > 0):\n tik_inst.data_move(dst[sub_h_idx * (sub_w_block + 1) * data_size_one_block],\n src[sub_h_idx * w_size + in_offset],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.if_scope(sub_w_left > 0):\n tik_inst.data_move(dst[(sub_h_idx * (sub_w_block + 1) + sub_w_block) *\n data_size_one_block],\n src[sub_h_idx * w_size + in_offset + sub_w_size -\n data_size_one_block],\n 0, 1, 1, 0, 0)", "def shift_image(img, shft_int = 1):\n no_cols = img[0].shape[1]\n lst_col = no_cols - 1\n col_sty = no_cols - shft_int \n col_idx = torch.cat([torch.zeros(col_sty, dtype = torch.bool),\n torch.ones(shft_int, dtype = torch.bool)])\n cols = torch.reshape(img[0][0,:,col_idx], (no_cols,shft_int))\n cols_sum = torch.sum(cols)\n inval_shft = torch.is_nonzero(cols_sum)\n\n if inval_shft:\n col_idx = torch.cat([torch.ones(shft_int, dtype = torch.bool),\n torch.zeros(col_sty, dtype = torch.bool)])\n cols = torch.reshape(img[0][0,:,col_idx], (no_cols,shft_int))\n cols_sum = torch.sum(cols)\n inval_shft = torch.is_nonzero(cols_sum)\n if inval_shft:\n raise ValueError('Consider shifting along another axis.')\n mod_img = torch.cat([img[0][0,:,~col_idx],cols], dim = 1)\n mod_img = torch.reshape(mod_img, (1,mod_img.shape[0], mod_img.shape[1]))\n mod_img = (mod_img,img[1])\n return mod_img\n \n mod_img = torch.cat([cols,img[0][0,:,~col_idx]], dim = 1)\n mod_img = torch.reshape(mod_img, (1,mod_img.shape[0], mod_img.shape[1]))\n mod_img = (mod_img,img[1])\n return mod_img", "def step(self, move):\r\n self.board.push_uci(move)\r\n self.num_halfmoves += 1", "def _data_move_out_last_dim_be_one_block(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, c_size, h_size, w_size, out_offset = data_pos_info\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n is_w_block_align = w_size % data_size_one_block == 0\n sub_w_block = sub_w_size // data_size_one_block\n sub_w_left = sub_w_size % data_size_one_block\n cw_block = c_size * w_size // data_size_one_block\n stride_len = cw_block - sub_w_block\n\n with 
tik_inst.if_scope(is_w_block_align):\n with tik_inst.if_scope(tik.any(stride_len > MTE_STRIDES, sub_h_size > REPEAT_LIMIT_MTE)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_h_idx * c_size * w_size + out_offset],\n src[sub_h_idx * sub_w_size],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst[out_offset], src, 0, sub_h_size, sub_w_block, 0, stride_len)\n\n with tik_inst.else_scope():\n # suppose w_size > data_size_one_block\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n with tik_inst.if_scope(sub_w_block > 0):\n tik_inst.data_move(dst[sub_h_idx * c_size * w_size + out_offset],\n src[sub_h_idx * (sub_w_block + 1) * data_size_one_block],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.if_scope(sub_w_left > 0):\n tik_inst.data_move(dst[sub_h_idx * c_size * w_size + out_offset +\n sub_w_size - data_size_one_block],\n src[(sub_h_idx * (sub_w_block + 1) + sub_w_block) *\n data_size_one_block],\n 0, 1, 1, 0, 0)", "def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None", "def _rel_shift_legacy(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(1, 2, 0, 3).contiguous().view(qlen, klen, bs * n_heads)\n zero_pad = xs.new_zeros((qlen, 1, bs * n_heads))\n xs_shifted = torch.cat([zero_pad, xs], dim=1).view(klen + 1, qlen, bs * n_heads)[1:].view_as(xs)\n return xs_shifted.view(qlen, klen, bs, n_heads).permute(2, 0, 1, 3)", "def move(self, direction):\r\n # we are initializing the required variables\r\n num_steps=0\r\n if direction== UP or direction==DOWN:\r\n num_steps=self._height\r\n if direction==LEFT or direction==RIGHT:\r\n num_steps=self._width\r\n move_in=OFFSETS[direction]\r\n temp_list=[]\r\n moved=False \r\n # merging the list in the particular direction\r\n for start_cell in self._initial_cells[direction]:\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * move_in[0]\r\n col = start_cell[1] + step * move_in[1]\r\n # creating a list of all the columns and rows in that direction \r\n temp_list.append(self._grid[row][col])\r\n # caling the merge function to calculate the resultant list\r\n merged_list=merge(temp_list)\r\n # putting back the resultant list\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * move_in[0]\r\n col = start_cell[1] + step * move_in[1]\r\n self._grid[row][col]=merged_list[step]\r\n # cheking for any changes in the board\r\n if temp_list!=merged_list:\r\n moved=True\r\n temp_list=[]\r\n #adding anew tile\r\n if moved:\r\n self.new_tile()", "def shift(\n x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,\n order=1\n):\n h, w = x.shape[row_index], x.shape[col_index]\n if 
is_random:\n tx = np.random.uniform(-hrg, hrg) * h\n ty = np.random.uniform(-wrg, wrg) * w\n else:\n tx, ty = hrg * h, wrg * w\n translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])\n\n transform_matrix = translation_matrix # no need to do offset\n x = affine_transform(x, transform_matrix, channel_index, fill_mode, cval, order)\n return x", "def define_n_dimensional_shift_registers(\n parent_name: str,\n pixel_type: Kind,\n pixels_per_clock: list,\n image_sizes: list,\n needed_2ND_coordinates: list,\n last_in_dimension: bool) -> Circuit:\n class _SR(Circuit):\n\n name = \"shift_registers_for_{}_with_{}_pxPerClock_{}_imgSizes_{}_neededCoordinates_{}_lastInDimension\".format(\n parent_name,\n cleanName(str(pixels_per_clock)),\n cleanName(str(image_sizes)),\n cleanName(str(needed_2ND_coordinates)),\n str(last_in_dimension)\n )\n\n # since may be parallel in each dimension, amount of out ports\n # for each shift register is scaled down by amount of parallelism\n # will be maping over the top shift register for parallelism\n lengths_per_shift_register_per_dimension = [\n image_size // pixel_per_clock for (image_size, pixel_per_clock) in\n zip(image_sizes, pixels_per_clock)\n ]\n\n second_to_last_2ND_coordinate = [needed_2ND_coordinates[-2] - 1] if len(needed_2ND_coordinates) > 1 else []\n\n IO = ['I', In(pixel_type),\n 'O', Out(get_nested_type(pixel_type, needed_2ND_coordinates))] + \\\n (['next', Out(pixel_type)] if not last_in_dimension else []) + ClockInterface(has_ce=True)\n @classmethod\n def definition(cls):\n head_pixel_per_clock, *tail_pixels_per_clock = pixels_per_clock\n head_image_size, *tail_image_sizes = image_sizes\n head_needed_2ND_coordinates, *tail_needed_2ND_coordinates = needed_2ND_coordinates\n\n # don't make shift registers, just return SIPO if 1D case\n if len(image_sizes) == 1:\n # if need outputs from all or all but last element in this shift register, just use registers\n # otherwise use an SRAM\n rest_of_row_buffer_size = cls.lengths_per_shift_register_per_dimension[0] - head_needed_2ND_coordinates\n if rest_of_row_buffer_size == 1 and not last_in_dimension:\n sipos = SIPOAnyType(head_needed_2ND_coordinates + 1, pixel_type, 0, has_ce=True)\n else:\n sipos = SIPOAnyType(head_needed_2ND_coordinates, pixel_type, 0, has_ce=True)\n wire(cls.I, sipos.I)\n wire(cls.CE, sipos.CE)\n wire(sipos.O[0:head_needed_2ND_coordinates], cls.O)\n if not last_in_dimension:\n # feed one element every clock, delayed on output by size of buffer so equivalent to shift register\n if rest_of_row_buffer_size < 2:\n wire(sipos.O[-1], cls.next)\n else:\n rowbuffer = DelayedBuffer(pixel_type, rest_of_row_buffer_size, 1,\n rest_of_row_buffer_size, rest_of_row_buffer_size)\n wire(sipos.O[-1], rowbuffer.I[0])\n wire(rowbuffer.O[0], cls.next)\n valid_term = TermAnyType(Bit)\n wire(rowbuffer.valid, valid_term.I)\n wire(cls.CE, rowbuffer.CE)\n wire(cls.CE, rowbuffer.WE)\n\n return sipos\n\n else:\n\n # if this shift register is the last in its dimension, make the last of its children also a last\n # and only make enough of them to fill the output ports. 
Otherwise, make enough to fill this\n # dimension\n one_dimensional_lower_shift_register_def_not_last_in_dimension = define_n_dimensional_shift_registers(\n cls.name,\n pixel_type,\n tail_pixels_per_clock,\n tail_image_sizes,\n tail_needed_2ND_coordinates,\n False\n )\n\n one_dimensional_lower_shift_register_def_last_in_dimension = define_n_dimensional_shift_registers(\n cls.name,\n pixel_type,\n tail_pixels_per_clock,\n tail_image_sizes,\n tail_needed_2ND_coordinates,\n True\n )\n\n if last_in_dimension:\n one_dimensional_lower_shift_registers = [\n one_dimensional_lower_shift_register_def_not_last_in_dimension() for _ in\n range(head_needed_2ND_coordinates - 1)\n ]\n\n one_dimensional_lower_shift_registers += [\n one_dimensional_lower_shift_register_def_last_in_dimension()\n ]\n else:\n one_dimensional_lower_shift_registers = [\n one_dimensional_lower_shift_register_def_not_last_in_dimension() for _ in\n range(cls.lengths_per_shift_register_per_dimension[0])\n ]\n\n # connect each next's to the following's input, except the last\n for i in range(len(one_dimensional_lower_shift_registers) - 1):\n wire(one_dimensional_lower_shift_registers[i].next,\n one_dimensional_lower_shift_registers[i+1].I)\n\n # connect the edge input and next of the lower dimenisonal shift registers to those of this\n # higher dimensional shift register\n wire(cls.I, one_dimensional_lower_shift_registers[0].I)\n\n if not last_in_dimension:\n wire(one_dimensional_lower_shift_registers[-1].next, cls.next)\n\n for i in range(len(one_dimensional_lower_shift_registers)):\n wire(one_dimensional_lower_shift_registers[i].O, cls.O[i])\n wire(cls.CE, one_dimensional_lower_shift_registers[i].CE)\n\n\n return one_dimensional_lower_shift_registers\n\n return _SR", "def move(self, direction):\n # replace with your code\n for start_cell in self._directions[direction]:\n temp_list = []\n for step in range(self._height):\n temp_list.append(self.get_tile(start_cell[0] + step * OFFSETS[direction][0], start_cell[1] + step * OFFSETS[direction][1]))\n merged_list = merge(temp_list)\n for step in range(self._width):\n self.set_tile(start_cell[0] + step * OFFSETS[direction][0], start_cell[1] + step * OFFSETS[direction][1], merged_list[step])\n self.new_tile()", "def step(self, move):", "def test_center_of_coordinates_shift():\n # print sys._getframe().f_code.co_name\n # c = commons()\n\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy())\n ncs_restraints_group_list = ncs_obj_phil.get_ncs_restraints_group_list()\n\n # ncs_restraints_group_list = c.ncs_restraints_group_list\n xrs = pdb_inp.xray_structure_simple()\n shifts = ncs_restraints_group_list.get_ncs_groups_centers(\n sites_cart = xrs.sites_cart())\n\n xyz = pdb_inp.atoms().extract_xyz()\n center_of_coor = (flex.vec3_double([xyz.sum()]) * (1/xyz.size())).round(8)\n # test shifts\n t1 = shifts[0].round(8)\n t2 = shifts[1].round(8)\n d1 = flex.sqrt((center_of_coor-t1).dot()).min_max_mean().as_tuple()\n d2 = flex.sqrt((center_of_coor-t2).dot()).min_max_mean().as_tuple()\n assert (d1 == d2)\n\n # test shift to center\n new_nrg = ncs_restraints_group_list.shift_translation_to_center(shifts = shifts)\n expected = (22.63275, 5.54625, 2.9375)\n assert (new_nrg[0].copies[0].t.round(5)).elems == expected\n # back to original coordinates system\n old_nrg = new_nrg.shift_translation_back_to_place(shifts=shifts)\n expected = (old_nrg[0].copies[0].t.round(5)).elems\n result = 
(ncs_restraints_group_list[0].copies[0].t.round(5)).elems\n assert result == expected", "def resampz(x, m_type, shift=1):\n sx = np.array(x.shape)\n\n if m_type == 0 or m_type == 1:\n y = np.zeros((sx[0] + np.abs(shift * (sx[1] - 1)), sx[1]))\n\n if m_type == 0:\n shift1 = np.arange(0, sx[1]) * (- shift)\n else:\n shift1 = np.arange(0, sx[1]) * shift\n\n if shift1[-1] < 0:\n shift1 = shift1 - shift1[-1]\n\n for n in range(sx[1]):\n y[shift1[n] + np.arange(0, sx[0]), n] = x[:, n]\n\n # Remove extra rows\n start = 0\n finish = y.shape[0]\n\n while np.linalg.norm(y[start, :], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[finish-1, :], 2) == 0:\n finish -= 1\n\n y = y[start:finish, :]\n\n elif m_type == 2 or m_type == 3:\n y = np.zeros((sx[0], sx[1] + np.abs(shift * (sx[0] - 1))))\n\n if m_type == 2:\n shift2 = np.arange(0, sx[0]) * (- shift)\n else:\n shift2 = np.arange(0, sx[0]) * shift\n\n if shift2[-1] < 0:\n shift2 = shift2 - shift2[-1]\n\n for m in range(sx[0]):\n y[m, shift2[m] + np.arange(0, sx[1])] = x[m, :]\n\n # Remove extra rows\n start = 0\n finish = y.shape[1]\n\n while np.linalg.norm(y[:, start], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[:, finish-1], 2) == 0:\n finish -= 1\n\n y = y[:, start:finish]\n\n else:\n print('Error: type not valid.')\n y = 0\n\n return y", "def _shift_wall_index(self):\n if self.wall_index > 0:\n self.wall_index -= 1\n else:\n self._shift_wall_wind()\n self.wall_index = len(self.current_wall) - 1", "def move_multi_wire_gates(self, operator_grid):\n n = operator_grid.num_layers\n i = -1\n while i < n - 1:\n i += 1\n\n this_layer = operator_grid.layer(i)\n layer_ops = _remove_duplicates(this_layer)\n other_layer = [None] * operator_grid.num_wires\n\n for j in range(len(layer_ops)):\n op = layer_ops[j]\n\n if op is None:\n continue\n\n # translate wires to their indices on the device\n wire_indices = self.active_wires.indices(op.wires)\n\n if len(op.wires) > 1:\n\n sorted_wires = wire_indices.copy()\n sorted_wires.sort()\n\n blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))\n\n for k in range(j + 1, len(layer_ops)):\n other_op = layer_ops[k]\n\n if other_op is None:\n continue\n\n # translate wires to their indices on the device\n other_wire_indices = self.active_wires.indices(other_op.wires)\n other_sorted_wire_indices = other_wire_indices.copy()\n other_sorted_wire_indices.sort()\n other_blocked_wires = list(\n range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)\n )\n\n if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):\n op_indices = [\n idx for idx, layer_op in enumerate(this_layer) if layer_op == op\n ]\n\n for l in op_indices:\n other_layer[l] = op\n this_layer[l] = None\n\n break\n\n if not all([item is None for item in other_layer]):\n operator_grid.insert_layer(i + 1, other_layer)\n n += 1", "def randomize_position(self, w, steps = 3):\n \n #self.red.set_power(0)\n \n for k in range(steps):\n for idx,waveplate in enumerate(w):\n print '* Randomizing %s waveplate (step %d) ...'%(waveplate, k)\n self.rotator.quick_scan(np.random.uniform(low = -20000, high = 20000) ,getattr(self,'_'+waveplate+'_channel'))", "def moveFactory(self, oldRowIndex: int, oldColIndex: int, newRowIndex: int, newColIndex: int) -> None:\n ...", "def move_forward(self, steps):\n\t\tif self.movement <= steps:\n\t\t\tif self.heading == 0:\n\t\t\t\tself.grid_y -= steps\n\t\t\telif self.heading == 90:\n\t\t\t\tself.grid_x += steps\n\t\t\telif self.heading == 180:\n\t\t\t\tself.grid_y += steps\n\t\t\telif self.heading == 
270:\n\t\t\t\tself.grid_x -= steps", "def steiner3D(imOut, n, directions, grid=DEFAULT_GRID3D):\n imOut.reset()\n (w,h) = imOut.getSize()\n l = imOut.getLength()\n \n v = computeMaxRange(imOut[0])[1]\n imOut.setPixel(v, (w/2,h/2,l/2))\n \n ses = []\n for d in directions:\n ses.append(structuringElement3D([0,d],grid))\n \n for i in range(n):\n for se in ses:\n dilate3D(imOut, imOut, 1, se=se)\n imOut.updateDisplay()", "def sow_step(self, player, move):\n init_pit = move\n stones = self.p_pits(player.index)[init_pit]\n clen = 2 * self.M + 1\n\n if player.index == 1:\n cstate = self.state[:-1]\n else:\n cstate = self.p2_pits() + [self.p2_store()] + self.p1_pits()\n\n per_add = stones // clen\n dis_pit = stones % clen\n\n cstate[init_pit] = 0\n last_pit = (init_pit + dis_pit) % clen\n new_state = [i + per_add for i in cstate]\n if last_pit > init_pit:\n new_state = [\n v + 1 if init_pit < i <= last_pit else v\n for i, v in enumerate(new_state)\n ]\n elif last_pit < init_pit:\n new_state = [\n v + 1 if (init_pit < i or i <= last_pit) else v\n for i, v in enumerate(new_state)\n ]\n else:\n pass\n\n if player.index == 1:\n return new_state + [self.p2_store()], last_pit\n else:\n return new_state[-self.M:] + [self.p1_store()\n ] + new_state[:-self.M], last_pit", "def shift_cells(self, cells, direction):\n merge = False\n # sorted so cells within the same piece won't run into each other\n sorted_cells = TransformPiece.sort_cells(cells, direction)\n # if trying to shift past a wall, abort mission\n for cell in sorted_cells:\n value = self.get_cell_value(cell)\n if value < 1:\n continue\n adjacent_coords = TransformPiece.get_adjacent_coordinates(cell, direction)\n adjacent_value = self.get_cell_value(adjacent_coords)\n if adjacent_value == -1 and not self.is_in_buffer(adjacent_coords):\n return\n # do shift\n for cell in sorted_cells:\n value = self.get_cell_value(cell)\n if value < 1:\n continue\n adjacent_coords = TransformPiece.get_adjacent_coordinates(cell, direction)\n adjacent_value = self.get_cell_value(adjacent_coords)\n if adjacent_value < 1:\n # shift cell into empty space\n self.set_cell_value(adjacent_coords, value)\n self.clear_cell(cell)\n elif adjacent_value == value:\n # do merge\n self.set_cell_value(adjacent_coords, value * 2)\n self.clear_cell(cell)\n merge = True\n return merge", "def move(self):\n raise NotImplementedError", "def _data_move_in_mc_on_h(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, h_size, w_size, in_offset = data_pos_info\n data_cnt_one_block = _get_elment_cnt_one_block(src.dtype)\n sub_w_block = _ceil_div(sub_w_size, data_cnt_one_block)\n sub_h_align_block_size = sub_h_size // data_cnt_one_block * data_cnt_one_block\n sub_h_left = sub_h_size % data_cnt_one_block\n sub_hw_align_block = _ceil_div(sub_h_size * w_size, data_cnt_one_block)\n is_not_w_block_align = w_size % data_cnt_one_block > 0\n is_h_size_smaller_one_block = h_size < data_cnt_one_block\n is_subw_equal_w = sub_w_size == w_size\n\n def _move_in_one_more_block():\n \"\"\"\n move in one more block of h when h > sub_h and sub_h is not block align\n \"\"\"\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx_0:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx_0],\n src[in_offset + sub_h_idx_0 * w_size],\n 0, 1, sub_w_block, 0, 0)\n # move in one more block of h\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(\n dst[sub_w_block * data_cnt_one_block * (sub_h_align_block_size + sub_h_idx_1)],\n src[in_offset + 
(sub_h_idx_1 + sub_h_size - data_cnt_one_block) * w_size],\n 0, 1, sub_w_block, 0, 0)\n\n with tik_inst.if_scope(is_subw_equal_w):\n # no need to move in one more block\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n tik_inst.data_move(dst, src[in_offset], 0, 1, sub_hw_align_block, 0, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()\n\n with tik_inst.else_scope():\n # no need move in one more block of h\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n src_strides = w_size // data_cnt_one_block - sub_w_block\n # mte max strides value is 65535\n with tik_inst.if_scope(tik.any(src_strides > MTE_STRIDES, is_not_w_block_align)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[in_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst, src[in_offset], 0, sub_h_size, sub_w_block, src_strides, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def _increment_move(move, direction, n):\n # print(move)\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)):\n #while 0<=move[0] and move[0]<n and 0<=move[1] and move[1]<n:\n yield move\n move=list(map(sum,zip(move,direction)))\n #move = (move[0]+direction[0],move[1]+direction[1])", "def move(self, model):\n grid = model.grid\n possible_steps = grid.get_neighborhood(\n self.pos, moore=True, include_center=True)\n choice = random.choice(possible_steps)\n grid.move_agent(self, choice)", "def pixel_shift_fun(self, i, points, image_shape):\n self.delta_0 = np.round(self.displacements[:, i, 0]).astype(int)\n self.delta_1 = np.round(self.displacements[:, i, 1]).astype(int)\n \n # Exlude the points that have displacement going outside of the image range\n out_of_range_it = np.logical_or(self.delta_0 + points[:, 0] > image_shape[0] - 1, self.delta_1 + points[:, 1] > image_shape[1] - 1)\n if np.any(out_of_range_it):\n self.delta_0[out_of_range_it] = 0\n self.delta_1[out_of_range_it] = 0\n self.valid_points[out_of_range_it] = False\n warnings.warn('Displacement is going outside of the image range! 
The valid points are saved in self.method.valid_points')\n self.displacements[~self.valid_points, i, :] = np.nan", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA = velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n 
self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def _rel_shift(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(0, 3, 2, 1)\n idx = torch.arange(klen, device=xs.device)\n k_idx, q_idx = idx.unsqueeze(0), idx.unsqueeze(1)\n rel_pos_idx = torch.abs(k_idx - q_idx)\n if klen != qlen:\n rel_pos_idx = rel_pos_idx[:, :qlen]\n mask = xs.new_ones(qlen, klen, dtype=torch.bool if torch_12_plus else torch.uint8)\n mask = torch.tril(mask, diagonal=0).transpose(1, 0)\n rel_pos_idx[mask] *= -1\n rel_pos_idx = klen - qlen - rel_pos_idx\n rel_pos_idx[rel_pos_idx < 0] *= -1\n if self.clamp_len > 0:\n rel_pos_idx.clamp_(max=self.clamp_len)\n rel_pos_idx = rel_pos_idx.expand_as(xs)\n x_shift = torch.gather(xs, dim=2, index=rel_pos_idx)\n x_shift = x_shift.permute(0, 3, 2, 1)\n return x_shift", "def get_move(arr=None):\n if arr is None or np.sum(arr!=0) < 55:\n return []\n \n moves = [] # (coord, dir) ex ((3, 4), 0) means move (3, 4) to right, 0 right, 1 up, 2 left, 3 down\n mask_moved = np.ones_like(arr)\n replace_value = 0\n # detect 2 consecutive\n for key in filters:\n for rot in [1, 3, 0, 2]:\n early_break = False\n out = signal.correlate2d(arr, np.rot90(filters[key], rot), mode='same', fillvalue=100)\n \n mask = (out==arr).astype(np.float)\n tmp = np.stack(np.where(mask), -1)\n # print(tmp)\n for idx in range(tmp.shape[0]):\n # if mask_moved[tuple(tmp[idx])] == 1:\n if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1:\n # if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1 and arr[tuple(tmp[idx]+dirs[rot])] != replace_value:\n moves.append((tmp[idx], rot))\n # mask_moved[tuple(tmp[idx])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot])] = 0\n arr[tuple(tmp[idx])], arr[tuple(tmp[idx]+dirs[rot])] = arr[tuple(tmp[idx]+dirs[rot])], arr[tuple(tmp[idx])]\n arr[tuple(tmp[idx]+dirs[rot])] = replace_value\n if key == 3:\n mask_moved[tuple(tmp[idx]+dirs[rot]*2)] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]*3)] = 0\n arr[tuple(tmp[idx]+dirs[rot]*2)] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]*3)] = replace_value\n elif key == 2:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n elif key == 0:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = replace_value\n else:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = replace_value\n early_break = True\n break\n if early_break:\n break\n \n if len(moves) > 5: # early break to save computing 
resources\n break\n\n if len(moves) == 0:\n icon_other = np.stack(np.where(arr==0), -1)\n for idx in range(icon_other.shape[0]):\n moves.append((icon_other[idx], np.random.randint(0, 4)))\n\n return moves", "def _increment_move(move, direction, n):\n # print(move)\n move = list(map(sum, zip(move, direction)))\n #move = (move[0]+direction[0], move[1]+direction[1])\n while all(map(lambda x: 0 <= x < n, move)): \n #while 0<=move[0] and move[0]<n and 0<=move[1] and move[1]<n:\n yield move\n move=list(map(sum,zip(move,direction)))\n #move = (move[0]+direction[0],move[1]+direction[1])", "def shift_shuffle(self, continuum: Continuum) -> None:\n shift_max = self.magnitude * self.SHIFT_FACTOR * \\\n self._reference_continuum.avg_length_unit\n for annotator in continuum.annotators:\n for unit in continuum[annotator]:\n continuum.remove(annotator, unit)\n start_seg, end_seg = 0.0, 0.0\n while start_seg >= end_seg:\n start_seg = unit.segment.start + np.random.uniform(-1, 1) * shift_max\n end_seg = unit.segment.end + np.random.uniform(-1, 1) * shift_max\n continuum.add(annotator, Segment(start_seg, end_seg), unit.annotation)", "def _data_move_out_last_dim_lt_one_block(tik_inst, dst, src, data_pos_info):\n sub_axis_1, sub_axis_0, axis_0, axis_1, axis_2, out_offset = data_pos_info\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n\n with tik_inst.if_scope(sub_axis_1 == 1):\n with tik_inst.if_scope(sub_axis_0 * axis_2 > data_size_one_block):\n sub_axis_0_2 = sub_axis_0 * axis_2\n sub_axis_0_block_align = sub_axis_0_2 // data_size_one_block\n left_data = sub_axis_0_2 % data_size_one_block\n tik_inst.data_move(dst[out_offset], src, 0, 1, sub_axis_0_block_align, 0, 0)\n with tik_inst.if_scope(left_data > 0):\n with tik_inst.new_stmt_scope():\n reg_temp = [tik_inst.Scalar(src.dtype)\n for i in ADDR_IDX_LIST[:data_size_one_block]]\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n reg_temp[idx].set_as(src[sub_axis_0_2 - data_size_one_block + idx])\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n src[idx].set_as(reg_temp[idx])\n tik_inst.data_move(dst[out_offset + sub_axis_0_2 - data_size_one_block],\n src, 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n # for case sub_axis_0 * axis_2 < data_size_one_block\n with tik_inst.if_scope(axis_0 * axis_1 * axis_2 > data_size_one_block):\n m_sub_axis_0 = _ceil_div(data_size_one_block, axis_2)\n sub_axis_0_2 = m_sub_axis_0 * axis_2\n sub_axis_0_block_align = sub_axis_0_2 // data_size_one_block\n left_data = sub_axis_0_2 % data_size_one_block\n tik_inst.data_move(dst[out_offset + (sub_axis_0 - m_sub_axis_0) * axis_2],\n src, 0, 1, sub_axis_0_block_align, 0, 0)\n with tik_inst.if_scope(left_data > 0):\n with tik_inst.new_stmt_scope():\n reg_temp = [tik_inst.Scalar(src.dtype)\n for i in ADDR_IDX_LIST[:data_size_one_block]]\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n reg_temp[idx].set_as(src[sub_axis_0_2 - data_size_one_block + idx])\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n src[idx].set_as(reg_temp[idx])\n tik_inst.data_move(dst[out_offset + sub_axis_0 * axis_2 -\n data_size_one_block],\n src, 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n # the shape size is not bigger than one block size\n tik_inst.data_move(dst[out_offset], src, 0, 1,\n _ceil_div(axis_0 * axis_1 * axis_2, data_size_one_block), 0, 0)\n\n with tik_inst.else_scope():\n with tik_inst.if_scope(sub_axis_1 * axis_0 * axis_2 > data_size_one_block):\n sub_axis_1_0_2 = sub_axis_1 * axis_0 * axis_2\n sub_axis_1_0_2_block_align = sub_axis_1_0_2 // data_size_one_block\n left_data = 
sub_axis_1_0_2 % data_size_one_block\n tik_inst.data_move(dst[out_offset], src, 0, 1, sub_axis_1_0_2_block_align, 0, 0)\n with tik_inst.if_scope(left_data > 0):\n with tik_inst.new_stmt_scope():\n reg_temp = [tik_inst.Scalar(src.dtype)\n for i in ADDR_IDX_LIST[:data_size_one_block]]\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n reg_temp[idx].set_as(src[sub_axis_1_0_2 - data_size_one_block + idx])\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n src[idx].set_as(reg_temp[idx])\n tik_inst.data_move(dst[out_offset + sub_axis_1_0_2 - data_size_one_block],\n src, 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n # to make sure move in data size is not less than one block\n with tik_inst.if_scope(axis_0 * axis_1 * axis_2 > data_size_one_block):\n m_sub_axis_1 = _ceil_div(data_size_one_block, axis_0 * axis_2)\n sub_axis_1_0_2 = m_sub_axis_1 * axis_0 * axis_2\n sub_axis_0_block_align = sub_axis_1_0_2 // data_size_one_block\n left_data = sub_axis_1_0_2 % data_size_one_block\n tik_inst.data_move(dst[out_offset + (sub_axis_1 - m_sub_axis_1) * axis_0 * axis_2],\n src, 0, 1, sub_axis_0_block_align, 0, 0)\n with tik_inst.if_scope(left_data > 0):\n with tik_inst.new_stmt_scope():\n reg_temp = [tik_inst.Scalar(src.dtype)\n for i in ADDR_IDX_LIST[:data_size_one_block]]\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n reg_temp[idx].set_as(src[sub_axis_1_0_2 - data_size_one_block + idx])\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n src[idx].set_as(reg_temp[idx])\n tik_inst.data_move(dst[out_offset + sub_axis_1 * axis_0 * axis_2 -\n data_size_one_block],\n src, 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n # the shape size is not bigger than one block size\n tik_inst.data_move(dst[out_offset], src, 0, 1,\n _ceil_div(axis_0 * axis_1 * axis_2, data_size_one_block), 0, 0)", "def move(self, direction):\n no_change = True\n if direction == UP or direction == DOWN:\n other_direction = self.get_grid_height()\n elif direction == LEFT or direction == RIGHT:\n other_direction = self.get_grid_width()\n for first_index in self._indices[direction]:\n row = first_index[0]\n col = first_index[1]\n line = []\n for _ in range(other_direction):\n line.append(self.get_tile(row, col))\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n merged_line = merge(line)\n \n if merged_line != line:\n no_change = False\n \n row = first_index[0]\n col = first_index[1]\n for idx in range(other_direction):\n self.set_tile(row, col, merged_line[idx])\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n if no_change == False: \n self.new_tile()", "def fshift_nb(a, n):\n return fshift_1d_nb(a, n)", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def data_move_case_zero(self, tik_instance, ub_ori, ub_cast_fp16, ub_trans,\n ub_cast_int8, is_last, num_outer_axis,\n num_loop_time, loop_time, loop_col, loop_len):\n with tik_instance.if_scope(tik.all(loop_time == self.dst_shape[-4] //\n loop_col - 1,\n self.dst_shape[-4] % loop_col ==\n 0)):\n is_last.set_as(1)\n num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \\\n self.dst_shape[-2] * self.dst_shape[-1]\n src_ub_index = 0\n if self.src_shape[-1] % CUBE_SIZE_2 != 0 or \\\n (self.src_shape[-1] - loop_len * CUBE_SIZE_2) // \\\n self.num_data > MAX_STRIDE_BLK:\n with tik_instance.if_scope(num_loop_time ==\n self.dst_shape[-3] - 1):\n if self.src_shape[-2] % CUBE_SIZE != 0:\n with tik_instance.for_range(0, self.src_shape[-2] %\n CUBE_SIZE) as num_cube_col:\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + 
\\\n (num_loop_time * CUBE_SIZE +\n num_cube_col) * self.src_shape[-1] + \\\n loop_time * loop_col * CUBE_SIZE_2\n tik_instance.data_move(ub_ori[loop_len *\n CUBE_SIZE_2 *\n num_cube_col],\n self.src_gm[src_gm_index],\n 0, 1,\n loop_len * CUBE_SIZE_2 //\n self.num_data, 0, 0)\n else:\n with tik_instance.for_range(0, CUBE_SIZE) \\\n as num_cube_col:\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + \\\n (num_loop_time * CUBE_SIZE +\n num_cube_col) * self.src_shape[-1] + \\\n loop_time * loop_col * CUBE_SIZE_2\n tik_instance.data_move(ub_ori[loop_len *\n CUBE_SIZE_2 *\n num_cube_col],\n self.src_gm[src_gm_index],\n 0, 1,\n loop_len * CUBE_SIZE_2 //\n self.num_data, 0, 0)\n with tik_instance.else_scope():\n with tik_instance.for_range(0, CUBE_SIZE) as num_cube_col:\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + \\\n (num_loop_time * CUBE_SIZE +\n num_cube_col) * self.src_shape[-1] + \\\n loop_time * loop_col * CUBE_SIZE_2\n tik_instance.data_move(ub_ori[loop_len * CUBE_SIZE_2 *\n num_cube_col],\n self.src_gm[src_gm_index],\n 0, 1,\n loop_len * CUBE_SIZE_2 //\n self.num_data, 0, 0)\n else:\n src_gm_index = num_outer_axis * self.src_shape[-1] * \\\n self.src_shape[-2] + num_loop_time * CUBE_SIZE * \\\n self.src_shape[-1] + loop_time * loop_col * \\\n CUBE_SIZE_2\n with tik_instance.if_scope(num_loop_time ==\n self.dst_shape[-3] - 1):\n if self.src_shape[-2] % CUBE_SIZE != 0:\n tik_instance.data_move(ub_ori[src_ub_index],\n self.src_gm[src_gm_index], 0,\n self.src_shape[-2] % CUBE_SIZE,\n loop_len,\n (self.src_shape[-1] -\n loop_len * CUBE_SIZE_2) //\n self.num_data,\n 0)\n else:\n tik_instance.data_move(ub_ori[src_ub_index],\n self.src_gm[src_gm_index],\n 0, CUBE_SIZE,\n loop_len,\n (self.src_shape[-1] -\n loop_len * CUBE_SIZE_2) //\n self.num_data,\n 0)\n with tik_instance.else_scope():\n tik_instance.data_move(ub_ori[src_ub_index],\n self.src_gm[src_gm_index],\n 0, CUBE_SIZE,\n loop_len,\n (self.src_shape[-1] - loop_len *\n CUBE_SIZE_2) // self.num_data, 0)\n\n self.data_rearrange_case_one(tik_instance, ub_ori, ub_cast_fp16,\n ub_trans, ub_cast_int8, num_loop_time,\n loop_len, is_last)\n\n if((self.dst_shape[-3] - 1) * self.dst_shape[-1] *\n self.dst_shape[-2] // self.num_data > MAX_STRIDE_BLK):\n with tik_instance.for_range(0, loop_len) as \\\n num_col_cube:\n dst_gm_index = num_outer_axis * num_data_one_loop + \\\n num_loop_time * self.dst_shape[-1] * \\\n self.dst_shape[-2] + \\\n (loop_time * loop_col + num_col_cube) * \\\n self.dst_shape[-1] * self.dst_shape[-2] * \\\n self.dst_shape[-3]\n tik_instance.data_move(self.dst_gm[dst_gm_index],\n ub_cast_int8[num_col_cube *\n CUBE_SIZE_2 *\n (CUBE_SIZE + 1)],\n 0, 1,\n self.dst_shape[-1] *\n self.dst_shape[-2] //\n self.num_data,\n 0, 0)\n else:\n dst_gm_index = num_outer_axis * num_data_one_loop + \\\n num_loop_time * self.dst_shape[-1] * \\\n self.dst_shape[-2] + loop_time * \\\n loop_col * self.dst_shape[-1] * \\\n self.dst_shape[-2] * \\\n self.dst_shape[-3]\n tik_instance.data_move(self.dst_gm[dst_gm_index],\n ub_cast_int8[0],\n 0, loop_len,\n self.dst_shape[-1] * self.dst_shape[-2] //\n self.num_data, self.num_byte,\n (self.dst_shape[-3] - 1) *\n self.dst_shape[-1] *\n self.dst_shape[-2] // self.num_data)", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n 
next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 
or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def move_move(self, event):\n self.canvas.scan_dragto(event.x, event.y, gain=1)", "def warp(x, flo):\n B, C, H, W = x.size()\n # mesh grid\n xx = torch.arange(0, W).view(1, -1).repeat(H, 1)\n yy = torch.arange(0, H).view(-1, 1).repeat(1, W)\n xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)\n yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)\n grid = torch.cat((xx, yy), 1).float()\n\n if x.is_cuda:\n grid = grid.cuda()\n vgrid = Variable(grid) + flo\n\n # scale grid to [-1,1]\n vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0\n vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, 
:].clone() / max(H - 1, 1) - 1.0\n\n vgrid = vgrid.permute(0, 2, 3, 1)\n output = nn.functional.grid_sample(x, vgrid, padding_mode='border', align_corners=True)\n\n return output", "def shift_board(self, dx, dy):\n self.board = np.roll(self.board, dy, axis=0)\n self.board = np.roll(self.board, dx, axis=1)\n self.agent_locs += [dy, dx]\n self.agent_locs %= self.board.shape\n self.update_exit_locs()", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def column_move(plateau,num_col,sens):\n if check_room(plateau, 3, num_col) == False or (sens != 1 and sens != 0):\n return \"Erreur !\"\n if sens==1:\n for i in range(0,3):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i+1,num_col)and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i+1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i+1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n\n else:\n for i in range(3,0,-1):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i-1,num_col) and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i-1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i-1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break", "def move(self, *step):\n self.x += step[0]\n self.y += step[1]", "def loss_vxm(warp_fname, moving_rmli_fname,\n fixed_rmli_fname, moved_fname, crop_center, crop_size,\n reg_weight, ncc_win):\n\n # Import voxelmorph with pytorch backend\n os.environ['VXM_BACKEND'] = 'pytorch'\n import voxelmorph as vxm\n\n rg_crop = crop_size[0]\n az_crop = crop_size[1]\n rg_cen = crop_center[0]\n az_cen = crop_center[1]\n\n # Read the voxelmorph warp file\n warp_file = np.load(warp_fname)\n warp = warp_file['offs']\n warp = warp[np.newaxis, :, :, :]\n\n # Read moved scene\n moved_file = np.load(moved_fname)\n moved = moved_file['scene']\n moved = moved[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the fixed RMLI\n fixed_rmli = gx.MLI(fixed_rmli_fname,\n par=gx.MLI_Par(fixed_rmli_fname + '.par'))\n rmli_dim = fixed_rmli.dim\n fixed_full = fixed_rmli.array\n fixed = fixed_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n fixed = scale_rmli(fixed)\n fixed = fixed[np.newaxis, np.newaxis, :, :]\n\n # Read, crop, and scale the moving RMLI\n moving_rmli = gx.MLI(moving_rmli_fname,\n par=gx.MLI_Par(moving_rmli_fname + '.par'))\n moving_full = moving_rmli.array\n moving = moving_full[rg_cen - rg_crop // 2:rg_cen + rg_crop // 2,\n az_cen - az_crop // 2:az_cen + az_crop // 2]\n moving = scale_rmli(moving)\n moving = moving[np.newaxis, np.newaxis, :, :]\n\n # Prepare 
ncc loss with square window\n ndims = len(list(fixed.shape)) - 2\n assert ndims in [1, 2, 3], \"volumes should be 1 to 3 dimensions. found: %d\" % ndims\n ncc_win_sq = [ncc_win] * ndims # Build a square window\n ncc = vxm.losses.NCC(ncc_win_sq, cuda=False)\n\n # Now we have all the data, compute the losses\n loss_sim_vxm = ncc.loss(torch.from_numpy(fixed).float(), torch.from_numpy(moved).float())\n\n grad = vxm.losses.Grad(penalty='l2')\n loss_smooth_vxm = grad.loss(None, torch.from_numpy(warp).float())\n\n loss_total_vxm = loss_sim_vxm + (reg_weight * loss_smooth_vxm)\n\n # Print everything\n # print('Lambda: {}\\n'.format(reg_weight))\n # print('Voxelmorph:\\nSimilarity loss: {}\\nSmoothness loss: {}\\n'\n #'Total: {}\\n'.format(loss_sim_vxm, loss_smooth_vxm, loss_total_vxm))\n \n return loss_sim_vxm, loss_smooth_vxm, loss_total_vxm" ]
[ "0.68111503", "0.654992", "0.6478761", "0.5682042", "0.55264264", "0.5491822", "0.5434507", "0.54266405", "0.54103285", "0.53249466", "0.52720374", "0.52512574", "0.5243212", "0.51999307", "0.51817954", "0.5173916", "0.5150899", "0.51255804", "0.5119478", "0.51185614", "0.5106558", "0.51044023", "0.50996846", "0.5088775", "0.508422", "0.50755394", "0.5070164", "0.5068276", "0.50580907", "0.5054507", "0.50470287", "0.50324696", "0.50210226", "0.501512", "0.50054854", "0.50007", "0.49985072", "0.49945122", "0.49916473", "0.4990884", "0.4990884", "0.4984365", "0.4982423", "0.49810582", "0.4979407", "0.4973289", "0.4972089", "0.49709973", "0.49641648", "0.49563175", "0.49534228", "0.4950387", "0.494587", "0.49437937", "0.4940504", "0.4935793", "0.49306375", "0.49258107", "0.4921113", "0.49175072", "0.49149036", "0.49041134", "0.48959574", "0.48893705", "0.4883301", "0.48822936", "0.48763517", "0.48716667", "0.48700115", "0.4868838", "0.48608357", "0.48540533", "0.48534715", "0.48527315", "0.48515263", "0.4847971", "0.4847243", "0.4841019", "0.4841", "0.48368305", "0.48363933", "0.4836016", "0.4834043", "0.48340076", "0.4833981", "0.4831114", "0.48286486", "0.48197484", "0.48041904", "0.4800498", "0.47988296", "0.47956026", "0.4789235", "0.47787663", "0.47745758", "0.47681874", "0.4764463", "0.4763354", "0.4759526", "0.4759129" ]
0.7541371
0
Shift the field 'field_array' by n_move cells on CPU. This is done in spectral space and corresponds to multiplying the fields with the factor exp(i*kz_true*dz)**n_move.
def shift_spect_array_cpu( field_array, shift_factor, n_move ):
    Nz, Nr = field_array.shape
    # Loop over the 2D array (in parallel over z, if threading is enabled)
    for iz in prange( Nz ):
        power_shift = 1. + 0.j
        # Calculate the shift factor (raising to the power n_move ;
        # for negative n_move, we take the complex conjugate, since
        # shift_factor is of the form e^{i k dz})
        for i in range( abs(n_move) ):
            power_shift *= shift_factor[iz]
        if n_move < 0:
            power_shift = power_shift.conjugate()
        # Shift the fields
        for ir in range( Nr ):
            field_array[iz, ir] *= power_shift
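For context, a minimal NumPy sketch of how such a shift factor can be built and applied in one vectorized step; the grid sizes, the cell size dz, and the use of numpy.fft.fftfreq for the longitudinal wavenumbers are illustrative assumptions, not taken from the snippet above (which relies on numba-style prange for the threaded loop).

import numpy as np

# Illustrative parameters (assumptions for this sketch only)
Nz, Nr, dz, n_move = 16, 4, 0.1, 3
kz = 2 * np.pi * np.fft.fftfreq(Nz, d=dz)   # assumed longitudinal wavenumbers
shift_factor = np.exp(1j * kz * dz)         # e^{i kz dz}, one factor per z mode

field_array = np.ones((Nz, Nr), dtype=np.complex128)

# Same multiplication as the loop in shift_spect_array_cpu, broadcast over
# the radial index: each z mode is multiplied by shift_factor**n_move
# (for negative n_move this is the complex conjugate, since |shift_factor| = 1)
field_array *= (shift_factor ** n_move)[:, np.newaxis]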
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_spect_array_gpu( field_array, shift_factor, n_move ):\n # Get a 2D CUDA grid\n iz, ir = cuda.grid(2)\n\n # Only access values that are actually in the array\n if ir < field_array.shape[1] and iz < field_array.shape[0]:\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift fields\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def TransformUpMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = i\n while l <= j:\n line.append(field[l])\n l = l + side\n\n line = move(line)\n j = len(field) - side + i\n l = i\n k = 0\n while l <= j:\n field[l] = line[k]\n l = l + side\n k = k + 1\n i = i + 1\n return field", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n 
shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def TransformLeftMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side)\n line = []\n for x in range(i, j):\n line.append(field[x])\n\n line = move(line)\n k = 0\n for x in range(i, j):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field", "def make_move(self, board, fieldy, fieldx):\n board[self.posy][self.posx], board[fieldy][fieldx] = board[fieldy][fieldx], board[self.posy][self.posx]\n self.posy = fieldy\n self.posx = fieldx", "def TransformRightMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side) - 1\n line = []\n for x in range(j, i - 1, -1):\n line.append(field[x])\n line = move(line)\n k = 0\n for x in range(j, i - 1, -1):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field", "def grid_shift(grid, advection, trim_edges=0, field_list=None):\n if trim_edges == 0:\n trim_slice = slice(None, None)\n else:\n trim_slice = slice(int(trim_edges), -int(trim_edges))\n\n shifted_grid = copy.deepcopy(grid)\n\n # grab the x and y axis and trim\n shifted_grid.x[\"data\"] = grid.x[\"data\"][trim_slice].copy()\n shifted_grid.y[\"data\"] = grid.y[\"data\"][trim_slice].copy()\n\n # shift each field.\n if field_list is None:\n field_list = grid.fields.keys()\n\n for field in field_list:\n # copy data and fill with nans\n data = grid.fields[field][\"data\"].copy()\n data = np.ma.filled(data, np.nan)\n\n # shift the data\n shifted_data = shift(data, [0, advection[0], advection[1]], prefilter=False)\n\n # mask invalid, trim and place into grid\n shifted_data = np.ma.fix_invalid(\n shifted_data, copy=False, fill_value=get_fillvalue()\n )\n shifted_data = shifted_data[:, trim_slice, trim_slice]\n shifted_grid.fields[field][\"data\"] = shifted_data\n\n return shifted_grid", "def TransformDownMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = j\n while l >= i:\n line.append(field[l])\n l = l - side\n\n line = move(line)\n j = len(field) - side + i\n l = j\n k = 0\n while l >= i:\n field[l] = line[k]\n l = l - side\n k = k + 1\n i = i + 1\n return field", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = 
state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def DELAY(A, n):\r\n At = pivot_table(A)\r\n res = At.shift(n)\r\n res = stack_table(res)\r\n return res", "def realign_image(arr, shift, angle=0):\n # if both shifts are integers, do circular shift; otherwise perform Fourier shift.\n if np.count_nonzero(np.abs(np.array(shift) - np.round(shift)) < 0.01) == 2:\n temp = np.roll(arr, int(shift[0]), axis=0)\n temp = np.roll(temp, int(shift[1]), axis=1)\n temp = temp.astype('float32')\n else:\n temp = fourier_shift(np.fft.fftn(arr), shift)\n temp = np.fft.ifftn(temp)\n temp = np.abs(temp).astype('float32')\n return temp", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def fshift_nb(a, n):\n return fshift_1d_nb(a, n)", "def Repeater(arr,n):\n new_arr = np.zeros((arr.shape[0]*n,arr.shape[1]),dtype=object)\n for i in range(0,arr.shape[0]):\n new_row = np.tile(arr[i,:],(n,1))\n new_arr[i*n:(i+1)*n,:] = new_row\n return new_arr", "def rgbArray_move(self, rgbList, delay):\n # res\n\n res = self.rgbArrayOfs_move(0,rgbList,delay)\n return res", "def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))", "def yank(self):\r\n self.block.bucket_array.yank_cell(self)", "def fshift_1d_nb(a, n):\n out = np.empty_like(a, dtype=np.float_)\n out[:n] = np.nan\n out[n:] = a[:-n]\n return out", "def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self", "def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 
= int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n Fout._IsGauss=False\n return Fout", "def _assemble_tiles(i, n, tile, tsincr_g, output_dir, outtype):\n # pylint: disable=too-many-arguments\n tsincr_file = os.path.join(output_dir, '{}_{}.npy'.format(outtype, n))\n tsincr = np.load(file=tsincr_file)\n tsincr_g[tile.top_left_y:tile.bottom_right_y, tile.top_left_x:tile.bottom_right_x] = tsincr[:, :, i]", "def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] 
= field_data", "def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)", "def offsetElements(self, i):\n\n #iterate over each tile and subtract\n #if the value is -1, indicating a blank tile, leave it as that\n for y in range(0, len(self.array)):\n for x in range(0, len(self.array[0])):\n if self.array[y][x] != -1:\n self.array[y][x] -= i", "def roll(arrayin, shift = (0, 0), silent = True):\r\n arrayout = arrayin.copy()\r\n # if shift is integer valued then use np.roll\r\n if (type(shift[0]) == int) or (type(shift[0]) == np.int) or (type(shift[0]) == np.int32) or (type(shift[0]) == np.int64):\r\n if shift[-1] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-1], -1)'\r\n arrayout = np.roll(arrayout, shift[-1], -1)\r\n # if shift is 1d then don't roll the other dim (if it even exists)\r\n if len(arrayout.shape) >= 2 :\r\n if shift[-2] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-2], -2)'\r\n arrayout = np.roll(arrayout, shift[-2], -2)\r\n # if shift is float valued then use the Fourier shift theorem\r\n elif (type(shift[0]) == float) or (type(shift[0]) == np.float32) or (type(shift[0]) == np.float64):\r\n # if shift is 1d\r\n if len(shift) == 1 :\r\n if silent == False :\r\n print 'arrayout = fftn_1d(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn_1d(arrayout)'\r\n arrayout = fftn_1d(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn_1d(arrayout)\r\n elif len(shift) == 2 :\r\n if silent == False :\r\n print 'arrayout = fftn(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn(arrayout)'\r\n arrayout = fftn(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn(arrayout)\r\n return arrayout", "def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()", "def _field_Fresnel(z, field, dx, lam, dtype, usepyFFTW):\n \n \"\"\" *************************************************************\n Major differences to Cpp based LP version:\n - dx =siz/N instead of dx=siz/(N-1), more consistent with physics \n and rest of LP package\n - fftw DLL uses no normalization, numpy uses 1/N on ifft -> omitted\n factor of 1/(2*N)**2 in final calc before return\n - bug in Cpp version: did not touch top row/col, now we extract one\n more row/col to fill entire field. 
No errors noticed with the new\n method so far\n ************************************************************* \"\"\"\n _using_pyfftw = False # determined if loading is successful \n if usepyFFTW or _USE_PYFFTW:\n try:\n import pyfftw as _pyfftw\n from pyfftw.interfaces.numpy_fft import fft2 as _fft2\n from pyfftw.interfaces.numpy_fft import ifft2 as _ifft2\n _fftargs = {'planner_effort': 'FFTW_ESTIMATE',\n 'overwrite_input': True,\n 'threads': -1} #<0 means use multiprocessing.cpu_count()\n _using_pyfftw = True \n except ImportError:\n #import warnings\n #warnings.warn(_WARNING)\n _WARNING = '\\n**************************** WARNING ***********************\\n'\\\n +'In the Fresnel command you required FFT with the pyFFTW package.\\n'\\\n +'or _USE_PYFFTW = True in your config.py file.\\n'\\\n +'However LightPipes cannot import pyFFTW because it is not installed.\\n'\\\n +'Falling back to numpy.fft.\\n'\\\n +'(Try to) install pyFFTW on your computer for faster performance.\\n'\\\n +'Enter at a terminal prompt: python -m pip install pyfftw.\\n'\\\n +'Or reinstall LightPipes with the option pyfftw\\n'\\\n +'Enter: python -m pip install lightpipes[pyfftw]\\n\\n'\\\n +'*************************************************************'\n print(_WARNING)\n if not _using_pyfftw:\n from numpy.fft import fft2 as _fft2\n from numpy.fft import ifft2 as _ifft2\n _fftargs = {}\n tictoc.tic()\n N = field.shape[0] #assert square\n \n legacy = True #switch on to numerically compare oldLP/new results\n if legacy:\n kz = 2.*3.141592654/lam * z\n siz = N*dx\n dx = siz/(N-1) #like old Cpp code, even though unlogical\n else:\n kz = 2*_np.pi/lam*z\n \n \n cokz = _np.cos(kz)\n sikz = _np.sin(kz)\n \n No2 = int(N/2) #\"N over 2\"\n \"\"\"The following section contains a lot of uses which boil down to\n 2*No2. For even N, this is N. For odd N, this is NOT redundant:\n 2*No2 is N-1 for odd N, therefore sampling an even subset of the\n field instead of the whole field. Necessary for symmetry of first\n step involving Fresnel integral calc.\n \"\"\"\n if _using_pyfftw:\n in_outF = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n in_outK = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n else:\n in_outF = _np.zeros((2*N, 2*N),dtype=dtype)\n in_outK = _np.zeros((2*N, 2*N),dtype=dtype)\n \n \"\"\"Our grid is zero-centered, i.e. the 0 coordiante (beam axis) is\n not at field[0,0], but field[No2, No2]. 
The FFT however is implemented\n such that the frequency 0 will be the first element of the output array,\n and it also expects the input to have the 0 in the corner.\n For the correct handling, an fftshift is necessary before *and* after\n the FFT/IFFT:\n X = fftshift(fft(ifftshift(x))) # correct magnitude and phase\n x = fftshift(ifft(ifftshift(X))) # correct magnitude and phase\n X = fftshift(fft(x)) # correct magnitude but wrong phase !\n x = fftshift(ifft(X)) # correct magnitude but wrong phase !\n A numerically faster way to achieve the same result is by multiplying\n with an alternating phase factor as done below.\n Speed for N=2000 was ~0.4s for a double fftshift and ~0.1s for a double\n phase multiplication -> use the phase factor approach (iiij).\n \"\"\"\n # Create the sign-flip pattern for largest use case and \n # reference smaller grids with a view to the same data for\n # memory saving.\n ii2N = _np.ones((2*N),dtype=float)\n ii2N[1::2] = -1 #alternating pattern +,-,+,-,+,-,...\n iiij2N = _np.outer(ii2N, ii2N)\n iiij2No2 = iiij2N[:2*No2,:2*No2] #slice to size used below\n iiijN = iiij2N[:N, :N]\n\n RR = _np.sqrt(1/(2*lam*z))*dx*2\n io = _np.arange(0, (2*No2)+1) #add one extra to stride fresnel integrals\n R1 = RR*(io - No2)\n fs, fc = _fresnel(R1)\n fss = _np.outer(fs, fs) # out[i, j] = a[i] * b[j]\n fsc = _np.outer(fs, fc)\n fcs = _np.outer(fc, fs)\n fcc = _np.outer(fc, fc)\n \n \"\"\"Old notation (0.26-0.33s):\n temp_re = (a + b + c - d + ...)\n # numpy func add takes 2 operands A, B only\n # -> each operation needs to create a new temporary array, i.e.\n # ((((a+b)+c)+d)+...)\n # since python does not optimize to += here (at least is seems)\n New notation (0.14-0.16s):\n temp_re = (a + b) #operation with 2 operands\n temp_re += c\n temp_re -= d\n ...\n Wrong notation:\n temp_re = a #copy reference to array a\n temp_re += b\n ...\n # changing `a` in-place, re-using `a` will give corrupted\n # result\n \"\"\"\n temp_re = (fsc[1:, 1:] #s[i+1]c[j+1]\n + fcs[1:, 1:]) #c[+1]s[+1]\n temp_re -= fsc[:-1, 1:] #-scp [p=+1, without letter =+0]\n temp_re -= fcs[:-1, 1:] #-csp\n temp_re -= fsc[1:, :-1] #-spc\n temp_re -= fcs[1:, :-1] #-cps\n temp_re += fsc[:-1, :-1] #sc\n temp_re += fcs[:-1, :-1] #cs\n \n temp_im = (-fcc[1:, 1:] #-cpcp\n + fss[1:, 1:]) # +spsp\n temp_im += fcc[:-1, 1:] # +ccp\n temp_im -= fss[:-1, 1:] # -ssp\n temp_im += fcc[1:, :-1] # +cpc\n temp_im -= fss[1:, :-1] # -sps\n temp_im -= fcc[:-1, :-1] # -cc\n temp_im += fss[:-1, :-1]# +ss\n \n temp_K = 1j * temp_im # a * b creates copy and casts to complex\n temp_K += temp_re\n temp_K *= iiij2No2\n temp_K *= 0.5\n in_outK[(N-No2):(N+No2), (N-No2):(N+No2)] = temp_K\n \n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] \\\n = field[(N-2*No2):N,(N-2*No2):N] #cutting off field if N odd (!)\n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] *= iiij2No2\n \n tictoc.tic()\n in_outK = _fft2(in_outK, **_fftargs)\n in_outF = _fft2(in_outF, **_fftargs)\n t_fft1 = tictoc.toc()\n \n in_outF *= in_outK\n \n in_outF *= iiij2N\n tictoc.tic()\n in_outF = _ifft2(in_outF, **_fftargs)\n t_fft2 = tictoc.toc()\n #TODO check normalization if USE_PYFFTW\n \n Ftemp = (in_outF[No2:N+No2, No2:N+No2]\n - in_outF[No2-1:N+No2-1, No2:N+No2])\n Ftemp += in_outF[No2-1:N+No2-1, No2-1:N+No2-1]\n Ftemp -= in_outF[No2:N+No2, No2-1:N+No2-1]\n comp = complex(cokz, sikz)\n Ftemp *= 0.25 * comp\n Ftemp *= iiijN\n field = Ftemp #reassign without data copy\n ttotal = tictoc.toc()\n t_fft = t_fft1 + t_fft2\n t_outside = ttotal - t_fft\n debug_time = False\n if debug_time:\n 
print('Time total = fft + rest: {:.2f}={:.2f}+{:.2f}'.format(\n ttotal, t_fft, t_outside))\n return field", "def ShiftFrame(Frame, PixShift):\n \n import numpy as np\n \n F, R, C = Frame.shape\n \n if F > 1:\n msg = f\"'Frame' must be a 2D frame with shape (1, R, C) but has shape\"\\\n + f\" ({F}, {R}, {C}).\"\n \n raise Exception(msg)\n \n # Initialise ShiftedFrame:\n ShiftedFrame = np.zeros((1, R, C), dtype='uint')\n #ShiftedFrame = np.empty_like(Frame, dtype='uint') # this creates 42,932\n # unique values for some reason!\n \n #unique = UniqueItems(Nda=Frame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in Frame')\n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the initialised',\n # f'ShiftedFrame: {unique[:11]}...')\n \n di, dj, dk = PixShift\n \n ##ShiftedFrame[0, dj:, di:] = Frame[0, :-(1+dj), :-(1+di)]\n ##ShiftedFrame[0, :-(1+dj), :-(1+di)] = Frame[0, dj:, di:]\n #ShiftedFrame[0, :R-dj, :C-di] = Frame[0, dj:, di:]\n \n if di > 0 and dj > 0:\n ShiftedFrame[0, dj:, di:] = Frame[0, :-dj, :-di]\n \n elif di < 0 and dj < 0:\n ShiftedFrame[0, :dj, :di] = Frame[0, -dj:, -di:]\n \n elif di > 0 and dj < 0:\n ShiftedFrame[0, :dj, di:] = Frame[0, -dj:, :-di]\n \n elif di < 0 and dj > 0:\n ShiftedFrame[0, dj:, :di] = Frame[0, :-dj, -di:]\n \n elif di == 0 and dj > 0:\n ShiftedFrame[0, dj:, :] = Frame[0, :-dj, :]\n \n elif di == 0 and dj < 0:\n ShiftedFrame[0, :dj, :] = Frame[0, -dj:, :]\n \n elif di > 0 and dj == 0:\n ShiftedFrame[0, :, di:] = Frame[0, :, :-di]\n \n elif di < 0 and dj == 0:\n ShiftedFrame[0, :, :di] = Frame[0, :, -di:]\n \n elif di == 0 and dj == 0:\n ShiftedFrame[0] = Frame[0]\n \n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the ShiftedFrame',\n # 'after shifting.')\n \n return ShiftedFrame", "def _roll(self):\n self.order = np.roll(self.order, 1)", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n\tfactor = 2**(1.0 * n / 12.0)\n\tstretched = stretch(snd_array, 1.0/factor, window_size, h)\n\treturn speedx(stretched[window_size:], factor)", "def piecewise_transform(image, numcols=5, numrows=5, warp_left_right=10, warp_up_down=10, order=1):\n\n rows, cols = image.shape[0], image.shape[1]\n\n numcols = numcols\n numrows = numrows\n\n src_cols = np.linspace(0, cols, numcols, dtype=int)\n src_rows = np.linspace(0, rows, numrows, dtype=int)\n src_rows, src_cols = np.meshgrid(src_rows, src_cols)\n src = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n src_rows_new = np.ndarray.transpose(src_rows)\n src_cols_new = np.ndarray.transpose(src_cols)\n # src_new = np.dstack([src_cols_new.flat, src_rows_new.flat])[0]\n\n dst_cols = np.ndarray(src_cols.shape)\n dst_rows = 
np.ndarray(src_rows.shape)\n for i in range(0, numcols):\n for j in range(0, numrows):\n if src_cols[i, j] == 0 or src_cols[i, j] == cols:\n dst_cols[i, j] = src_cols[i, j]\n else:\n dst_cols[i, j] = src_cols[i, j] + np.random.uniform(-1, 1) * warp_left_right\n\n if src_rows[i, j] == 0 or src_rows[i, j] == rows:\n dst_rows[i, j] = src_rows[i, j]\n else:\n dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down\n\n dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]\n\n # dst_rows_new = np.ndarray.transpose(dst_rows)\n # dst_cols_new = np.ndarray.transpose(dst_cols)\n # dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]\n\n tform = transform.PiecewiseAffineTransform()\n tform.estimate(src, dst)\n\n img_new = transform.warp(image, tform, output_shape=(rows, cols), order=order, preserve_range=True)\n img_new = img_new.astype(image.dtype)\n \n return img_new", "def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0", "def _StepsLoopElim(z, nstep, _refr, Fin):\n if Fin._curvature != 0.0:\n raise ValueError('Cannot operate on spherical coords.'\n + 'Use Convert() first')\n\n if type(_refr) != _np.ndarray:\n refr=_np.ones((Fin.N,Fin.N))*_refr\n else:\n refr = _refr\n \n if Fin.field.shape != refr.T.shape:\n #TODO fix the .T problem\n raise ValueError('refractive index refr must have same NxN'\n + ' dimension as field.')\n \n Fout = Field.copy(Fin)\n N = Fout.N\n lam = Fout.lam\n size = Fout.siz\n dtype = Fout._dtype\n \n legacy = True\n if legacy:\n Pi = 3.141592654 #to compare Cpp results accurately\n else:\n Pi = _np.pi\n K = 2.*Pi/lam\n dz = z/2. #since 2 staggered steps, each row then col-wise\n Pi4lz = 2*K/dz\n imPi4lz = 1j * Pi4lz\n \n delta = size/(N-1.) #dx\n delta2 = delta*delta\n \n \"\"\"\n /* absorption at the borders is described here */\n \"\"\"\n AA= -10./dz/nstep #/* total absorption */\n band_pow=2. 
#/* profile of the absorption border, 2=quadratic*/\n \"\"\"\n /* width of the absorption border */\n \"\"\"\n i_left = N/2 + 1.0 - 0.4*N\n i_right = N/2 + 1.0 + 0.4*N\n \n \"\"\"\n ///* absorption borders are formed here */\n \"\"\"\n c_absorb_x = _np.zeros(N, dtype=dtype)\n iv = _np.arange(N, dtype=int)\n mask = iv+1<=i_left\n iii = i_left - iv[mask]\n c_absorb_x[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n mask2 = iv+1 >= i_right\n iii = iv[mask2]-i_right+2\n im = N-i_right+1\n c_absorb_x[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n \n c_absorb_x2 = _np.zeros(N, dtype=dtype)\n mask = iv+1<=i_left\n iii = i_left - iv[mask]\n c_absorb_x2[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n mask2 = iv+1 >= i_right\n iii = iv[mask2]-i_right+1 #TODO why 1 difference\n im = N-i_right+1\n c_absorb_x2[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n \n c_absorb_y = _np.zeros(N, dtype=dtype)\n jv = _np.arange(N, dtype=int)\n mask = jv+1<=i_left\n iii = i_left - jv[mask] -1# REM +1 in i direction, why different here?\n c_absorb_y[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n mask2 = jv+1 >= i_right\n iii = jv[mask2]-i_right+1\n im = N-i_right+1\n c_absorb_y[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n \n # c_absorb_y2 = _np.zeros(N, dtype=complex)\n # mask = jv+1<=i_left\n # iii = i_left - jv[mask]-1# REM +1 in i direction, why different here?\n # c_absorb_y2[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n # mask2 = jv+1 >= i_right\n # iii = jv[mask2] +1-i_right#REM +1 for i-direction loop, why different?\n # im = N-i_right+1\n # c_absorb_y2[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n c_absorb_y2 = c_absorb_y\n #TODO last two were identical, why are absorbx,x2,y different?\n # probably can just assume same everywhere after legacy=False...\n \"\"\"\n ///* end absorption */\n \"\"\"\n \n refr = refr.T #TODO I messed up somewhere...\n \n \"\"\"The refraction part (real part of refr. index n) is separated out\n and applied as a phase term instead of stepping through like the\n imaginary part. According to LightPipes for MATLAB manual, this proved\n to be more stable.\"\"\"\n \n # expfi4 = _np.exp(1j*0.25*K*dz*(refr.real-1.0))\n tempfi = 1j*refr.real\n tempfi -= 1j*1.0 #avoid mem copies where possible\n tempfi *= 0.25*K*dz \n expfi4 = _np.exp(tempfi, out=tempfi) #quarter phase fi, for half phase apply twice\n \n # medium = (-1j*K)*refr.imag\n mediumIm = -K*refr.imag #half the RAM vs. 
complex, real part 0 anyway\n \n CCX = -2/delta2 + 1j*(Pi4lz + mediumIm)\n CCX[1:N-2:2,:] -= c_absorb_x\n CCX[2:N-1:2,:] -= c_absorb_x2\n CCY = -2/delta2 + 1j*(Pi4lz + mediumIm)\n CCY[:,1:N-2:2] -= c_absorb_y.reshape((-1,1)) #to column vector\n CCY[:,2:N-1:2] -= c_absorb_y2.reshape((-1,1)) #to column vector\n \n #Variables for elimination function elim():\n a = -1/delta2\n b = -1/delta2\n uu = _np.zeros(N, dtype=dtype)\n uu2 = _np.zeros(N, dtype=dtype)\n alpha = _np.zeros(N, dtype=dtype)\n beta = _np.zeros(N, dtype=dtype)\n p = _np.zeros(N, dtype=dtype)\n \n \"\"\"\n /* Main loop, steps here */\n \"\"\"\n for istep in range(nstep):\n \"\"\"\n /* Elimination in the direction i, halfstep */\n \"\"\"\n Fout.field *= expfi4 #*=_np.exp(1j*(0.25*K*dz*(refr.real-1.0)))\n \n for j in range(1, N-1):\n uij = Fout.field[j, 1:N-1]\n uij1 = Fout.field[j+1, 1:N-1]\n uij_1 = Fout.field[j-1, 1:N-1]\n p[1:N-1] = -1/delta2 * (uij_1 + uij1 -2.0 * uij) + imPi4lz * uij\n \n elim(N, a, b, CCX[j,:], p, uu, alpha, beta)\n \n Fout.field[j-1, :] = uu2[:] #apply result from previous elim!\n uu2[:] = uu[:] #store this elim for next application\n # this is necessary to not overwrite the data used in the next\n # elim step\n \n Fout.field[N-1, :] = uu2[:] #apply final elim in this direction\n \n Fout.field *= expfi4 #*=_np.exp(1j*(0.25*K*dz*(refr.real-1.0)))\n Fout.field *= expfi4 #twice makes it 0.5*k*dz*(n-1)\n \n \"\"\"\n /* Elimination in the j direction is here, halfstep */\n \"\"\"\n uu2[:] = 0.0\n \n for i in range(1, N-1):\n uij = Fout.field[1:N-1, i]\n ui1j = Fout.field[1:N-1, i+1]\n ui_1j = Fout.field[1:N-1, i-1]\n p[1:N-1] = -1/delta2 * (ui_1j + ui1j -2.0 * uij) + imPi4lz * uij\n \n elim(N, a, b, CCY[:,i], p, uu, alpha, beta)\n \n Fout.field[:, i-1] = uu2[:]\n uu2[:] = uu[:]\n \n #TODO BUG! why are we accessing i here? out of scope. 
Last value:\n #resulting from for(ii in range(1, N-2, 2))\n # -> last ii in loop is int((N-2)/2)*2-1\n # and i=ii+1\n # -> i_final = int((N-2)/2)*2-1+1 = int((N-2)/2)*2\n # tested OK for even and odd N -> works for all N\n i = int((N-2)/2)*2\n #TODO also, why 0:N-1 where all else is 0:N?\n Fout.field[0:N-1, i] = uu2[1:N]\n \"\"\"\n ///* end j */\n \"\"\"\n #TODO should this be in nstep loop??\n # seems so, that would add up to 1*ikz*n, right now its 3/4*ikz per iter\n # and a final 1/4 ??\n Fout.field *= expfi4 #*=_np.exp(1j*(0.25*K*dz*(refr.real-1.0)))\n Fout._IsGauss=False\n return Fout", "def resampz(x, m_type, shift=1):\n sx = np.array(x.shape)\n\n if m_type == 0 or m_type == 1:\n y = np.zeros((sx[0] + np.abs(shift * (sx[1] - 1)), sx[1]))\n\n if m_type == 0:\n shift1 = np.arange(0, sx[1]) * (- shift)\n else:\n shift1 = np.arange(0, sx[1]) * shift\n\n if shift1[-1] < 0:\n shift1 = shift1 - shift1[-1]\n\n for n in range(sx[1]):\n y[shift1[n] + np.arange(0, sx[0]), n] = x[:, n]\n\n # Remove extra rows\n start = 0\n finish = y.shape[0]\n\n while np.linalg.norm(y[start, :], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[finish-1, :], 2) == 0:\n finish -= 1\n\n y = y[start:finish, :]\n\n elif m_type == 2 or m_type == 3:\n y = np.zeros((sx[0], sx[1] + np.abs(shift * (sx[0] - 1))))\n\n if m_type == 2:\n shift2 = np.arange(0, sx[0]) * (- shift)\n else:\n shift2 = np.arange(0, sx[0]) * shift\n\n if shift2[-1] < 0:\n shift2 = shift2 - shift2[-1]\n\n for m in range(sx[0]):\n y[m, shift2[m] + np.arange(0, sx[1])] = x[m, :]\n\n # Remove extra rows\n start = 0\n finish = y.shape[1]\n\n while np.linalg.norm(y[:, start], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[:, finish-1], 2) == 0:\n finish -= 1\n\n y = y[:, start:finish]\n\n else:\n print('Error: type not valid.')\n y = 0\n\n return y", "def iquadshift(a):\r\n if len(a.shape) == 1 :\r\n b = np.roll(a, +(a.shape[-1]/2-1), -1)\r\n else :\r\n b = np.roll(a, +(a.shape[-2]/2-1), -2)\r\n b = np.roll(b, +(b.shape[-1]/2-1), -1)\r\n return b", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def _shift_amplitudes(qc, n, inplace=False):\n if not inplace:\n qc = qc.copy()\n for q_reg in qc.qregs:\n # Unitary gate representing the shift operation on n qubits\n shift_matrix = np.roll(np.eye(2**q_reg.size), n, axis=1)\n # Add the gate to the circuit\n qc.append(UnitaryGate(shift_matrix), q_reg)\n return qc", "def make_move(self, row:int, col:int,curr_move):\n self.array[row][col] = curr_move", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def _rel_shift_legacy(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(1, 2, 0, 3).contiguous().view(qlen, klen, bs * n_heads)\n zero_pad = xs.new_zeros((qlen, 1, bs * n_heads))\n xs_shifted = torch.cat([zero_pad, xs], dim=1).view(klen + 1, qlen, bs * n_heads)[1:].view_as(xs)\n return xs_shifted.view(qlen, klen, bs, n_heads).permute(2, 0, 1, 3)", "def driftvel_mobility_vs_field(datdir, kpts, fields, f_lowfield):\n c = preprocessing_largegrid.PhysicalConstants()\n mu = []\n mu_lin = []\n vd = []\n vd_lin = []\n vd_rta = []\n meanE = []\n mean_en_rta = []\n rta = np.load(data_loc + 'f_simplelin_rta.npy')\n meanE_lin = []\n noneqn = []\n 
noneqn_lin = []\n n_new = []\n n_g = []\n n_l = []\n\n for ee in fields:\n psi_i = np.load(datdir + '/psi/psi_iter_{:.1E}_field.npy'.format(ee))\n chi_i = psi2chi(psi_i, kpts)\n psi_new = np.load(datdir + '/psi_zeroic/psi_iter_{:.1E}_field.npy'.format(ee))\n chi_new = psi2chi(psi_new, kpts)\n chi_lowfield = f2chi(f_lowfield, kpts, c, arbfield=ee)\n chi_rta = f2chi(rta, kpts, c, arbfield=ee)\n mu.append(noise_power.calc_mobility(chi_i, kpts, c, E=ee))\n mu_lin.append(noise_power.calc_mobility(f_lowfield, kpts, c))\n vd.append(noise_power.drift_velocity(chi_i, kpts, c))\n vd_lin.append(noise_power.drift_velocity(chi_lowfield, kpts, c))\n vd_rta.append(noise_power.drift_velocity(chi_rta, kpts, c))\n meanE.append(noise_power.mean_energy(chi_i, kpts, c))\n mean_en_rta.append(noise_power.mean_energy(chi_rta, kpts, c))\n meanE_lin.append(noise_power.mean_energy(chi_lowfield, kpts, c))\n noneqn.append(noise_power.noneq_density(chi_i, kpts, c))\n noneqn_lin.append(noise_power.noneq_density(chi_lowfield, kpts, c))\n n_new.append(noise_power.noneq_density(chi_new, kpts, c))\n ng, nl = noise_power.calc_L_Gamma_ratio(chi_i, kpts, c)\n n_g.append(ng)\n n_l.append(nl)\n\n kvcm = np.array(fields) * 1E-5\n plt.figure()\n plt.plot(kvcm, mu, 'o-', linewidth=2, label='FDM solns')\n plt.plot(kvcm, mu_lin, linewidth=2, label='low field iterative')\n plt.xlabel('Field [kV/m]')\n plt.ylabel(r'Mobility [$cm^2 V^{-1} s^{-1}$]')\n plt.legend()\n\n plt.figure(figsize=(5, 4.5))\n ax = plt.axes([0.22, 0.15, 0.73, 0.73])\n plt.plot(kvcm, vd, 'o-', linewidth=2, label='FDM solns', color='C1')\n plt.plot(kvcm, vd_lin, linewidth=1.5, label='Low field iterative', color='black')\n # plt.plot(kvcm, vd_rta, linewidth=2, label='RTA')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Drift velocity [m/s]')\n # plt.savefig(plots_loc + 'drift velocity vs field.png', dpi=300)\n plt.legend()\n \n plt.figure()\n plt.plot(kvcm, meanE, 'o-', linewidth=2, label='FDM solns')\n # plt.plot(kvcm, mean_en_rta, '-', linewidth=2, label='RTA')\n plt.plot(kvcm, meanE_lin, linewidth=2, label='low field iterative')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Mean Energy [eV]')\n plt.legend()\n\n plt.figure()\n plt.plot(kvcm, noneqn, 'o-', linewidth=2, label='IC only in finite difference')\n plt.plot(kvcm, noneqn_lin, linewidth=2, label='Linear in E solns')\n plt.plot(kvcm, n_new, 'o-', linewidth=2, label='Zero IC directly in solution')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Total Carrier Population [m^-3]')\n plt.legend()\n\n plt.figure()\n plt.plot(kvcm, n_g, 'o-', linewidth=2, label='FDM Gamma')\n plt.plot(kvcm, n_l, 'o-', linewidth=2, label='FDM L')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel('Carrier Population [m^-3]')\n plt.legend()", "def AlignFieldmaps(self):\n for entry in self.entry_map['fmap']:\n info = self.info[entry]\n\n# Register the magnitude image at the shortest TR to the T1-IR\n# structural image.\n target = self.info[self.norm_src]['imgfile'] + \\\n self.info[self.norm_src]['suffix']\n source = info['magfile'] + info['suffix']\n matfile = info['matfile']\n fmt = '3dAllineate -prefix NULL -1Dmatrix_save %s -base %s ' + \\\n '-source %s -cost mi -warp shift_rotate'\n cmd = fmt % (info['matfile'], target, source)\n self.CheckExec(cmd, [info['matfile']])\n\n# Convert to unitary matrix (remove scaling component.)\n cmd = 'cat_matvec -ONELINE %s -P > %s' % \\\n (info['matfile'], info['matfile_unitary'])\n self.CheckExec(cmd, [info['matfile_unitary']])\n\n# Rotate the magnitude image to the new grid.\n fmt = '3dAllineate -prefix %s 
-interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['magfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['magfile'] + info['suffix'])\n self.CheckExec(cmd, [info['magfile_r']+info['suffix']])\n\n# Rotate the fieldmap to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s %s'\n cmd = fmt % (info['imgfile_r']+info['suffix'], \\\n info['matfile_unitary'], info['imgfile'] + info['suffix'])\n self.CheckExec(cmd, [info['imgfile_r']+info['suffix']])", "def _roll(self):\n order = np.array(self.order)\n nsteps = np.array(self.nsteps)\n order[nsteps > 1] = np.roll(order[nsteps > 1], 1)\n self.order = order.tolist()", "def move_members(_) -> int:\n return 1 << 24", "def move_members(_) -> int:\n return 1 << 24", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx_0:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx_0],\n src[in_offset + sub_h_idx_0 * w_size],\n 0, 1, sub_w_block, 0, 0)\n # move in one more block of h\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(\n dst[sub_w_block * data_cnt_one_block * (sub_h_align_block_size + sub_h_idx_1)],\n src[in_offset + (sub_h_idx_1 + sub_h_size - data_cnt_one_block) * w_size],\n 0, 1, sub_w_block, 0, 0)", "def c_not_align_split_n_fp32(self, tik_instance):\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n dhw_d = d_d * h_d * w_d\n nc_one = self.ub_ele // dhw_d\n n_ub = nc_one // 2 // self.cp_align_len // c_d\n\n all_core = _ceil_div(n_d, n_ub)\n ac_num = _set_core_num(all_core)\n\n with tik_instance.for_range(0, ac_num, block_num=ac_num) as num_core:\n ub_ori = tik_instance.Tensor(\"float16\",\n (self.ub_ele * 2,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(\"float16\",\n (self.ub_ele * 2,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n ub_tail = tik_instance.Tensor(\"float16\",\n (16,),\n name=\"ub_tail\",\n scope=tik.scope_ubuf)\n\n ub_loop = _set_loop(tik_instance, num_core, ac_num, all_core)\n\n with tik_instance.for_range(0, ub_loop) as num_u:\n core_index = num_u * ac_num + num_core\n\n with tik_instance.if_scope(core_index < all_core - 1):\n n_len = n_ub\n n_before = n_ub * core_index\n args = tik_instance, ub_ori, ub_trans, ub_tail, \\\n n_before, n_len\n self.func_c_not_align_split_n_fp32(args)\n\n with tik_instance.else_scope():\n n_before = (all_core - 1) * n_ub\n n_len = n_d - n_before\n args = tik_instance, ub_ori, ub_trans, ub_tail, \\\n n_before, n_len\n self.func_c_not_align_split_n_fp32(args)\n\n return tik_instance", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def get_move(arr=None):\n if arr is None or np.sum(arr!=0) < 55:\n return []\n \n moves = [] # (coord, dir) ex ((3, 4), 0) means move (3, 4) to right, 0 right, 1 up, 2 left, 3 down\n mask_moved = np.ones_like(arr)\n replace_value = 0\n # detect 2 consecutive\n for key in filters:\n for rot in [1, 3, 0, 2]:\n early_break = False\n out = signal.correlate2d(arr, np.rot90(filters[key], rot), mode='same', fillvalue=100)\n \n mask = (out==arr).astype(np.float)\n tmp = np.stack(np.where(mask), -1)\n # print(tmp)\n for idx in range(tmp.shape[0]):\n # if 
mask_moved[tuple(tmp[idx])] == 1:\n if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1:\n # if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1 and arr[tuple(tmp[idx]+dirs[rot])] != replace_value:\n moves.append((tmp[idx], rot))\n # mask_moved[tuple(tmp[idx])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot])] = 0\n arr[tuple(tmp[idx])], arr[tuple(tmp[idx]+dirs[rot])] = arr[tuple(tmp[idx]+dirs[rot])], arr[tuple(tmp[idx])]\n arr[tuple(tmp[idx]+dirs[rot])] = replace_value\n if key == 3:\n mask_moved[tuple(tmp[idx]+dirs[rot]*2)] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]*3)] = 0\n arr[tuple(tmp[idx]+dirs[rot]*2)] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]*3)] = replace_value\n elif key == 2:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n elif key == 0:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = replace_value\n else:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = replace_value\n early_break = True\n break\n if early_break:\n break\n \n if len(moves) > 5: # early break to save computing resources\n break\n\n if len(moves) == 0:\n icon_other = np.stack(np.where(arr==0), -1)\n for idx in range(icon_other.shape[0]):\n moves.append((icon_other[idx], np.random.randint(0, 4)))\n\n return moves", "def vec_shift_left_n(x, n):\n return jnp.zeros_like(x).at[0:-n].set(x[n:])", "def bindown_tile(arrayin, new_res):\r\n N = arrayin.shape[0]\r\n array = interpolate(arrayin, new_res)\r\n array = fft2(array)\r\n # find the smallest integer n such that n x new_res > N\r\n n = 0\r\n while n*new_res < N :\r\n n += 1\r\n M = n*new_res\r\n array2 = np.zeros((M,M),dtype=np.complex128)\r\n \r\n for i in range(new_res):\r\n for j in range(new_res):\r\n ii = (i+1)*n - 1\r\n jj = (j+1)*n - 1\r\n array2[ii,jj] = array[i,j]\r\n \r\n array2 = ifft2(array2)\r\n arrayout = unpadd(array2,N) \r\n arrayout = np.array(arrayout, dtype = arrayin.dtype)\r\n return arrayout", "def vec_rotate_left_n(x, n):\n return jnp.roll(x, -n)", "def move(degs, i, j, n):\n if n > 0:\n temp = 3 - i - j\n move(degs, i, temp, n - 1) \n degs[j].append(degs[i].pop(-1))\n print(degs)\n move(degs, temp, j, n - 1)", "def _shift(self, s):\n start_pos = self._relative_head_pos()\n l = 1 + 2 * self.shift_length\n shift = int(s * l - 0.000000001) - int(l / 2)\n for s in range(abs(shift)):\n if shift > 0:\n if self.head_pos == len(self.memory) - 1 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((self.memory, np.zeros((1, self.memory_unit_size))), 0)\n self.head_pos += 1\n else:\n self.head_pos = (self.head_pos + 1) % self.max_memory\n else:\n if self.head_pos == 0 and len(self.memory) < self.max_memory:\n self.memory = np.concatenate((np.zeros((1, self.memory_unit_size)), self.memory), 0)\n self.left_expands += 1\n else:\n self.head_pos = (self.head_pos - 1) % self.max_memory\n if self.history is not None:\n self.history[\"loc\"][-1].append((start_pos, 0.1))\n return np.sign(shift)", "def 
shift(a, n=1):\n return a[n:] + a[:n]", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def move(self, step):\n for point in self.points:\n l = min(len(step), len(point.position))\n for i in range(l):\n point.position[i] = step[i]", "def rollrows(array, shifts):\n array = np.asarray(array)\n shifts = np.asarray(shifts)\n\n if array.ndim < 2:\n return np.roll(array, shifts)\n\n if shifts.ndim < 1:\n shifts = np.tile(shifts, array.shape[0])\n\n rows, cols = np.ogrid[: array.shape[0], : array.shape[1]]\n cols = cols - (shifts % array.shape[1])[:, np.newaxis]\n cols[cols < 0] += array.shape[1]\n return array[rows, cols]", "def train_sample_windowize(field, delta=1, n=20):\n padded = np.pad(field, delta, mode='constant', constant_values=-1)\n X = np.zeros((n * n, (1 + delta * 2) ** 2))\n for i in range(n):\n for j in range(n):\n X[i * n + j] = padded[i:i + 2 * delta + 1, j:j + 2 * delta + 1].ravel()\n return X", "def step(self):\n\t\tnewBoard = CellArray(self.size)\n\t\tfor i in range(0, self.size, 1):\n\t\t\tfor j in range(0, self.size, 1):\n\t\t\t\tnewBoard.board[i][j] = self.changeCell(i, j)\n\t\tself.board = newBoard.board", "def move(a, i):\n x, d = a[i]\n if is_movable(a, i):\n if d == left:\n swap(a, i, i - 1)\n elif d == right:\n swap(a, i, i + 1)\n else:\n raise ValueError(\"unknown direction d = {}\".format(d))\n else:\n raise ValueError(\"not movable\")", "def column_move(plateau,num_col,sens):\n if check_room(plateau, 3, num_col) == False or (sens != 1 and sens != 0):\n return \"Erreur !\"\n if sens==1:\n for i in range(0,3):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if get_value(plateau,i,num_col)==get_value(plateau,i+1,num_col)and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i+1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i+1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i+1,sens)\n break\n\n else:\n for i in range(3,0,-1):\n if is_room_empty(plateau,i,num_col):\n column_pack(plateau,num_col,i,sens)\n break\n if 
get_value(plateau,i,num_col)==get_value(plateau,i-1,num_col) and get_value(plateau,i,num_col)%3==0:\n set_value(plateau,i,num_col,get_value(plateau,i,num_col)*2)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==1 and get_value(plateau,i-1,num_col)==2:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break\n if get_value(plateau,i,num_col)==2 and get_value(plateau,i-1,num_col)==1:\n set_value(plateau,i,num_col,3)\n column_pack(plateau,num_col,i-1,sens)\n break", "def shift_to_match(folder, x=0, y=0, z=0, angle=0, dim=120, energies=['40kVp', '80kVp'],\n directory='D:/Research/Python Data/CBCT/'):\n path = directory + folder + '/'\n\n for energy in energies:\n\n load_path = path + energy\n\n gof.create_folder(folder_name='Shifted Matrices', directory_path=load_path)\n\n load_path = load_path + '/RawMatrices/'\n save_path = path + energy + '/Shifted Matrices/'\n\n # Get all the slices to shift\n files = os.listdir(load_path)\n\n for file in files:\n temp = np.load(load_path + file)\n\n if energy is '40kVp':\n # Don't need to do anything for 40 kVp images\n np.save(save_path + file, temp)\n else:\n savefile = file\n # Shift within XY plane (the slice plane)\n if y is not 0:\n temp = np.roll(temp, y, axis=0) # Y shift\n if x is not 0:\n temp = np.roll(temp, x, axis=1) # X shift\n\n # Rotation\n if angle is not 0:\n index = np.round(np.abs(angle), decimals=0)\n index = int(index)\n temp = rotate(temp, angle)\n temp = temp[index:index + dim, index:index + dim]\n\n # Shift slices in the z (rename files)\n if z is not 0:\n file = file.replace('.npy', '')\n file = file.replace('volume0', '')\n file = int(file) + z\n if file < 10:\n savefile = 'volume000' + str(file) + '.npy'\n elif file < 100 and file >= 10:\n savefile = 'volume00' + str(file) + '.npy'\n else:\n savefile = 'volume0' + str(file) + '.npy'\n\n np.save(save_path + savefile, temp)", "def push_up (grid):\r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0\r\n #joining like numbers \r\n for i in range(3): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i+1][j]=0\r\n #pafter adding the numbers continue to move them \r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0", "def step(self, move):", "def inline_reduce_fixed_shared(N, buf, x, stride_x, pos, count,\r\n manner_fn, manner_init,\r\n b='', stride_b='', dtype='float32'):\r\n if b:\r\n init = manner_init(\"%(x)s[%(pos)s * %(stride_x)s] +\"\r\n \" %(b)s[%(pos)s * %(stride_b)s]\" % locals())\r\n loop_line = manner_fn(\"red\",\r\n manner_init(\"%(x)s[i * %(stride_x)s] + \"\r\n \"%(b)s[i * %(stride_b)s]\" %\r\n locals()))\r\n else:\r\n init = manner_init(\"%(x)s[%(pos)s * %(stride_x)s]\" % locals())\r\n loop_line = manner_fn(\"red\", manner_init(\"%(x)s[i * %(stride_x)s]\" %\r\n locals()))\r\n loop_line2 = manner_fn(\"%s[%s]\" % (buf, pos),\r\n \"%s[i]\" % buf)\r\n r_16 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+16]\" % (buf, pos))\r\n r_8 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+8]\" % (buf, pos))\r\n r_4 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+4]\" % (buf, pos))\r\n r_2 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+2]\" % (buf, pos))\r\n r_1 = manner_fn(\"%s[%s]\" % (buf, pos), \"%s[%s+1]\" % (buf, pos))\r\n\r\n return \"\"\"\r\n {\r\n // This function trashes buf[1..n_threads],\r\n 
// leaving the reduction result in buf[0].\r\n npy_%(dtype)s red = %(init)s;\r\n #pragma unroll 16\r\n for (int i = %(pos)s + %(count)s; i<%(N)s; i += %(count)s){\r\n red = %(loop_line)s;\r\n }\r\n buf[%(pos)s] = red;\r\n __syncthreads();\r\n if (%(pos)s < warpSize)\r\n {\r\n for (int i = %(pos)s + warpSize; i < %(count)s; i += warpSize)\r\n {\r\n %(buf)s[%(pos)s] = %(loop_line2)s;\r\n }\r\n if (%(pos)s < 16)\r\n {\r\n //reduce so that %(pos)s 0 has the reduction of everything\r\n if(%(pos)s + 16 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_16)s;\r\n if(%(pos)s + 8 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_8)s;\r\n if(%(pos)s + 4 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_4)s;\r\n if(%(pos)s + 2 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_2)s;\r\n if(%(pos)s + 1 < %(N)s)\r\n %(buf)s[%(pos)s] = %(r_1)s;\r\n }\r\n }\r\n }\r\n \"\"\" % locals()", "def init_shiftind(self, n_t):\n i = np.arange(n_t * n_t)\n i2 = np.arange(n_t).repeat(n_t)\n ik = np.arange(n_t).repeat(n_t)\n ii = np.arange(n_t)[np.newaxis].repeat(n_t, 0).flatten()\n\n si = ik * n_t + (ik + ii) % n_t\n self.shiftinds_fwd = np.roll(si.reshape((n_t, n_t)), int((n_t - 1) / 2), 1)[:, ::-1].flatten()\n\n si = ik * n_t + (ii - ik) % n_t\n self.shiftinds_back = np.roll(np.arange(n_t * n_t).reshape((n_t, n_t))[:, ::-1], -int((n_t - 1) / 2), 1).flatten()[si]\n\n self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int)\n # self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()", "def vec_rotate_right_n(x, n):\n return jnp.roll(x, n)", "def bshift_1d_nb(a, n):\n out = np.empty_like(a, dtype=np.float_)\n out[-n:] = np.nan\n out[:-n] = a[n:]\n return out", "def shifter(self):\n #self.BA_shift = self.timeshift_latitude(self.latB, self.latA)\n #self.BC_shift = self.timeshift_latitude(self.latB, self.latC)\n\n\n self.shifted = True #changing boolean to True when function is called.\n\n secondsA = self.secondsA\n secondsB = self.secondsB\n secondsC = self.secondsC\n\n NeA = self.holefill(self.NeA, secondsA)\n NeB = self.holefill(self.NeB, secondsB)\n NeC = self.holefill(self.NeC, secondsC)\n\n start = 0\n stop = len(NeA) - np.max(np.array([self.BA_shift, self.BC_shift]))\n\n startA = start + self.BA_shift\n stopA = stop + self.BA_shift\n\n startC = start + self.BC_shift\n stopC = stop + self.BC_shift\n\n NeA = NeA[startA:stopA]\n NeB = NeB[start:stop]\n NeC = NeC[startC:stopC]\n\n longA = self.holefill(self.longA, secondsA)\n longB = self.holefill(self.longB, secondsB)\n longC = self.holefill(self.longC, secondsC)\n longA = longA[startA:stopA]\n longB = longB[start:stop]\n longC = longC[startC:stopC]\n\n latA = self.holefill(self.latA, secondsA)\n latB = self.holefill(self.latB, secondsB)\n latC = self.holefill(self.latC, secondsC)\n latA = latA[startA:stopA]\n latB = latB[start:stop]\n latC = latC[startC:stopC]\n\n radA = self.holefill(self.radA, secondsA)\n radB = self.holefill(self.radB, secondsB)\n radC = self.holefill(self.radC, secondsC)\n radA = radA[startA:stopA]\n radB = radB[start:stop]\n radC = radC[startC:stopC]\n\n velA = self.holefill(self.velA, secondsA)\n velB = self.holefill(self.velB, secondsB)\n velC = self.holefill(self.velC, secondsC)\n velA 
= velA[startA:stopA]\n velB = velB[start:stop]\n velC = velC[start:stop]\n\n altA = self.holefill(self.altA, secondsA)\n altB = self.holefill(self.altB, secondsB)\n altC = self.holefill(self.altC, secondsC)\n altA = altA[startA:stopA]\n altB = altB[start:stop]\n altC = altC[startC:stopC]\n\n\n mlatA = self.holefill(self.mlatA, secondsA)\n mlatB = self.holefill(self.mlatB, secondsB)\n mlatC = self.holefill(self.mlatC, secondsC)\n mlatA = mlatA[startA:stopA]\n mlatB = mlatB[start:stop]\n mlatC = mlatC[startC:stopC]\n\n mlongA = self.holefill(self.mlongA, secondsA)\n mlongB = self.holefill(self.mlongB, secondsB)\n mlongC = self.holefill(self.mlongC, secondsC)\n mlongA = mlongA[startA:stopA]\n mlongB = mlongB[start:stop]\n mlongC = mlongC[startC:stopC]\n\n mltA = self.holefill(self.mltA, secondsA)\n mltB = self.holefill(self.mltB, secondsB)\n mltC = self.holefill(self.mltC, secondsC)\n mltA = mltA[startA:stopA]\n mltB = mltB[start:stop]\n mltC = mltC[startC:stopC]\n\n secondsA = self.holefill(secondsA, secondsA)\n secondsB = self.holefill(secondsB, secondsB)\n secondsC = self.holefill(secondsC, secondsC)\n secondsA = secondsA[startA:stopA]\n secondsB = secondsB[start:stop]\n secondsC = secondsC[startC:stopC]\n\n indsA = np.nonzero(secondsA)[0]\n indsB = np.nonzero(secondsB)[0]\n indsC = np.nonzero(secondsC)[0]\n\n inds = np.intersect1d(indsA, indsB)\n inds = np.intersect1d(inds, indsC)\n\n self.NeA = NeA[inds]\n self.NeB = NeB[inds]\n self.NeC = NeC[inds]\n\n self.longA = longA[inds]\n self.longB = longB[inds]\n self.longC = longC[inds]\n\n self.latA = latA[inds]\n self.latB = latB[inds]\n self.latC = latC[inds]\n\n self.radA = radA[inds]\n self.radB = radB[inds]\n self.radC = radC[inds]\n\n self.velA = velA[inds]\n self.velB = velB[inds]\n self.velC = velC[inds]\n\n self.altA = altA[inds]\n self.altB = altB[inds]\n self.altC = altC[inds]\n\n self.mlatA = mlatA[inds]\n self.mlatB = mlatB[inds]\n self.mlatC = mlatC[inds]\n\n self.mlongA = mlongA[inds]\n self.mlongB = mlongB[inds]\n self.mlongC = mlongC[inds]\n\n self.mltA = mltA[inds]\n self.mltB = mltB[inds]\n self.mltC = mltC[inds]\n\n self.secondsA = secondsA[inds]\n self.secondsB = secondsB[inds]\n self.secondsC = secondsC[inds]", "def shiftFlows(self, targetFlows, stepSize):\n for l in self.link:\n current = self.link[l].flow\n target = targetFlows[l]\n new = current *(1 - stepSize)+target*stepSize\n self.link[l].flow = new\n self.link[l].updateCost()", "def rotate(arr: StaticArray, steps: int) -> StaticArray:\n length = arr.size()\n new_array = StaticArray(length) # Creates a new array to preserve original elements\n for index in range(length):\n pos = index + steps # Adds steps to index number\n if pos == length: # If index is 1 more than last index, change to first index\n pos = 0\n while pos > (length - 1): # If index is greater than the range, subtract by size of array\n pos -= length\n while pos < 0: # If index is less than the range, add by size of array\n pos += length\n new_array.set(pos, arr.get(index)) # Set position\n\n return new_array", "def _rel_shift(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(0, 3, 2, 1)\n idx = torch.arange(klen, device=xs.device)\n k_idx, q_idx = idx.unsqueeze(0), idx.unsqueeze(1)\n rel_pos_idx = torch.abs(k_idx - q_idx)\n if klen != qlen:\n rel_pos_idx = rel_pos_idx[:, :qlen]\n mask = xs.new_ones(qlen, klen, dtype=torch.bool if torch_12_plus else torch.uint8)\n mask = torch.tril(mask, diagonal=0).transpose(1, 0)\n rel_pos_idx[mask] *= -1\n rel_pos_idx = klen - qlen - rel_pos_idx\n 
rel_pos_idx[rel_pos_idx < 0] *= -1\n if self.clamp_len > 0:\n rel_pos_idx.clamp_(max=self.clamp_len)\n rel_pos_idx = rel_pos_idx.expand_as(xs)\n x_shift = torch.gather(xs, dim=2, index=rel_pos_idx)\n x_shift = x_shift.permute(0, 3, 2, 1)\n return x_shift", "def func_c_align_split_n(self, args):\n tik_instance, ub_ori, ub_trans, n_before, n_len = args\n\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n dhw_d = d_d * h_d * w_d\n hw_d = h_d * w_d\n\n data_offset = n_before * self.c_0\n ub_offset = 0\n ori_nburst = dhw_d * self.c_1\n burst_len = n_len * self.c_0 // self.cp_align_len\n src_stride = (n_d - n_len) * self.c_0 // self.cp_align_len\n dst_stride = 0\n args = tik_instance, self.src_gm, ub_ori, data_offset, ub_offset, \\\n ori_nburst, burst_len, src_stride, dst_stride, self.cp_align_len\n _gm_to_ub_one(args)\n\n hwnoni = hw_d * n_len\n with tik_instance.for_range(0, d_d) as num_d:\n with tik_instance.for_range(0, self.c_1) as num_c1:\n ori_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * hwnoni * self.c_0\n trans_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * self.c_0\n nburst = hwnoni\n burst_len = self.c_0 // self.cp_align_len\n src_stride = 0\n dst_stride = (self.c_1 - 1) * self.c_0 // self.cp_align_len\n tik_instance.data_move(\n ub_trans[trans_cur],\n ub_ori[ori_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_cur = num_dhw * n_len * c_d\n dst_cur = num_dhw * c_d\n nburst = n_len\n burst_len = c_d // self.cp_align_len\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d // self.cp_align_len\n tik_instance.data_move(\n ub_ori[dst_cur],\n ub_trans[src_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n dst_offset = n_before * dhw_d * c_d\n burst_len = n_len * dhw_d * c_d // self.cp_align_len\n tik_instance.data_move(self.dst_gm[dst_offset],\n ub_ori,\n 0, 1, burst_len, 0, 0)", "def data_rearrange_case_five(self, tik_instance, ub_ori, ub_trans,\n loop_num):\n scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)\n with tik_instance.for_range(0, self.dst_shape[-4] // NUM_CUBE) \\\n as num_col_cube:\n with tik_instance.for_range(0, CUBE_SIZE * loop_num //\n MAX_REPEATS) as num_repeat_one:\n tik_instance.vadds(self.vadds_mask,\n ub_trans[num_col_cube * NUM_CUBE *\n loop_num * self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n num_repeat_one * CUBE_SIZE +\n MAX_MASK * num_col_cube],\n ub_ori[MAX_REPEATS * num_repeat_one *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n num_col_cube * MAX_MASK],\n scalar_zero, MAX_REPEATS, loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] * self.dst_shape[-1] //\n self.num_data)\n if self.vadds_mask == MAX_MASK // 2:\n tik_instance.vadds(self.vadds_mask,\n ub_trans[num_col_cube * NUM_CUBE *\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n num_repeat_one * CUBE_SIZE +\n MAX_MASK * num_col_cube +\n CUBE_SIZE // 2],\n ub_ori[MAX_REPEATS * num_repeat_one *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n num_col_cube * MAX_MASK +\n CUBE_SIZE // 2],\n scalar_zero, MAX_REPEATS, loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] *\n self.dst_shape[-1] // self.num_data)\n with tik_instance.if_scope((CUBE_SIZE * loop_num) %\n MAX_REPEATS != 0):\n tik_instance.vadds(self.vadds_mask,\n ub_trans[num_col_cube * NUM_CUBE 
*\n loop_num * self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) * CUBE_SIZE +\n MAX_MASK * num_col_cube],\n ub_ori[MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) *\n self.dst_shape[-1] *\n self.dst_shape[-4] + num_col_cube *\n MAX_MASK],\n scalar_zero,\n (CUBE_SIZE * loop_num) % MAX_REPEATS,\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] * self.dst_shape[-1] //\n self.num_data)\n if self.vadds_mask == MAX_MASK // 2:\n tik_instance.vadds(self.vadds_mask,\n ub_trans[num_col_cube * NUM_CUBE *\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) * CUBE_SIZE +\n MAX_MASK * num_col_cube +\n CUBE_SIZE // 2],\n ub_ori[MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n num_col_cube * MAX_MASK +\n CUBE_SIZE // 2],\n scalar_zero,\n (CUBE_SIZE * loop_num) % MAX_REPEATS,\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] *\n self.dst_shape[-1] //\n self.num_data)\n if self.dst_shape[-4] % NUM_CUBE != 0:\n with tik_instance.for_range(0, CUBE_SIZE * loop_num //\n MAX_REPEATS) as num_repeat_one:\n tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *\n CUBE_SIZE * self.vadds_mask // MAX_MASK,\n ub_trans[self.dst_shape[-4] // NUM_CUBE *\n NUM_CUBE * loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] + MAX_REPEATS *\n num_repeat_one * CUBE_SIZE +\n self.dst_shape[-4] // NUM_CUBE *\n MAX_MASK],\n ub_ori[MAX_REPEATS * num_repeat_one *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n self.dst_shape[-4] // NUM_CUBE *\n MAX_MASK],\n scalar_zero, MAX_REPEATS, loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] * self.dst_shape[-1] //\n self.num_data)\n if self.vadds_mask == MAX_MASK // 2:\n tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *\n CUBE_SIZE * self.vadds_mask // MAX_MASK,\n ub_trans[self.dst_shape[-4] //\n NUM_CUBE * NUM_CUBE *\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n num_repeat_one * CUBE_SIZE +\n self.dst_shape[-4] //\n NUM_CUBE * MAX_MASK +\n CUBE_SIZE // 2],\n ub_ori[MAX_REPEATS * num_repeat_one *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n self.dst_shape[-4] // NUM_CUBE *\n MAX_MASK + CUBE_SIZE // 2],\n scalar_zero, MAX_REPEATS, loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] *\n self.dst_shape[-1] // self.num_data)\n with tik_instance.if_scope((CUBE_SIZE * loop_num) %\n MAX_REPEATS != 0):\n tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *\n CUBE_SIZE * self.vadds_mask // MAX_MASK,\n ub_trans[self.dst_shape[-4] // NUM_CUBE *\n NUM_CUBE * loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) * CUBE_SIZE +\n self.dst_shape[-4] // NUM_CUBE *\n MAX_MASK],\n ub_ori[MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n self.dst_shape[-4] // NUM_CUBE *\n MAX_MASK],\n scalar_zero,\n (CUBE_SIZE * loop_num) % MAX_REPEATS,\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, 
self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] * self.dst_shape[-1] //\n self.num_data)\n if self.vadds_mask == MAX_MASK // 2:\n tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *\n CUBE_SIZE * self.vadds_mask // MAX_MASK,\n ub_trans[self.dst_shape[-4] //\n NUM_CUBE * NUM_CUBE *\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] +\n MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) * CUBE_SIZE +\n self.dst_shape[-4] //\n NUM_CUBE * MAX_MASK +\n CUBE_SIZE // 2],\n ub_ori[MAX_REPEATS *\n ((CUBE_SIZE * loop_num) //\n MAX_REPEATS) *\n self.dst_shape[-1] *\n self.dst_shape[-4] +\n self.dst_shape[-4] // NUM_CUBE *\n MAX_MASK + CUBE_SIZE // 2],\n scalar_zero,\n (CUBE_SIZE * loop_num) % MAX_REPEATS,\n loop_num *\n self.dst_shape[-2] *\n self.dst_shape[-1] // self.num_data +\n self.num_byte // 2, self.num_byte // 2,\n self.num_byte // 2,\n self.dst_shape[-4] *\n self.dst_shape[-1] //\n self.num_data)", "def hslArray_move(self, hslList, delay):\n # res\n\n res = self.hslArrayOfs_move(0,hslList, delay)\n return res", "def move(self): # AH note. Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def fun_no_cut(self, reg_x_len, n_size, block_index, n_loop):\n data_input_ub = self.tik_instance.Tensor(self.dtype_x,\n self.shape_v,\n name=\"data_input_ub\",\n scope=tik.scope_ubuf)\n input_indices_ub = self.tik_instance.Tensor(self.dtype_indices, (8,),\n name=\"input_indices_ub\",\n scope=tik.scope_ubuf)\n self.tik_instance.data_move(input_indices_ub[0],\n self.input_indices_gm[0], 0, 1, 1, 0, 0)\n reg_start = self.tik_instance.Scalar(dtype=\"int32\")\n reg_start.set_as(input_indices_ub[0])\n reg_burst = self.tik_instance.Scalar(dtype=\"int32\")\n if self.dtype_x in (\"float32\", \"int32\"):\n reg_burst.set_as(reg_x_len // 8)\n else:\n reg_burst.set_as(reg_x_len // 16)\n\n with self.tik_instance.for_range(0, n_loop) as n_index:\n with self.tik_instance.if_scope(\n block_index * n_size + n_index != reg_start):\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_x_gm[(block_index * n_size + n_index) *\n reg_x_len], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(data_input_ub[0],\n self.input_v_gm[0], 0, 1, reg_burst,\n 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[(block_index * n_size + n_index) * reg_x_len],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)", "def moveZeroes(self, n: List[int]) -> None:\n w = 0\n for r in range(len(n)):\n if n[r] != 0:\n n[r], n[w] = 0, n[r]\n w += 1", "def move(self, offset):\n self._transform(\n [\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ], center=None, offset=list(offset))", "def _shift(BD):\n bsz, n_head, max_len, _ = BD.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))\n BD = BD[:, :, :, max_len:]\n return BD", "def move_egg(array):\n new_array = deepcopy(array)\n old_x, old_y = get_random_position(array, 1)\n new_x, new_y = get_random_position(array, 0)\n new_array[old_y][old_x] = 0\n new_array[new_y][new_x] = 1\n return new_array", "def left_rotate_s4(arr, d):\n n = len(arr)\n g = gcd(d, n)\n for i in range(g):\n\n # move i-th values of blocks\n temp = arr[i]\n j = i\n while 1:\n k = j + d\n # print(\"K >= n : {} >= {}\".format(k, n), end=\"\\n\")\n if k >= n:\n k = k - n\n # print(\"K 
== i : {} == {}\".format(k, i), end=\"\\n\")\n if k == i:\n break\n # print(\"i: {}, j: {}, k: {}\".format(i, j, k), end=\"\\n\")\n arr[j] = arr[k]\n j = k\n\n arr[j] = temp", "def pre_or_post_turn(self, game_field, all_ghost_out:bool):\r\n\r\n reference_pos = self.pos[0] + self.grid_size // 2, self.pos[1] + self.grid_size // 2 #< Positon is set to center of Pac-Man so there is no difference in which direction he moves\r\n field = game_field.possible_way(reference_pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n self.dist = reference_pos[0] % self.grid_size, reference_pos[1] % self.grid_size\r\n\r\n # Check if Pac-Man is moving to the right \r\n if self.direction == 'r':\r\n\r\n # dist to the center of the crossing less then grid_size//2 -> it's a preturn\r\n if self.dist[0] < self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # dist to the center of the crossing greater then grid_size//2 -> it's a postturn\r\n elif self.dist[0] > self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n # The rest of the function does the same as above, just for the other three directions \r\n\r\n elif self.direction == 'l':\r\n #Preturn left\r\n if self.dist[0] > self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n #Postturn left\r\n elif self.dist[0] < self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'u':\r\n #Preturn up\r\n if self.dist[1] > 
self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n #Postturn up\r\n elif self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += self.grid_size - (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'd':\r\n #Preturn down\r\n if self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n #Postturn down\r\n elif self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n pass", "def lrshift(val, n) -> np.int64:\n return (val % (1 << 64)) >> n", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def _apply_array_spin1234(self, h1e: 'Nparray', h2e: 'Nparray',\n h3e: 'Nparray', h4e: 'Nparray') -> 'Nparray':\n norb = self.norb()\n tno = 2 * norb\n assert h4e.shape == (tno, tno, tno, tno, tno, tno, tno, tno)\n lena = self.lena()\n lenb = self.lenb()\n\n nh1e = numpy.copy(h1e)\n nh2e = numpy.copy(h2e)\n nh3e = numpy.copy(h3e)\n\n if fqe.settings.use_accelerated_code:\n _make_nh123(norb, h4e, nh1e, nh2e, nh3e)\n else:\n for i in range(norb * 2):\n for j in range(norb * 2):\n for k in range(norb * 2):\n nh1e[:, :] -= h4e[:, j, i, k, j, i, k, :]\n for l in range(norb * 2):\n nh2e[i, j, :, :] += (h4e[j, l, i, k, l, k, :, :] +\n h4e[i, j, l, k, l, k, :, :] +\n h4e[i, l, k, j, l, k, :, :] +\n h4e[j, i, k, l, l, k, :, :] +\n h4e[i, k, j, l, k, :, l, :] +\n h4e[j, i, k, l, k, :, l, :] +\n h4e[i, j, k, l, :, k, l, :])\n nh3e[i, j, k, :, :, :] += (\n h4e[k, i, j, l, l, :, :, :] +\n h4e[j, i, l, k, l, :, :, :] +\n h4e[i, l, j, k, l, :, :, :] +\n h4e[i, k, j, l, :, l, :, :] +\n h4e[i, j, l, k, :, l, :, :] +\n h4e[i, j, k, l, :, :, l, :])\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n evecaa = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecab = numpy.zeros((norb, norb, 
norb, norb, lena, lenb),\n dtype=self._dtype)\n evecba = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n evecbb = numpy.zeros((norb, norb, norb, norb, lena, lenb),\n dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n tmp = self._calculate_dvec_spin_with_coeff(dveca[i, j, :, :])\n evecaa[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecba[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n tmp = self._calculate_dvec_spin_with_coeff(dvecb[i, j, :, :])\n evecab[:, :, i, j, :, :] = tmp[0][:, :, :, :]\n evecbb[:, :, i, j, :, :] = tmp[1][:, :, :, :]\n\n out = self._apply_array_spin123(nh1e, nh2e, nh3e, (dveca, dvecb),\n (evecaa, evecab, evecba, evecbb))\n\n def ncon(A, B):\n \"\"\"Tensor contraction and transposition corresponding with\n einsum 'ikmojlnp,mnopxy->ijklxy'\n \"\"\"\n return numpy.transpose(numpy.tensordot(A,\n B,\n axes=((2, 6, 3, 7), (0, 1, 2,\n 3))),\n axes=(0, 2, 1, 3, 4, 5))\n\n n = norb # shorter\n nevecaa = ncon(h4e[:n, :n, :n, :n, :n, :n, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, :n, :n, n:, :n, :n, :n, n:], evecab) \\\n + ncon(h4e[:n, :n, n:, n:, :n, :n, n:, n:], evecbb)\n\n nevecab = ncon(h4e[:n, n:, :n, :n, :n, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[:n, n:, :n, n:, :n, n:, :n, n:], evecab) \\\n + ncon(h4e[:n, n:, n:, n:, :n, n:, n:, n:], evecbb)\n\n nevecbb = ncon(h4e[n:, n:, :n, :n, n:, n:, :n, :n], evecaa) \\\n + 2.0 * ncon(h4e[n:, n:, :n, n:, n:, n:, :n, n:], evecab) \\\n + ncon(h4e[n:, n:, n:, n:, n:, n:, n:, n:], evecbb)\n\n dveca2 = numpy.zeros(dveca.shape, dtype=self._dtype)\n dvecb2 = numpy.zeros(dvecb.shape, dtype=self._dtype)\n for i in range(norb):\n for j in range(norb):\n dveca[:, :, :, :] = nevecaa[i, j, :, :, :, :]\n dvecb[:, :, :, :] = nevecab[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dveca2[i, j, :, :] += cvec[:, :]\n\n dveca[:, :, :, :] = nevecab[:, :, i, j, :, :]\n dvecb[:, :, :, :] = nevecbb[i, j, :, :, :, :]\n cvec = self._calculate_coeff_spin_with_dvec((dveca, dvecb))\n dvecb2[i, j, :, :] += cvec[:, :]\n\n out += self._calculate_coeff_spin_with_dvec((dveca2, dvecb2))\n return out", "def left_rotate_s3(arr, d):\n n = len(arr)\n for i in range(d):\n temp = arr[0]\n del arr[0]\n arr.append(temp)", "def shift(clump,shiftRight = True):\n try:\n DomainRight = clump.ds[\"DomainRightEdge\"]\n DomainLeft = clump.ds[\"DomainLeftEdge\"]\n DomainWidth = DomainRight - DomainLeft\n except:\n DomainRight = clump.data.ds[\"DomainRightEdge\"]\n DomainLeft = clump.data.ds[\"DomainLeftEdge\"]\n DomainWidth = DomainRight - DomainLeft\n shift = np.zeros(3)\n for i,axis in enumerate(['x','y','z']):\n\n dx = 'd'+axis\n nique = np.unique(clump[axis]).v\n nique.sort()\n max_dx = clump[dx].max()\n\n #has to be close to the edges, or 'Periodic Wrap' isn't the problem.\n if np.abs(nique.max() - DomainRight[i]) > 3*max_dx.v:\n continue\n if np.abs(nique.min() - DomainLeft[i]) > 3*max_dx.v:\n continue\n delta_x = nique[1:] - nique[0:-1]\n break_index = np.where(delta_x > max_dx.v)\n\n if break_index[0].size > 1:\n clump.CheckThisClump = True\n if break_index[0].size == 1:\n break_x = nique[break_index[0]]\n if shiftRight:\n all_to_shift = np.where( clump[axis] <= clump.ds.quan(break_x,'code_length') + clump[dx].min() )[0]\n clump[axis][all_to_shift] += clump.ds.arr(DomainWidth[i],'code_length')\n shift[i] = DomainWidth[i]\n else:\n all_to_shift = np.where( clump[axis] >= clump.ds.quan(break_x,'code_length') - clump[dx].min() )[0]\n clump[axis][all_to_shift] -= clump.ds.arr(DomainWidth[i],'code_length')\n 
shift[i] = -DomainWidth[i]\n \n try: \n if clump.stuff:\n clump.stuff.shift = shift\n except:\n pass\n return shift", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def pixel_shift_fun(self, i, points, image_shape):\n self.delta_0 = np.round(self.displacements[:, i, 0]).astype(int)\n self.delta_1 = np.round(self.displacements[:, i, 1]).astype(int)\n \n # Exlude the points that have displacement going outside of the image range\n out_of_range_it = np.logical_or(self.delta_0 + points[:, 0] > image_shape[0] - 1, self.delta_1 + points[:, 1] > image_shape[1] - 1)\n if np.any(out_of_range_it):\n self.delta_0[out_of_range_it] = 0\n self.delta_1[out_of_range_it] = 0\n self.valid_points[out_of_range_it] = False\n warnings.warn('Displacement is going outside of the image range! 
The valid points are saved in self.method.valid_points')\n self.displacements[~self.valid_points, i, :] = np.nan", "def _TODOStepsScipy(z, nstep, refr, Fin):\n\n if Fin._curvature != 0.0:\n raise ValueError('Cannot operate on spherical coords.'\n + 'Use Convert() first')\n Fout = Field.copy(Fin)\n N = Fout.N\n lam = Fout.lam\n size = Fout.siz\n dtype = Fout._dtype\n \n legacy = True\n if legacy:\n Pi = 3.141592654 #to compare Cpp results accurately\n else:\n Pi = _np.pi\n K = 2.*Pi/lam\n z = z/2.\n Pi4lz = 4.*Pi/lam/z\n imPi4lz = 1j * Pi4lz\n \n delta = size/(N-1.) #dx\n delta2 = delta*delta\n \n n = 100\n c = 1\n # n = N\n # c = delta**2\n def f(u, ):\n return u**3\n \n def f_prime(u):\n return 3 * u**2\n \n def fun(u, n, f, f_prime, c, **kwargs):\n v = _np.zeros((n + 2, n + 2))\n u = u.reshape((n, n))\n v[1:-1, 1:-1] = u\n y = v[:-2, 1:-1] + v[2:, 1:-1] + v[1:-1, :-2] + v[1:-1, 2:] - 4 * u + c * f(u)\n return y.ravel()\n\n def compute_jac_indices(n):\n i = _np.arange(n)\n jj, ii = _np.meshgrid(i, i)\n \n ii = ii.ravel()\n jj = jj.ravel()\n \n ij = _np.arange(n**2)\n \n jac_rows = [ij]\n jac_cols = [ij]\n \n mask = ii > 0\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask - n)\n \n mask = ii < n - 1\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask + n)\n \n mask = jj > 0\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask - 1)\n \n mask = jj < n - 1\n ij_mask = ij[mask]\n jac_rows.append(ij_mask)\n jac_cols.append(ij_mask + 1)\n \n return _np.hstack(jac_rows), _np.hstack(jac_cols)\n jac_rows, jac_cols = compute_jac_indices(N)\n # u0 = np.ones(n**2) * 0.5\n u0 = Fin.field.ravel() #initial guess is old field\n \n def jac(u, n, f, f_prime, c, jac_rows=None, jac_cols=None):\n jac_values = _np.ones_like(jac_cols, dtype=float)\n jac_values[:n**2] = -4 + c * f_prime(u)\n return coo_matrix((jac_values, (jac_rows, jac_cols)),\n shape=(n**2, n**2))\n \n res_1 = least_squares(fun, u0.real, jac=jac, gtol=1e-3,\n args=(N, f, f_prime, c),\n kwargs={'jac_rows': jac_rows,\n 'jac_cols': jac_cols},\n verbose=0)\n # print(res_1)\n Fout.field = res_1.x.reshape((N, N))\n Fout._IsGauss=False\n return Fout", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def move(self, imember, jmember, factor=0.2, in_place=False):\n\n x1 = self.db[imember]['x']\n x2 = self.db[jmember]['x']\n newx = x1 + factor * (x2 - x1)\n if not in_place:\n new_ident = self.new_identifier()\n self.actives.append(new_ident)\n self.members.append(new_ident)\n else:\n new_ident = imember\n\n # print 'Moving',imember,'located at',x1\n if new_ident not in self.moves:\n # print 'No previous'\n self.moves[new_ident] = np.vstack((x1, newx))\n else:\n # print 'With previous'\n self.moves[new_ident] = np.vstack((self.moves[new_ident], newx))\n # print self.moves[new_ident]\n self.db[new_ident] = {'x': 
newx, 'fx': None}\n self.evaluate_entry(new_ident)\n return new_ident" ]
[ "0.7170832", "0.6100913", "0.5694191", "0.562633", "0.5549602", "0.5426374", "0.54186237", "0.5341644", "0.5283791", "0.5212293", "0.49859214", "0.4963954", "0.49574816", "0.4957023", "0.4904727", "0.4896321", "0.48726845", "0.4868087", "0.48667774", "0.4864888", "0.4824995", "0.48084083", "0.48082078", "0.48076808", "0.48027065", "0.4799215", "0.47902367", "0.47746342", "0.47687024", "0.4759916", "0.47558936", "0.47540787", "0.47504762", "0.4738017", "0.471187", "0.47020003", "0.46958044", "0.46592593", "0.4658086", "0.46560207", "0.4651318", "0.4651318", "0.4640382", "0.4639206", "0.4638706", "0.46347663", "0.46216646", "0.46194637", "0.46130973", "0.45904005", "0.45904005", "0.45863098", "0.45777738", "0.45759007", "0.45731154", "0.45679885", "0.45557365", "0.45400596", "0.4535333", "0.4531424", "0.45151016", "0.45116687", "0.45097497", "0.45077205", "0.4498141", "0.4491477", "0.44862416", "0.4480041", "0.4479142", "0.44766837", "0.4474322", "0.44692647", "0.44569522", "0.4456738", "0.44561195", "0.445553", "0.44513667", "0.44495478", "0.44396108", "0.44388118", "0.44380817", "0.4435165", "0.4434167", "0.44341478", "0.44326094", "0.44285193", "0.44277617", "0.44275296", "0.44268823", "0.44244543", "0.44220504", "0.4419099", "0.44189474", "0.44182357", "0.441727", "0.44163656", "0.44163588", "0.44160378", "0.441547", "0.44151783" ]
0.77101654
0
Shift the field 'field_array' by n_move cells on the GPU. This is done in spectral space and corresponds to multiplying the fields with the factor exp(i*kz_true*dz)**n_move.
def shift_spect_array_gpu( field_array, shift_factor, n_move ): # Get a 2D CUDA grid iz, ir = cuda.grid(2) # Only access values that are actually in the array if ir < field_array.shape[1] and iz < field_array.shape[0]: power_shift = 1. + 0.j # Calculate the shift factor (raising to the power n_move ; # for negative n_move, we take the complex conjugate, since # shift_factor is of the form e^{i k dz}) for i in range( abs(n_move) ): power_shift *= shift_factor[iz] if n_move < 0: power_shift = power_shift.conjugate() # Shift fields field_array[iz, ir] *= power_shift
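A quick way to sanity-check the idea behind this kernel (an illustrative sketch, not part of the dataset row above): on the CPU, with numpy's FFT sign conventions, multiplying each longitudinal spectral mode by exp(i*kz*dz)**n_move is equivalent to rolling the field by n_move cells along z. The sizes Nz, Nr and the spacing dz below are made-up values chosen only for the demonstration.

import numpy as np

Nz, Nr, n_move = 64, 8, 3
dz = 0.1
field = np.random.rand(Nz, Nr)                 # real-space field F(z, r)

kz = 2 * np.pi * np.fft.fftfreq(Nz, d=dz)      # longitudinal wavenumbers
shift_factor = np.exp(1j * kz * dz)            # one-cell shift factor per kz mode

spect = np.fft.fft(field, axis=0)              # transform along z
spect *= shift_factor[:, None] ** n_move       # same multiplication the kernel performs
shifted = np.fft.ifft(spect, axis=0).real      # back to real space

# With numpy's FFT convention, an integer-cell shift is exact and matches np.roll
assert np.allclose(shifted, np.roll(field, -n_move, axis=0))

The sign of the roll depends on the FFT convention in use; the point is only that the per-mode multiplication performed element-wise by the kernel reproduces a rigid shift of the grid contents.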
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift_spect_array_cpu( field_array, shift_factor, n_move ):\n Nz, Nr = field_array.shape\n\n # Loop over the 2D array (in parallel over z if threading is enabled)\n for iz in prange( Nz ):\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift the fields\n for ir in range( Nr ):\n field_array[iz, ir] *= power_shift", "def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time", "def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, 
shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )", "def TransformUpMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = i\n while l <= j:\n line.append(field[l])\n l = l + side\n\n line = move(line)\n j = len(field) - side + i\n l = i\n k = 0\n while l <= j:\n field[l] = line[k]\n l = l + side\n k = k + 1\n i = i + 1\n return field", "def grid_shift(grid, advection, trim_edges=0, field_list=None):\n if trim_edges == 0:\n trim_slice = slice(None, None)\n else:\n trim_slice = slice(int(trim_edges), -int(trim_edges))\n\n shifted_grid = copy.deepcopy(grid)\n\n # grab the x and y axis and trim\n shifted_grid.x[\"data\"] = grid.x[\"data\"][trim_slice].copy()\n shifted_grid.y[\"data\"] = grid.y[\"data\"][trim_slice].copy()\n\n # shift each field.\n if field_list is None:\n field_list = grid.fields.keys()\n\n for field in field_list:\n # copy data and fill with nans\n data = grid.fields[field][\"data\"].copy()\n data = np.ma.filled(data, np.nan)\n\n # shift the data\n shifted_data = shift(data, [0, advection[0], advection[1]], prefilter=False)\n\n # mask invalid, trim and place into grid\n shifted_data = np.ma.fix_invalid(\n shifted_data, copy=False, fill_value=get_fillvalue()\n )\n shifted_data = shifted_data[:, trim_slice, trim_slice]\n shifted_grid.fields[field][\"data\"] = shifted_data\n\n return shifted_grid", "def TransformRightMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side) - 1\n line = []\n for x in range(j, i - 1, -1):\n line.append(field[x])\n line = move(line)\n k = 0\n for x in range(j, i - 1, -1):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field", "def make_move(self, board, fieldy, fieldx):\n board[self.posy][self.posx], board[fieldy][fieldx] = board[fieldy][fieldx], board[self.posy][self.posx]\n self.posy = fieldy\n self.posx = fieldx", "def TransformDownMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = j\n while l >= i:\n line.append(field[l])\n l = l - side\n\n line = move(line)\n j = len(field) - side + i\n l = j\n k = 0\n while l >= i:\n field[l] = line[k]\n l = l - side\n k = k + 1\n i = i + 1\n return field", "def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]", "def TransformLeftMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side)\n line = []\n for x in range(i, 
j):\n line.append(field[x])\n\n line = move(line)\n k = 0\n for x in range(i, j):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field", "def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)", "def ShiftFrame(Frame, PixShift):\n \n import numpy as np\n \n F, R, C = Frame.shape\n \n if F > 1:\n msg = f\"'Frame' must be a 2D frame with shape (1, R, C) but has shape\"\\\n + f\" ({F}, {R}, {C}).\"\n \n raise Exception(msg)\n \n # Initialise ShiftedFrame:\n ShiftedFrame = np.zeros((1, R, C), dtype='uint')\n #ShiftedFrame = np.empty_like(Frame, dtype='uint') # this creates 42,932\n # unique values for some reason!\n \n #unique = UniqueItems(Nda=Frame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in Frame')\n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the initialised',\n # f'ShiftedFrame: {unique[:11]}...')\n \n di, dj, dk = PixShift\n \n ##ShiftedFrame[0, dj:, di:] = Frame[0, :-(1+dj), :-(1+di)]\n ##ShiftedFrame[0, :-(1+dj), :-(1+di)] = Frame[0, dj:, di:]\n #ShiftedFrame[0, :R-dj, :C-di] = Frame[0, dj:, di:]\n \n if di > 0 and dj > 0:\n ShiftedFrame[0, dj:, di:] = Frame[0, :-dj, :-di]\n \n elif di < 0 and dj < 0:\n ShiftedFrame[0, :dj, :di] = Frame[0, -dj:, -di:]\n \n elif di > 0 and dj < 0:\n ShiftedFrame[0, :dj, di:] = Frame[0, -dj:, :-di]\n \n elif di < 0 and dj > 0:\n ShiftedFrame[0, dj:, :di] = Frame[0, :-dj, -di:]\n \n elif di == 0 and dj > 0:\n ShiftedFrame[0, dj:, :] = Frame[0, :-dj, :]\n \n elif di == 0 and dj < 0:\n ShiftedFrame[0, :dj, :] = Frame[0, -dj:, :]\n \n elif di > 0 and dj == 0:\n ShiftedFrame[0, :, di:] = Frame[0, :, :-di]\n \n elif di < 0 and dj == 0:\n ShiftedFrame[0, :, :di] = Frame[0, :, -di:]\n \n elif di == 0 and dj == 0:\n ShiftedFrame[0] = Frame[0]\n \n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the ShiftedFrame',\n # 'after shifting.')\n \n return 
ShiftedFrame", "def yank(self):\r\n self.block.bucket_array.yank_cell(self)", "def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def Repeater(arr,n):\n new_arr = np.zeros((arr.shape[0]*n,arr.shape[1]),dtype=object)\n for i in range(0,arr.shape[0]):\n new_row = np.tile(arr[i,:],(n,1))\n new_arr[i*n:(i+1)*n,:] = new_row\n return new_arr", "def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()", "def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 = int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n Fout._IsGauss=False\n return Fout", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = 
col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] 
= field_data", "def realign_image(arr, shift, angle=0):\n # if both shifts are integers, do circular shift; otherwise perform Fourier shift.\n if np.count_nonzero(np.abs(np.array(shift) - np.round(shift)) < 0.01) == 2:\n temp = np.roll(arr, int(shift[0]), axis=0)\n temp = np.roll(temp, int(shift[1]), axis=1)\n temp = temp.astype('float32')\n else:\n temp = fourier_shift(np.fft.fftn(arr), shift)\n temp = np.fft.ifftn(temp)\n temp = np.abs(temp).astype('float32')\n return temp", "def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self", "def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)", "def offsetElements(self, i):\n\n #iterate over each tile and subtract\n #if the value is -1, indicating a blank tile, leave it as that\n for y in range(0, len(self.array)):\n for x in range(0, len(self.array[0])):\n if self.array[y][x] != -1:\n self.array[y][x] -= i", "def rgbArray_move(self, rgbList, delay):\n # res\n\n res = self.rgbArrayOfs_move(0,rgbList,delay)\n return res", "def DELAY(A, n):\r\n At = pivot_table(A)\r\n res = At.shift(n)\r\n res = stack_table(res)\r\n return res", "def piecewise_transform(image, numcols=5, numrows=5, warp_left_right=10, warp_up_down=10, order=1):\n\n rows, cols = image.shape[0], image.shape[1]\n\n numcols = numcols\n numrows = numrows\n\n src_cols = np.linspace(0, cols, numcols, dtype=int)\n src_rows = np.linspace(0, rows, numrows, dtype=int)\n src_rows, src_cols = np.meshgrid(src_rows, src_cols)\n src = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n src_rows_new = np.ndarray.transpose(src_rows)\n src_cols_new = np.ndarray.transpose(src_cols)\n # src_new = np.dstack([src_cols_new.flat, src_rows_new.flat])[0]\n\n dst_cols = np.ndarray(src_cols.shape)\n dst_rows = np.ndarray(src_rows.shape)\n for i in range(0, numcols):\n for j in range(0, numrows):\n if src_cols[i, j] == 0 or src_cols[i, j] == cols:\n dst_cols[i, j] = src_cols[i, j]\n else:\n dst_cols[i, j] = src_cols[i, j] + np.random.uniform(-1, 1) * warp_left_right\n\n if src_rows[i, j] == 0 or src_rows[i, j] == rows:\n dst_rows[i, j] = src_rows[i, j]\n else:\n dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down\n\n dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]\n\n # dst_rows_new = np.ndarray.transpose(dst_rows)\n # dst_cols_new = np.ndarray.transpose(dst_cols)\n # dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]\n\n tform = transform.PiecewiseAffineTransform()\n tform.estimate(src, dst)\n\n img_new = transform.warp(image, tform, output_shape=(rows, cols), order=order, preserve_range=True)\n img_new = img_new.astype(image.dtype)\n \n return img_new", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def _shift(BD):\n bsz, n_head, max_len, _ = BD.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n BD = layers.reshape(x=layers.concat([BD, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n BD = layers.reshape(x=BD[:, :, :-1], shape=(bsz, n_head, max_len, -1))\n BD = BD[:, :, :, max_len:]\n return BD", "def fshift_nb(a, n):\n return fshift_1d_nb(a, n)", "def fshift_1d_nb(a, 
n):\n out = np.empty_like(a, dtype=np.float_)\n out[:n] = np.nan\n out[n:] = a[:-n]\n return out", "def _shift_amplitudes(qc, n, inplace=False):\n if not inplace:\n qc = qc.copy()\n for q_reg in qc.qregs:\n # Unitary gate representing the shift operation on n qubits\n shift_matrix = np.roll(np.eye(2**q_reg.size), n, axis=1)\n # Add the gate to the circuit\n qc.append(UnitaryGate(shift_matrix), q_reg)\n return qc", "def _rel_shift_legacy(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(1, 2, 0, 3).contiguous().view(qlen, klen, bs * n_heads)\n zero_pad = xs.new_zeros((qlen, 1, bs * n_heads))\n xs_shifted = torch.cat([zero_pad, xs], dim=1).view(klen + 1, qlen, bs * n_heads)[1:].view_as(xs)\n return xs_shifted.view(qlen, klen, bs, n_heads).permute(2, 0, 1, 3)", "def make_move(self, row:int, col:int,curr_move):\n self.array[row][col] = curr_move", "def move(degs, i, j, n):\n if n > 0:\n temp = 3 - i - j\n move(degs, i, temp, n - 1) \n degs[j].append(degs[i].pop(-1))\n print(degs)\n move(degs, temp, j, n - 1)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n\tfactor = 2**(1.0 * n / 12.0)\n\tstretched = stretch(snd_array, 1.0/factor, window_size, h)\n\treturn speedx(stretched[window_size:], factor)", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def step(self):\n\t\tnewBoard = CellArray(self.size)\n\t\tfor i in range(0, self.size, 1):\n\t\t\tfor j in range(0, self.size, 1):\n\t\t\t\tnewBoard.board[i][j] = self.changeCell(i, j)\n\t\tself.board = newBoard.board", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def move_egg(array):\n new_array = deepcopy(array)\n old_x, old_y = get_random_position(array, 1)\n new_x, new_y = get_random_position(array, 0)\n new_array[old_y][old_x] = 0\n new_array[new_y][new_x] = 1\n return new_array", "def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx_0:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx_0],\n src[in_offset + sub_h_idx_0 * w_size],\n 0, 1, sub_w_block, 0, 0)\n # move in one more block of h\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(\n dst[sub_w_block * data_cnt_one_block * (sub_h_align_block_size + sub_h_idx_1)],\n src[in_offset + (sub_h_idx_1 + sub_h_size - data_cnt_one_block) * 
w_size],\n 0, 1, sub_w_block, 0, 0)", "def update(frame_num, mat, grid, N):\n\n new_grid = np.copy(grid)\n #print(\"grid size:\", grid.shape)\n for i in range(1, grid.shape[0]-1):\n for j in range(1, grid.shape[1]-1):\n neighbors = int(grid[i-1, j] + grid[i+1, j] + \\\n grid[i, j+1] + grid[i, j-1] + \\\n grid[i-1,j-1] + grid[i+1,j+1] + \\\n grid[i+1,j-1] + grid[i-1,j+1])\n if grid[i, j] == ON:\n if not (2 <= neighbors <= 3):\n new_grid[i, j] = OFF\n elif grid[i, j] == OFF and neighbors == 3:\n # Grow a cell\n new_grid[i, j] = ON\n else:\n new_grid[i, j] = OFF\n\n ### Update new grid\n mat.set_data(new_grid)\n grid[:] = new_grid[:] # Brackets are important\n return mat", "def train_sample_windowize(field, delta=1, n=20):\n padded = np.pad(field, delta, mode='constant', constant_values=-1)\n X = np.zeros((n * n, (1 + delta * 2) ** 2))\n for i in range(n):\n for j in range(n):\n X[i * n + j] = padded[i:i + 2 * delta + 1, j:j + 2 * delta + 1].ravel()\n return X", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def pitchshift(snd_array, n, window_size=2**13, h=2**11):\n factor = 2**(1.0 * n / 12.0)\n stretched = stretch(snd_array, 1.0/factor, window_size, h)\n return speedx(stretched[window_size:], factor)", "def move(self, step):\n for point in self.points:\n l = min(len(step), len(point.position))\n for i in range(l):\n point.position[i] = step[i]", "def flow_to_warp(flow):\n batch, _, ht, wd = flow.shape\n coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))\n coords = torch.stack(coords[::-1], dim=0).float()\n coords = coords[None].repeat(batch, 1, 1, 1)\n return coords + flow", "def _assemble_tiles(i, n, tile, tsincr_g, output_dir, outtype):\n # pylint: disable=too-many-arguments\n tsincr_file = os.path.join(output_dir, '{}_{}.npy'.format(outtype, n))\n tsincr = np.load(file=tsincr_file)\n tsincr_g[tile.top_left_y:tile.bottom_right_y, tile.top_left_x:tile.bottom_right_x] = tsincr[:, :, i]", "def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0", "def _roll(self):\n self.order = np.roll(self.order, 1)", "def move(self, offset):\n self._transform(\n [\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ], center=None, offset=list(offset))", "def pre_or_post_turn(self, game_field, all_ghost_out:bool):\r\n\r\n reference_pos = self.pos[0] + self.grid_size // 2, self.pos[1] + self.grid_size // 2 #< Positon is set to center of Pac-Man so there is no difference in which direction he moves\r\n field = game_field.possible_way(reference_pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n self.dist = reference_pos[0] % self.grid_size, reference_pos[1] % self.grid_size\r\n\r\n # Check if Pac-Man is moving to the right \r\n if self.direction == 'r':\r\n\r\n # dist to the center of the crossing less then grid_size//2 -> it's a preturn\r\n if self.dist[0] < self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move 
to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # dist to the center of the crossing greater then grid_size//2 -> it's a postturn\r\n elif self.dist[0] > self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n # The rest of the function does the same as above, just for the other three directions \r\n\r\n elif self.direction == 'l':\r\n #Preturn left\r\n if self.dist[0] > self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n #Postturn left\r\n elif self.dist[0] < self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'u':\r\n #Preturn up\r\n if self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n #Postturn up\r\n elif self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += self.grid_size - (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'd':\r\n #Preturn down\r\n if self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] 
!= 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n #Postturn down\r\n elif self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n pass", "def shift_board(self, dx, dy):\n self.board = np.roll(self.board, dy, axis=0)\n self.board = np.roll(self.board, dx, axis=1)\n self.agent_locs += [dy, dx]\n self.agent_locs %= self.board.shape\n self.update_exit_locs()", "def iquadshift(a):\r\n if len(a.shape) == 1 :\r\n b = np.roll(a, +(a.shape[-1]/2-1), -1)\r\n else :\r\n b = np.roll(a, +(a.shape[-2]/2-1), -2)\r\n b = np.roll(b, +(b.shape[-1]/2-1), -1)\r\n return b", "def shiftFlows(self, targetFlows, stepSize):\n for l in self.link:\n current = self.link[l].flow\n target = targetFlows[l]\n new = current *(1 - stepSize)+target*stepSize\n self.link[l].flow = new\n self.link[l].updateCost()", "def warp(x, flo):\n x=torch.squeeze(x,2)\n flo=torch.squeeze(flo,2)\n B, C, H, W = x.size()\n # mesh grid \n xx = torch.arange(0, W).view(1,-1).repeat(H,1)\n yy = torch.arange(0, H).view(-1,1).repeat(1,W)\n xx = xx.view(1,1,H,W).repeat(B,1,1,1)\n yy = yy.view(1,1,H,W).repeat(B,1,1,1)\n grid = torch.cat((xx,yy),1).float()\n\n #if x.is_cuda:\n # grid = grid.cuda()\n vgrid = torch.Tensor(grid).cuda() - flo.cuda()\n\n # scale grid to [-1,1] \n vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone() / max(W-1,1)-1.0\n vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone() / max(H-1,1)-1.0\n\n vgrid = vgrid.permute(0,2,3,1) \n #x=x.cuda()\n output = nn.functional.grid_sample(x, vgrid,mode='bilinear')\n mask = torch.Tensor(torch.ones(x.size())).cuda()\n mask = nn.functional.grid_sample(mask, vgrid,mode='bilinear')\n\n # if W==128:\n # np.save('mask.npy', mask.cpu().data.numpy())\n # np.save('warp.npy', output.cpu().data.numpy())\n \n mask[mask<0.9999] = 0\n mask[mask>0] = 1\n return torch.unsqueeze(output,2),torch.unsqueeze(mask,2)", "def test_Pad3D9():\n input_shape = (1, 2, 3)\n pad = [1, 2]\n mode = \"reflect\"\n res = [[[2, 1, 2, 3, 2, 1], [5, 4, 5, 6, 5, 4]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data=data, data_format=\"NCL\")", "def resampz(x, m_type, shift=1):\n sx = np.array(x.shape)\n\n if m_type == 0 or m_type == 1:\n y = np.zeros((sx[0] + np.abs(shift * (sx[1] - 1)), sx[1]))\n\n if m_type == 0:\n shift1 = np.arange(0, sx[1]) * (- shift)\n else:\n shift1 = np.arange(0, sx[1]) * shift\n\n if shift1[-1] < 0:\n shift1 = shift1 - shift1[-1]\n\n for n in range(sx[1]):\n y[shift1[n] + np.arange(0, sx[0]), n] = x[:, n]\n\n # Remove extra rows\n start = 0\n finish = y.shape[0]\n\n while np.linalg.norm(y[start, :], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[finish-1, :], 2) == 0:\n finish -= 1\n\n y = y[start:finish, :]\n\n elif m_type == 2 or m_type == 3:\n y = np.zeros((sx[0], sx[1] + np.abs(shift * (sx[0] - 1))))\n\n if m_type == 2:\n shift2 = np.arange(0, sx[0]) * (- shift)\n else:\n shift2 = np.arange(0, sx[0]) * shift\n\n if shift2[-1] < 0:\n shift2 = shift2 - shift2[-1]\n\n for m in range(sx[0]):\n y[m, shift2[m] + np.arange(0, sx[1])] = x[m, :]\n\n # 
Remove extra rows\n start = 0\n finish = y.shape[1]\n\n while np.linalg.norm(y[:, start], 2) == 0:\n start += 1\n\n while np.linalg.norm(y[:, finish-1], 2) == 0:\n finish -= 1\n\n y = y[:, start:finish]\n\n else:\n print('Error: type not valid.')\n y = 0\n\n return y", "def roll(arrayin, shift = (0, 0), silent = True):\r\n arrayout = arrayin.copy()\r\n # if shift is integer valued then use np.roll\r\n if (type(shift[0]) == int) or (type(shift[0]) == np.int) or (type(shift[0]) == np.int32) or (type(shift[0]) == np.int64):\r\n if shift[-1] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-1], -1)'\r\n arrayout = np.roll(arrayout, shift[-1], -1)\r\n # if shift is 1d then don't roll the other dim (if it even exists)\r\n if len(arrayout.shape) >= 2 :\r\n if shift[-2] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-2], -2)'\r\n arrayout = np.roll(arrayout, shift[-2], -2)\r\n # if shift is float valued then use the Fourier shift theorem\r\n elif (type(shift[0]) == float) or (type(shift[0]) == np.float32) or (type(shift[0]) == np.float64):\r\n # if shift is 1d\r\n if len(shift) == 1 :\r\n if silent == False :\r\n print 'arrayout = fftn_1d(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn_1d(arrayout)'\r\n arrayout = fftn_1d(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn_1d(arrayout)\r\n elif len(shift) == 2 :\r\n if silent == False :\r\n print 'arrayout = fftn(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn(arrayout)'\r\n arrayout = fftn(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn(arrayout)\r\n return arrayout", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def step(self, move):", "def _field_Fresnel(z, field, dx, lam, dtype, usepyFFTW):\n \n \"\"\" *************************************************************\n Major differences to Cpp based LP version:\n - dx =siz/N instead of dx=siz/(N-1), more consistent with physics \n and rest of LP package\n - fftw DLL uses no normalization, numpy uses 1/N on ifft -> omitted\n factor of 1/(2*N)**2 in final calc before return\n - bug in Cpp version: did not touch top row/col, now we extract one\n more row/col to fill entire field. 
No errors noticed with the new\n method so far\n ************************************************************* \"\"\"\n _using_pyfftw = False # determined if loading is successful \n if usepyFFTW or _USE_PYFFTW:\n try:\n import pyfftw as _pyfftw\n from pyfftw.interfaces.numpy_fft import fft2 as _fft2\n from pyfftw.interfaces.numpy_fft import ifft2 as _ifft2\n _fftargs = {'planner_effort': 'FFTW_ESTIMATE',\n 'overwrite_input': True,\n 'threads': -1} #<0 means use multiprocessing.cpu_count()\n _using_pyfftw = True \n except ImportError:\n #import warnings\n #warnings.warn(_WARNING)\n _WARNING = '\\n**************************** WARNING ***********************\\n'\\\n +'In the Fresnel command you required FFT with the pyFFTW package.\\n'\\\n +'or _USE_PYFFTW = True in your config.py file.\\n'\\\n +'However LightPipes cannot import pyFFTW because it is not installed.\\n'\\\n +'Falling back to numpy.fft.\\n'\\\n +'(Try to) install pyFFTW on your computer for faster performance.\\n'\\\n +'Enter at a terminal prompt: python -m pip install pyfftw.\\n'\\\n +'Or reinstall LightPipes with the option pyfftw\\n'\\\n +'Enter: python -m pip install lightpipes[pyfftw]\\n\\n'\\\n +'*************************************************************'\n print(_WARNING)\n if not _using_pyfftw:\n from numpy.fft import fft2 as _fft2\n from numpy.fft import ifft2 as _ifft2\n _fftargs = {}\n tictoc.tic()\n N = field.shape[0] #assert square\n \n legacy = True #switch on to numerically compare oldLP/new results\n if legacy:\n kz = 2.*3.141592654/lam * z\n siz = N*dx\n dx = siz/(N-1) #like old Cpp code, even though unlogical\n else:\n kz = 2*_np.pi/lam*z\n \n \n cokz = _np.cos(kz)\n sikz = _np.sin(kz)\n \n No2 = int(N/2) #\"N over 2\"\n \"\"\"The following section contains a lot of uses which boil down to\n 2*No2. For even N, this is N. For odd N, this is NOT redundant:\n 2*No2 is N-1 for odd N, therefore sampling an even subset of the\n field instead of the whole field. Necessary for symmetry of first\n step involving Fresnel integral calc.\n \"\"\"\n if _using_pyfftw:\n in_outF = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n in_outK = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n else:\n in_outF = _np.zeros((2*N, 2*N),dtype=dtype)\n in_outK = _np.zeros((2*N, 2*N),dtype=dtype)\n \n \"\"\"Our grid is zero-centered, i.e. the 0 coordiante (beam axis) is\n not at field[0,0], but field[No2, No2]. 
The FFT however is implemented\n such that the frequency 0 will be the first element of the output array,\n and it also expects the input to have the 0 in the corner.\n For the correct handling, an fftshift is necessary before *and* after\n the FFT/IFFT:\n X = fftshift(fft(ifftshift(x))) # correct magnitude and phase\n x = fftshift(ifft(ifftshift(X))) # correct magnitude and phase\n X = fftshift(fft(x)) # correct magnitude but wrong phase !\n x = fftshift(ifft(X)) # correct magnitude but wrong phase !\n A numerically faster way to achieve the same result is by multiplying\n with an alternating phase factor as done below.\n Speed for N=2000 was ~0.4s for a double fftshift and ~0.1s for a double\n phase multiplication -> use the phase factor approach (iiij).\n \"\"\"\n # Create the sign-flip pattern for largest use case and \n # reference smaller grids with a view to the same data for\n # memory saving.\n ii2N = _np.ones((2*N),dtype=float)\n ii2N[1::2] = -1 #alternating pattern +,-,+,-,+,-,...\n iiij2N = _np.outer(ii2N, ii2N)\n iiij2No2 = iiij2N[:2*No2,:2*No2] #slice to size used below\n iiijN = iiij2N[:N, :N]\n\n RR = _np.sqrt(1/(2*lam*z))*dx*2\n io = _np.arange(0, (2*No2)+1) #add one extra to stride fresnel integrals\n R1 = RR*(io - No2)\n fs, fc = _fresnel(R1)\n fss = _np.outer(fs, fs) # out[i, j] = a[i] * b[j]\n fsc = _np.outer(fs, fc)\n fcs = _np.outer(fc, fs)\n fcc = _np.outer(fc, fc)\n \n \"\"\"Old notation (0.26-0.33s):\n temp_re = (a + b + c - d + ...)\n # numpy func add takes 2 operands A, B only\n # -> each operation needs to create a new temporary array, i.e.\n # ((((a+b)+c)+d)+...)\n # since python does not optimize to += here (at least is seems)\n New notation (0.14-0.16s):\n temp_re = (a + b) #operation with 2 operands\n temp_re += c\n temp_re -= d\n ...\n Wrong notation:\n temp_re = a #copy reference to array a\n temp_re += b\n ...\n # changing `a` in-place, re-using `a` will give corrupted\n # result\n \"\"\"\n temp_re = (fsc[1:, 1:] #s[i+1]c[j+1]\n + fcs[1:, 1:]) #c[+1]s[+1]\n temp_re -= fsc[:-1, 1:] #-scp [p=+1, without letter =+0]\n temp_re -= fcs[:-1, 1:] #-csp\n temp_re -= fsc[1:, :-1] #-spc\n temp_re -= fcs[1:, :-1] #-cps\n temp_re += fsc[:-1, :-1] #sc\n temp_re += fcs[:-1, :-1] #cs\n \n temp_im = (-fcc[1:, 1:] #-cpcp\n + fss[1:, 1:]) # +spsp\n temp_im += fcc[:-1, 1:] # +ccp\n temp_im -= fss[:-1, 1:] # -ssp\n temp_im += fcc[1:, :-1] # +cpc\n temp_im -= fss[1:, :-1] # -sps\n temp_im -= fcc[:-1, :-1] # -cc\n temp_im += fss[:-1, :-1]# +ss\n \n temp_K = 1j * temp_im # a * b creates copy and casts to complex\n temp_K += temp_re\n temp_K *= iiij2No2\n temp_K *= 0.5\n in_outK[(N-No2):(N+No2), (N-No2):(N+No2)] = temp_K\n \n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] \\\n = field[(N-2*No2):N,(N-2*No2):N] #cutting off field if N odd (!)\n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] *= iiij2No2\n \n tictoc.tic()\n in_outK = _fft2(in_outK, **_fftargs)\n in_outF = _fft2(in_outF, **_fftargs)\n t_fft1 = tictoc.toc()\n \n in_outF *= in_outK\n \n in_outF *= iiij2N\n tictoc.tic()\n in_outF = _ifft2(in_outF, **_fftargs)\n t_fft2 = tictoc.toc()\n #TODO check normalization if USE_PYFFTW\n \n Ftemp = (in_outF[No2:N+No2, No2:N+No2]\n - in_outF[No2-1:N+No2-1, No2:N+No2])\n Ftemp += in_outF[No2-1:N+No2-1, No2-1:N+No2-1]\n Ftemp -= in_outF[No2:N+No2, No2-1:N+No2-1]\n comp = complex(cokz, sikz)\n Ftemp *= 0.25 * comp\n Ftemp *= iiijN\n field = Ftemp #reassign without data copy\n ttotal = tictoc.toc()\n t_fft = t_fft1 + t_fft2\n t_outside = ttotal - t_fft\n debug_time = False\n if debug_time:\n 
print('Time total = fft + rest: {:.2f}={:.2f}+{:.2f}'.format(\n ttotal, t_fft, t_outside))\n return field", "def shift_rigth_3d(x, pad_value=None):\n if pad_value is None:\n shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]\n else:\n shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :]\n return shifted_targets", "def _rel_shift(self, xs):\n bs, qlen, klen, n_heads = xs.size()\n xs = xs.permute(0, 3, 2, 1)\n idx = torch.arange(klen, device=xs.device)\n k_idx, q_idx = idx.unsqueeze(0), idx.unsqueeze(1)\n rel_pos_idx = torch.abs(k_idx - q_idx)\n if klen != qlen:\n rel_pos_idx = rel_pos_idx[:, :qlen]\n mask = xs.new_ones(qlen, klen, dtype=torch.bool if torch_12_plus else torch.uint8)\n mask = torch.tril(mask, diagonal=0).transpose(1, 0)\n rel_pos_idx[mask] *= -1\n rel_pos_idx = klen - qlen - rel_pos_idx\n rel_pos_idx[rel_pos_idx < 0] *= -1\n if self.clamp_len > 0:\n rel_pos_idx.clamp_(max=self.clamp_len)\n rel_pos_idx = rel_pos_idx.expand_as(xs)\n x_shift = torch.gather(xs, dim=2, index=rel_pos_idx)\n x_shift = x_shift.permute(0, 3, 2, 1)\n return x_shift", "def _StepsLoopElim(z, nstep, _refr, Fin):\n if Fin._curvature != 0.0:\n raise ValueError('Cannot operate on spherical coords.'\n + 'Use Convert() first')\n\n if type(_refr) != _np.ndarray:\n refr=_np.ones((Fin.N,Fin.N))*_refr\n else:\n refr = _refr\n \n if Fin.field.shape != refr.T.shape:\n #TODO fix the .T problem\n raise ValueError('refractive index refr must have same NxN'\n + ' dimension as field.')\n \n Fout = Field.copy(Fin)\n N = Fout.N\n lam = Fout.lam\n size = Fout.siz\n dtype = Fout._dtype\n \n legacy = True\n if legacy:\n Pi = 3.141592654 #to compare Cpp results accurately\n else:\n Pi = _np.pi\n K = 2.*Pi/lam\n dz = z/2. #since 2 staggered steps, each row then col-wise\n Pi4lz = 2*K/dz\n imPi4lz = 1j * Pi4lz\n \n delta = size/(N-1.) #dx\n delta2 = delta*delta\n \n \"\"\"\n /* absorption at the borders is described here */\n \"\"\"\n AA= -10./dz/nstep #/* total absorption */\n band_pow=2. 
#/* profile of the absorption border, 2=quadratic*/\n \"\"\"\n /* width of the absorption border */\n \"\"\"\n i_left = N/2 + 1.0 - 0.4*N\n i_right = N/2 + 1.0 + 0.4*N\n \n \"\"\"\n ///* absorption borders are formed here */\n \"\"\"\n c_absorb_x = _np.zeros(N, dtype=dtype)\n iv = _np.arange(N, dtype=int)\n mask = iv+1<=i_left\n iii = i_left - iv[mask]\n c_absorb_x[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n mask2 = iv+1 >= i_right\n iii = iv[mask2]-i_right+2\n im = N-i_right+1\n c_absorb_x[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n \n c_absorb_x2 = _np.zeros(N, dtype=dtype)\n mask = iv+1<=i_left\n iii = i_left - iv[mask]\n c_absorb_x2[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n mask2 = iv+1 >= i_right\n iii = iv[mask2]-i_right+1 #TODO why 1 difference\n im = N-i_right+1\n c_absorb_x2[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n \n c_absorb_y = _np.zeros(N, dtype=dtype)\n jv = _np.arange(N, dtype=int)\n mask = jv+1<=i_left\n iii = i_left - jv[mask] -1# REM +1 in i direction, why different here?\n c_absorb_y[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n mask2 = jv+1 >= i_right\n iii = jv[mask2]-i_right+1\n im = N-i_right+1\n c_absorb_y[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n \n # c_absorb_y2 = _np.zeros(N, dtype=complex)\n # mask = jv+1<=i_left\n # iii = i_left - jv[mask]-1# REM +1 in i direction, why different here?\n # c_absorb_y2[mask] = 1j* (AA*K)*_np.power(iii/i_left, band_pow)\n \n # mask2 = jv+1 >= i_right\n # iii = jv[mask2] +1-i_right#REM +1 for i-direction loop, why different?\n # im = N-i_right+1\n # c_absorb_y2[mask2] = 1j* (AA*K)*_np.power(iii/im, band_pow)\n \n c_absorb_y2 = c_absorb_y\n #TODO last two were identical, why are absorbx,x2,y different?\n # probably can just assume same everywhere after legacy=False...\n \"\"\"\n ///* end absorption */\n \"\"\"\n \n refr = refr.T #TODO I messed up somewhere...\n \n \"\"\"The refraction part (real part of refr. index n) is separated out\n and applied as a phase term instead of stepping through like the\n imaginary part. According to LightPipes for MATLAB manual, this proved\n to be more stable.\"\"\"\n \n # expfi4 = _np.exp(1j*0.25*K*dz*(refr.real-1.0))\n tempfi = 1j*refr.real\n tempfi -= 1j*1.0 #avoid mem copies where possible\n tempfi *= 0.25*K*dz \n expfi4 = _np.exp(tempfi, out=tempfi) #quarter phase fi, for half phase apply twice\n \n # medium = (-1j*K)*refr.imag\n mediumIm = -K*refr.imag #half the RAM vs. 
complex, real part 0 anyway\n \n CCX = -2/delta2 + 1j*(Pi4lz + mediumIm)\n CCX[1:N-2:2,:] -= c_absorb_x\n CCX[2:N-1:2,:] -= c_absorb_x2\n CCY = -2/delta2 + 1j*(Pi4lz + mediumIm)\n CCY[:,1:N-2:2] -= c_absorb_y.reshape((-1,1)) #to column vector\n CCY[:,2:N-1:2] -= c_absorb_y2.reshape((-1,1)) #to column vector\n \n #Variables for elimination function elim():\n a = -1/delta2\n b = -1/delta2\n uu = _np.zeros(N, dtype=dtype)\n uu2 = _np.zeros(N, dtype=dtype)\n alpha = _np.zeros(N, dtype=dtype)\n beta = _np.zeros(N, dtype=dtype)\n p = _np.zeros(N, dtype=dtype)\n \n \"\"\"\n /* Main loop, steps here */\n \"\"\"\n for istep in range(nstep):\n \"\"\"\n /* Elimination in the direction i, halfstep */\n \"\"\"\n Fout.field *= expfi4 #*=_np.exp(1j*(0.25*K*dz*(refr.real-1.0)))\n \n for j in range(1, N-1):\n uij = Fout.field[j, 1:N-1]\n uij1 = Fout.field[j+1, 1:N-1]\n uij_1 = Fout.field[j-1, 1:N-1]\n p[1:N-1] = -1/delta2 * (uij_1 + uij1 -2.0 * uij) + imPi4lz * uij\n \n elim(N, a, b, CCX[j,:], p, uu, alpha, beta)\n \n Fout.field[j-1, :] = uu2[:] #apply result from previous elim!\n uu2[:] = uu[:] #store this elim for next application\n # this is necessary to not overwrite the data used in the next\n # elim step\n \n Fout.field[N-1, :] = uu2[:] #apply final elim in this direction\n \n Fout.field *= expfi4 #*=_np.exp(1j*(0.25*K*dz*(refr.real-1.0)))\n Fout.field *= expfi4 #twice makes it 0.5*k*dz*(n-1)\n \n \"\"\"\n /* Elimination in the j direction is here, halfstep */\n \"\"\"\n uu2[:] = 0.0\n \n for i in range(1, N-1):\n uij = Fout.field[1:N-1, i]\n ui1j = Fout.field[1:N-1, i+1]\n ui_1j = Fout.field[1:N-1, i-1]\n p[1:N-1] = -1/delta2 * (ui_1j + ui1j -2.0 * uij) + imPi4lz * uij\n \n elim(N, a, b, CCY[:,i], p, uu, alpha, beta)\n \n Fout.field[:, i-1] = uu2[:]\n uu2[:] = uu[:]\n \n #TODO BUG! why are we accessing i here? out of scope. 
Last value:\n #resulting from for(ii in range(1, N-2, 2))\n # -> last ii in loop is int((N-2)/2)*2-1\n # and i=ii+1\n # -> i_final = int((N-2)/2)*2-1+1 = int((N-2)/2)*2\n # tested OK for even and odd N -> works for all N\n i = int((N-2)/2)*2\n #TODO also, why 0:N-1 where all else is 0:N?\n Fout.field[0:N-1, i] = uu2[1:N]\n \"\"\"\n ///* end j */\n \"\"\"\n #TODO should this be in nstep loop??\n # seems so, that would add up to 1*ikz*n, right now its 3/4*ikz per iter\n # and a final 1/4 ??\n Fout.field *= expfi4 #*=_np.exp(1j*(0.25*K*dz*(refr.real-1.0)))\n Fout._IsGauss=False\n return Fout", "def memmove(self, grid):\n self.moveList.append((self.x, self.y))\n self.moveList2.append((self.x, self.y))\n waysList = []\n for f in range(4):\n if self.test(grid, f):\n if (self.x+SPEED_X[f], self.y+SPEED_Y[f]) not in self.moveList:\n waysList.append(f)\n if len(waysList) == 1:\n self.flag = waysList[0]\n self.move(grid)\n return\n elif len(waysList) == 4:\n self.flag = 0\n self.move(grid)\n return\n elif len(waysList) > 1:\n for f in waysList:\n self.mem.append((self.x, self.y, f))\n self.x, self.y, self.flag = self.mem[-1]\n elif len(waysList) == 0:\n self.x, self.y, self.flag = self.mem[-1]\n for i in range(len(self.moveList2)):\n if self.moveList2[i][0] == self.x and self.moveList2[i][1] == self.y:\n del self.moveList2[i+1:]\n break\n self.move(grid)\n self.mem.pop()", "def update_pos(self, game_field, all_ghost_out, windowsize):\r\n\r\n # If Pac-Man wants to change the direction into a direction, that is not the same or the opposite of the current direction, it could possible be a pre- or postturn\r\n if self.direction != self.last_dir and find_opposite(self.last_dir) != self.direction and self.state != '':\r\n self.pre_or_post_turn(game_field, all_ghost_out)\r\n\r\n # If Pac-Man moves, update his position depending on his direction\r\n if self.state == 'm':\r\n fak = 1\r\n if self.direction == 'u':\r\n self.pos[1] -= fak * self.speed\r\n elif self.direction == 'd':\r\n self.pos[1] += fak * self.speed\r\n elif self.direction == 'l':\r\n self.pos[0] -= fak * self.speed\r\n elif self.direction == 'r':\r\n self.pos[0] += fak * self.speed\r\n\r\n ongrid = (self.pos[0] % self.grid_size == 0 and self.pos[1] % self.grid_size == 0)\r\n\r\n # When Pac-Man is on grid check the field type he's on and in front of him\r\n if ongrid :\r\n field = game_field.possible_way(self.pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n\r\n # When the next field is a wall of the maze, make Pac-Man stop moving, otherwise let him continue moving\r\n if field != None and field[0] == 'r':\r\n field2 = game_field.possible_way(self.pos, self.direction)\r\n self.cnt_points(field2, all_ghost_out)\r\n if field2 != None and field2[0] == 'r':\r\n self.state = ''\r\n else:\r\n self.state = 'm'\r\n\r\n # When the field in front of him is the end of a tunnel move Pac-Man to the other side\r\n if field == 'os':\r\n if self.direction == 'l':\r\n self.pos[0] = windowsize[0] \r\n elif self.direction == 'r':\r\n self.pos[0] = -self.grid_size\r\n\r\n # When the next field is a field Pac-Man can move on to, safe the latest direction in direction\r\n if (field == None or field[0] != 'r'):\r\n self.direction = self.last_dir[:]\r\n\r\n # Force Pacmans direction to drive through the tunnel, just to avoid graphical bugs\r\n if self.pos[0] < 0:\r\n self.direction = 'r'\r\n self.last_dir = 'r'\r\n elif self.pos[0] > windowsize[0] - self.grid_size:\r\n self.direction = 'l'\r\n self.last_dir = 'l'", "def flipud(n):\n times = lambda x: 
jnp.flipud(x)\n trans = lambda x: jnp.flipud(x)\n return Operator(times=times, trans=trans, shape=(n,n))", "def push_up (grid):\r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0\r\n #joining like numbers \r\n for i in range(3): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i+1][j]=0\r\n #pafter adding the numbers continue to move them \r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0", "def vec_shift_left_n(x, n):\n return jnp.zeros_like(x).at[0:-n].set(x[n:])", "def get_move(arr=None):\n if arr is None or np.sum(arr!=0) < 55:\n return []\n \n moves = [] # (coord, dir) ex ((3, 4), 0) means move (3, 4) to right, 0 right, 1 up, 2 left, 3 down\n mask_moved = np.ones_like(arr)\n replace_value = 0\n # detect 2 consecutive\n for key in filters:\n for rot in [1, 3, 0, 2]:\n early_break = False\n out = signal.correlate2d(arr, np.rot90(filters[key], rot), mode='same', fillvalue=100)\n \n mask = (out==arr).astype(np.float)\n tmp = np.stack(np.where(mask), -1)\n # print(tmp)\n for idx in range(tmp.shape[0]):\n # if mask_moved[tuple(tmp[idx])] == 1:\n if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1:\n # if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1 and arr[tuple(tmp[idx]+dirs[rot])] != replace_value:\n moves.append((tmp[idx], rot))\n # mask_moved[tuple(tmp[idx])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot])] = 0\n arr[tuple(tmp[idx])], arr[tuple(tmp[idx]+dirs[rot])] = arr[tuple(tmp[idx]+dirs[rot])], arr[tuple(tmp[idx])]\n arr[tuple(tmp[idx]+dirs[rot])] = replace_value\n if key == 3:\n mask_moved[tuple(tmp[idx]+dirs[rot]*2)] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]*3)] = 0\n arr[tuple(tmp[idx]+dirs[rot]*2)] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]*3)] = replace_value\n elif key == 2:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n elif key == 0:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = replace_value\n else:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = replace_value\n early_break = True\n break\n if early_break:\n break\n \n if len(moves) > 5: # early break to save computing resources\n break\n\n if len(moves) == 0:\n icon_other = np.stack(np.where(arr==0), -1)\n for idx in range(icon_other.shape[0]):\n moves.append((icon_other[idx], np.random.randint(0, 4)))\n\n return moves", "def vec_shift_right_n(x, n):\n return jnp.zeros_like(x).at[n:].set(x[:-n])", "def c_not_align_split_n_fp32(self, tik_instance):\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n dhw_d = d_d * h_d * w_d\n nc_one = self.ub_ele // dhw_d\n n_ub = nc_one // 2 // self.cp_align_len // c_d\n\n all_core = _ceil_div(n_d, n_ub)\n ac_num = _set_core_num(all_core)\n\n with tik_instance.for_range(0, ac_num, block_num=ac_num) as num_core:\n 
ub_ori = tik_instance.Tensor(\"float16\",\n (self.ub_ele * 2,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(\"float16\",\n (self.ub_ele * 2,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n ub_tail = tik_instance.Tensor(\"float16\",\n (16,),\n name=\"ub_tail\",\n scope=tik.scope_ubuf)\n\n ub_loop = _set_loop(tik_instance, num_core, ac_num, all_core)\n\n with tik_instance.for_range(0, ub_loop) as num_u:\n core_index = num_u * ac_num + num_core\n\n with tik_instance.if_scope(core_index < all_core - 1):\n n_len = n_ub\n n_before = n_ub * core_index\n args = tik_instance, ub_ori, ub_trans, ub_tail, \\\n n_before, n_len\n self.func_c_not_align_split_n_fp32(args)\n\n with tik_instance.else_scope():\n n_before = (all_core - 1) * n_ub\n n_len = n_d - n_before\n args = tik_instance, ub_ori, ub_trans, ub_tail, \\\n n_before, n_len\n self.func_c_not_align_split_n_fp32(args)\n\n return tik_instance", "def _transpose_shift(E):\n bsz, n_head, max_len, _ = E.size()\n zero_pad = layers.zeros(shape=(bsz, n_head, max_len, 1))\n E = layers.reshape(x=layers.concat([E, zero_pad], axis=-1),\n shape=(bsz, n_head, -1, max_len))\n indice = layers.arange(start=0, end=max_len, dtype=int)\n E = layers.index_select(input=E, index=indice, dim=-2)\n E = layers.transpose(E, perm=[0, 1, 3, 2])\n return E", "def vec_rotate_left_n(x, n):\n return jnp.roll(x, -n)", "def vec_rotate_right_n(x, n):\n return jnp.roll(x, n)", "def focus_field_beam(shape = (128,128,128),\n units = (0.1,0.1,0.1),\n lam =.5, NA = .6, n0 = 1.,\n return_all_fields = False,\n n_integration_steps = 200):\n\n\n p = OCLProgram(absPath(\"kernels/psf_debye.cl\"),\n build_options = [\"-I\",absPath(\"kernels\"),\"-D\",\"INT_STEPS=%s\"%n_integration_steps])\n\n if np.isscalar(NA):\n NA = [0.,NA]\n \n Nx0, Ny0, Nz0 = shape\n dx, dy, dz = units\n\n #FIXME: the loop below does not yet work for odd inputs\n if not Nx0%2+Ny0%2+Nz0%2==0:\n raise NotImplementedError(\"odd shapes not supported yet\")\n\n\n alphas = np.arcsin(np.array(NA)/n0)\n assert len(alphas)%2 ==0\n\n # as we assume the psf to be symmetric, we just have to calculate each octant\n Nx = Nx0//2+1\n Ny = Ny0//2+1\n Nz = Nz0//2+1\n\n u_g = OCLArray.empty((Nz,Ny,Nx),np.float32)\n ex_g = OCLArray.empty(u_g.shape,np.complex64)\n ey_g = OCLArray.empty(u_g.shape,np.complex64)\n ez_g = OCLArray.empty(u_g.shape,np.complex64)\n\n alpha_g = OCLArray.from_array(alphas.astype(np.float32))\n\n \n p.run_kernel(\"debye_wolf\",u_g.shape[::-1],None,\n ex_g.data,ey_g.data,ez_g.data, u_g.data,\n np.float32(1.),np.float32(0.),\n np.float32(0.),np.float32(dx*(Nx-1.)),\n np.float32(0.),np.float32(dy*(Ny-1.)),\n np.float32(0.),np.float32(dz*(Nz-1.)),\n np.float32(lam), np.float32(n0),\n alpha_g.data, np.int32(len(alphas)))\n\n u = u_g.get()\n ex = ex_g.get()\n ey = ey_g.get()\n ez = ez_g.get()\n\n u_all = np.empty((Nz0,Ny0,Nx0),np.float32)\n ex_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ey_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n ez_all = np.empty((Nz0,Ny0,Nx0),np.complex64)\n\n sx = [slice(0,Nx),slice(Nx,Nx0)]\n sy = [slice(0,Ny),slice(Ny,Ny0)]\n sz = [slice(0,Nz),slice(Nz,Nz0)]\n\n\n\n # spreading the calculated octant to the full volume\n for i,j,k in itertools.product([0,1],[0,1],[0,1]):\n\n # i, j, k = 0 indicates the + octant\n\n u_all[sz[1-i],sy[1-j],sx[1-k]] = u[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n if i ==0:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = 
ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k]\n\n else:\n ex_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ex[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ey_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ey[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n ez_all[sz[1-i],sy[1-j],sx[1-k]] = np.conjugate(ez[1-i:Nz-1+i,1-j :Ny-1+j,1-k :Nx-1+k][::(-1)**i,::(-1)**j,::(-1)**k])\n\n if return_all_fields:\n return u_all, ex_all, ey_all, ez_all\n else:\n return u_all", "def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 :\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n return (grid)", "def fun_no_cut(self, reg_x_len, n_size, block_index, n_loop):\n data_input_ub = self.tik_instance.Tensor(self.dtype_x,\n self.shape_v,\n name=\"data_input_ub\",\n scope=tik.scope_ubuf)\n input_indices_ub = self.tik_instance.Tensor(self.dtype_indices, (8,),\n name=\"input_indices_ub\",\n scope=tik.scope_ubuf)\n self.tik_instance.data_move(input_indices_ub[0],\n self.input_indices_gm[0], 0, 1, 1, 0, 0)\n reg_start = self.tik_instance.Scalar(dtype=\"int32\")\n reg_start.set_as(input_indices_ub[0])\n reg_burst = self.tik_instance.Scalar(dtype=\"int32\")\n if self.dtype_x in (\"float32\", \"int32\"):\n reg_burst.set_as(reg_x_len // 8)\n else:\n reg_burst.set_as(reg_x_len // 16)\n\n with self.tik_instance.for_range(0, n_loop) as n_index:\n with self.tik_instance.if_scope(\n block_index * n_size + n_index != reg_start):\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_x_gm[(block_index * n_size + n_index) *\n reg_x_len], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(data_input_ub[0],\n self.input_v_gm[0], 0, 1, reg_burst,\n 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[(block_index * n_size + n_index) * reg_x_len],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)", "def move(self, frames_per_second):\n # tuple_add in one line\n position_delta = tuple(map(lambda x: x/frames_per_second, self.velocity))\n velocity_delta = tuple(map(lambda x: x/frames_per_second, (0, GRAVITY)))\n self.position = tuple(sum(x) for x in zip(self.position, position_delta))\n # apply the pull of GRAVITY\n self.velocity = tuple(sum(x) for x in zip(self.velocity, velocity_delta))\n #print('shell.move:postion:{}'.format(self.position))", "def move(self, state, move_cmd, i, j):\r\n new_state = self.clone_state(state)\r\n coordinate_change = self.action_dic[self.reflection_dic[move_cmd]]\r\n new_state[i][j], new_state[i + coordinate_change[0]][j + coordinate_change[1]] = \\\r\n new_state[i + coordinate_change[0]][j + coordinate_change[1]]\\\r\n , new_state[i][j]\r\n return new_state", "def move(self, side, number_of_turns=1):\n # Creating a deep copy of the \n new = 
self.copy()\n \n turn = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n\n # Creating a matrix that will perform the desired rotation given the number_of_turns of said side\n for i in range(number_of_turns):\n turn = turn.dot(C_WISE[side])\n\n # Performing rotation of the coordinates and the norms of its faces for each of the cubies in the side \n for i, cubie in enumerate(new.cubies):\n if self.cubies[i].in_side(side):\n for j in range(len(cubie.faces)):\n old_face_norm = self.cubies[i].faces[j].norm\n cubie.faces[j].norm = old_face_norm.dot(turn)\n cubie.coordinates = self.cubies[i].coordinates.dot(turn)\n return new", "def shift_image(img, shft_int = 1):\n no_cols = img[0].shape[1]\n lst_col = no_cols - 1\n col_sty = no_cols - shft_int \n col_idx = torch.cat([torch.zeros(col_sty, dtype = torch.bool),\n torch.ones(shft_int, dtype = torch.bool)])\n cols = torch.reshape(img[0][0,:,col_idx], (no_cols,shft_int))\n cols_sum = torch.sum(cols)\n inval_shft = torch.is_nonzero(cols_sum)\n\n if inval_shft:\n col_idx = torch.cat([torch.ones(shft_int, dtype = torch.bool),\n torch.zeros(col_sty, dtype = torch.bool)])\n cols = torch.reshape(img[0][0,:,col_idx], (no_cols,shft_int))\n cols_sum = torch.sum(cols)\n inval_shft = torch.is_nonzero(cols_sum)\n if inval_shft:\n raise ValueError('Consider shifting along another axis.')\n mod_img = torch.cat([img[0][0,:,~col_idx],cols], dim = 1)\n mod_img = torch.reshape(mod_img, (1,mod_img.shape[0], mod_img.shape[1]))\n mod_img = (mod_img,img[1])\n return mod_img\n \n mod_img = torch.cat([cols,img[0][0,:,~col_idx]], dim = 1)\n mod_img = torch.reshape(mod_img, (1,mod_img.shape[0], mod_img.shape[1]))\n mod_img = (mod_img,img[1])\n return mod_img", "def shift_z(a, off):\n # set initial and final indices along z-direction for original and\n # shifted 3D arrays\n ai_z = int((abs(off) + off) / 2)\n af_z = a.shape[0] + int((-abs(off) + off) / 2)\n\n bi_z = a.shape[0] - af_z\n bf_z = a.shape[0] - ai_z\n\n b = a * 0\n b[bi_z:bf_z, :, :] = a[ai_z:af_z, :, :]\n\n return b", "def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction == 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1", "def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def shift_point_cloud(batch_data, shift_range=0.1):\n B, N, C = batch_data.shape\n shifts = np.random.uniform(-shift_range, shift_range, (B,3))\n for batch_index in range(B):\n batch_data[batch_index,:,:] += shifts[batch_index,:]\n return batch_data", "def move(self): # AH note. 
Swich move with extra_steps?\n if self.adjustment < 0:\n self.position += self.extra_steps\n super().move()\n self.no_moves += 1\n # Do the regular move", "def init_shiftind(self, n_t):\n i = np.arange(n_t * n_t)\n i2 = np.arange(n_t).repeat(n_t)\n ik = np.arange(n_t).repeat(n_t)\n ii = np.arange(n_t)[np.newaxis].repeat(n_t, 0).flatten()\n\n si = ik * n_t + (ik + ii) % n_t\n self.shiftinds_fwd = np.roll(si.reshape((n_t, n_t)), int((n_t - 1) / 2), 1)[:, ::-1].flatten()\n\n si = ik * n_t + (ii - ik) % n_t\n self.shiftinds_back = np.roll(np.arange(n_t * n_t).reshape((n_t, n_t))[:, ::-1], -int((n_t - 1) / 2), 1).flatten()[si]\n\n self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int)\n # self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()", "def c_align_split_n(self, tik_instance):\n n_d, d_d, h_d, w_d, _ = self.dst_shape\n dhw_d = d_d * h_d * w_d\n nc_one = self.ub_ele // dhw_d\n c_align = self.c_1 * self.c_0\n n_ub = nc_one // c_align\n\n all_core = _ceil_div(n_d, n_ub)\n ac_num = _set_core_num(all_core)\n\n with tik_instance.for_range(0, ac_num, block_num=ac_num) as num_core:\n ub_ori = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n\n ub_loop = _set_loop(tik_instance, num_core, ac_num, all_core)\n\n with tik_instance.for_range(0, ub_loop) as num_u:\n core_index = num_u * ac_num + num_core\n\n with tik_instance.if_scope(core_index < all_core - 1):\n n_len = n_ub\n n_before = n_ub * core_index\n args = tik_instance, ub_ori, ub_trans, n_before, n_len\n self.func_c_align_split_n(args)\n\n with tik_instance.else_scope():\n n_before = (all_core - 1) * n_ub\n n_len = n_d - n_before\n args = tik_instance, ub_ori, ub_trans, n_before, n_len\n self.func_c_align_split_n(args)\n\n return tik_instance", "def shift(a, n=1):\n return a[n:] + a[:n]", "def rel_shift(x, klen=-1):\n x_size = x.shape\n\n x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])\n x = x[1:, ...]\n x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])\n # x = x[:, 0:klen, :, :]\n x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))\n\n return x", "def rotate(arr: StaticArray, steps: int) -> StaticArray:\n length = arr.size()\n new_array = StaticArray(length) # Creates a new array to preserve original elements\n for index in range(length):\n pos = index + steps # Adds steps to index number\n if pos == length: # If index is 1 more than last index, change to first index\n pos = 0\n while pos > (length - 1): # If index is greater than the range, subtract by size of array\n pos -= length\n while pos < 0: # If index is less than the range, add by size of array\n pos += length\n new_array.set(pos, arr.get(index)) # Set position\n\n return new_array", "def shift(self, arr, shift_amt, pre_context, post_context):\n result = arr[pre_context - shift_amt:arr.shape[0] - post_context -\n shift_amt, :]\n return result", "def move_members(_) -> int:\n return 1 << 24", "def move_members(_) -> int:\n return 1 << 24", "def 
update_E(self):\n self.grid.E[:, 0, :, :] = self.grid.E[:, -1, :, :]", "def move_zombies(self, human_distance_field): #essentially the same as move_humans, but in 4 directions not 8\r\n blocked = self.get_grid_height() * self.get_grid_width()\r\n new_positions = []\r\n for zombie in self.zombies():\r\n moves = self.four_neighbors(zombie[0], zombie[1])\r\n moves.append((zombie[0], zombie[1]))\r\n potential_moves = [moves[0]]\r\n distance = human_distance_field[moves[0][0]][moves[0][1]]\r\n \r\n for move in moves:\r\n if human_distance_field[move[0]][move[1]] < blocked:\r\n if human_distance_field[move[0]][move[1]] < distance:\r\n potential_moves = [move]\r\n distance = human_distance_field[move[0]][move[1]]\r\n elif human_distance_field[move[0]][move[1]] == distance:\r\n potential_moves.append(move)\r\n \r\n new_positions.append(random.choice(potential_moves))\r\n \r\n self._zombie_list = new_positions" ]
[ "0.73844004", "0.6082966", "0.58029294", "0.5760416", "0.5458749", "0.5457597", "0.54504377", "0.5396676", "0.5381811", "0.53601795", "0.51622343", "0.508853", "0.50696003", "0.5024297", "0.5003249", "0.49621084", "0.49588436", "0.49356058", "0.4930291", "0.49255875", "0.49201998", "0.48937675", "0.48927703", "0.48697072", "0.48549896", "0.48480734", "0.48192695", "0.47968262", "0.47925448", "0.47841445", "0.47835943", "0.47789383", "0.47778523", "0.47771168", "0.47619042", "0.4712624", "0.4705566", "0.47008514", "0.46821833", "0.46771857", "0.46761793", "0.46708843", "0.46701613", "0.4664618", "0.46624273", "0.46612924", "0.46612924", "0.4661119", "0.46505973", "0.46363598", "0.46354225", "0.4633005", "0.46285903", "0.46164194", "0.46020794", "0.45970494", "0.45954463", "0.45950446", "0.4593474", "0.45917618", "0.45904478", "0.4581338", "0.45796895", "0.45774877", "0.4570963", "0.45692366", "0.4564279", "0.4560185", "0.45582148", "0.45551467", "0.4553554", "0.45529413", "0.45525035", "0.45520216", "0.4551978", "0.45504254", "0.4549332", "0.45471543", "0.45451397", "0.4544408", "0.4541318", "0.4534452", "0.4527823", "0.45260125", "0.45248434", "0.45224777", "0.45193005", "0.45158187", "0.45116717", "0.4498011", "0.44965905", "0.44820952", "0.44789466", "0.44781965", "0.44745755", "0.44714797", "0.44638884", "0.44638884", "0.4463372", "0.44631112" ]
0.72734666
1
Capture the query events coming from Grafana. Investigate the query and replace the measurement name with a Retention Policy measurement name if possible. Send out the (modified or unmodified) query to Influx and return the result
def proxy_influx_query(path): params = dict(request.query) try: LOGGER.debug(params.get('q', None)) params['q'] = modify_queries(params) LOGGER.debug(params.get('q', None)) except Exception as e: LOGGER.exception("Error (%s) proxying query: %s", type(e).__class__, e.message, exc_info=True) pass headers = request.headers cookies = request.cookies r = requests.get(url=CONFIG['influxdb_url'] +'/'+ path, params=params, headers=headers, cookies=cookies, stream=True) for key, value in dict(r.headers).iteritems(): response.set_header(key, value) for key, value in dict(r.cookies).iteritems(): response.cookies[key] = value if r.status_code == 200: return r.raw else: abort(r.status_code, r.reason)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(self, query, current_time):\n params = {'query': query}\n auth_type = self.authentication.get(constants.METRICS_BACKEND_CONFIG_AUTH_TYPE)\n logger.debug(f\"authentication type is: {auth_type}\")\n try:\n query_result = None\n if auth_type == constants.METRICS_BACKEND_CONFIG_AUTH_TYPE_NONE:\n query_result = requests.get(self.prometheus_url, params=params).json()\n elif auth_type == constants.METRICS_BACKEND_CONFIG_AUTH_TYPE_BASIC:\n auth=HTTPBasicAuth(\n self.authentication.get(constants.METRICS_BACKEND_CONFIG_AUTH_USERNAME), \n self.authentication.get(constants.METRICS_BACKEND_CONFIG_AUTH_PASSWORD)\n )\n verify = (not self.authentication.get(constants.METRICS_BACKEND_CONFIG_AUTH_INSECURE_SKIP_VERIFY))\n query_result = requests.get(self.prometheus_url, params=params, auth=auth, verify=verify).json()\n else:\n logger.warning(f\"Unsupported authentication type: {auth_type}; trying {constants.METRICS_BACKEND_CONFIG_AUTH_TYPE_NONE}\")\n query_result = requests.get(self.prometheus_url, params=params).json()\n logger.debug(\"query result -- raw\")\n logger.debug(query_result)\n except Exception as e:\n logger.error(\"Error while attempting to connect to prometheus\")\n raise HTTPException(status_code=422, detail=\"Error while attempting to connect to prometheus.\")\n return self.post_process(query_result, current_time)", "def handleQuery(self, query) -> None: # noqa\n results = []\n\n try:\n query_str = query.string.strip()\n\n # too small request - don't even send it.\n if len(query_str) < 2:\n keys_monitor.reset()\n return\n\n if len(query_str.split()) > 1:\n # pydictionary or synonyms.com don't seem to support this\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"A term must be only a single word\",\n actions=[],\n )\n )\n return\n\n # determine if we can make the request --------------------------------------------\n keys_monitor.report()\n if keys_monitor.triggered():\n results.extend(get_items_for_word(query, query_str))\n\n if not results:\n query.add(\n 0,\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"No results.\",\n actions=[],\n ),\n )\n\n return\n else:\n query.add(results)\n\n except Exception: # user to report error\n print(traceback.format_exc())\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"Something went wrong! 
Press [ENTER] to copy error and report it\",\n actions=[\n ClipAction(\n f\"Copy error - report it to {md_url[8:]}\",\n f\"{traceback.format_exc()}\",\n )\n ],\n ),\n )", "def _enrich_results(self, record, query):\n record['metadata.query_name'] = query['name']\n record['metadata.query_id'] = '{}_{}'.format(\n query['name'], self.run_tag)\n record['metadata.query_description'] = query['description']\n record['metadata.query_headers'] = query['headers']\n record['@timestamp'] = int(round(time.time() * 1000))\n return record", "def query(self, query):", "def _process_query(self, query):\n query_search_pattern = r'\\nquery: (\\{.*\\}) nreturned'\n query_search_remove_pattern = r'(.*)(\\nquery: \\{.*\\} )( nreturned.*)'\n\n command_search_pattern = r'command: (\\{.*\\}) reslen'\n command_search_remove_pattern = r'(.*)(command: \\{.*\\})( reslen.*)'\n\n out = {}\n out['millis'] = query.get('millis', 0)\n out['ts'] = query.get('ts')\n\n out['org_info'] = query.get('info')\n\n info = query.get('info').split(' ')\n out['operation_type'] = info[0]\n out['collection'] = info[1]\n\n info = ' '.join(info[2:])\n mongo_query = re.search(query_search_pattern, info)\n mongo_command = re.search(command_search_pattern, info)\n\n if mongo_query:\n out['query'] = mongo_query.group(1)\n info = re.sub(query_search_remove_pattern, r'\\1\\3', info)\n\n elif mongo_command:\n out['query'] = mongo_command.group(1)\n info = re.sub(command_search_remove_pattern, r'\\1\\3', info)\n else:\n out['query'] = \"\"\n\n out['extra'] = info\n out['optimizations'] = ', '.join(self._should_optimize(out))\n\n return out", "def inspect_query(query):\n return _parse_query(query)", "def query(output, query):\n gqlapi = gql.get_api()\n print_output(output, gqlapi.query(query))", "def human(self, query):\n result = self.query(query)\n width = max([len(x) for x in result.keys()])\n fmt = (\" %%-%ds\" % width) + \"\\t%s\\t%s\"\n for key, val in result.iteritems():\n self._log(fmt % (key, val[0], val[1]), deadline=self._deadline)", "def query(self) -> FlaskResponse:\n query_context = self.get_query_context_factory().create(\n **json.loads(request.form[\"query_context\"])\n )\n query_context.raise_for_access()\n result = query_context.get_payload()\n payload_json = result[\"queries\"]\n return json.dumps(\n payload_json, default=utils.json_int_dttm_ser, ignore_nan=True\n )", "def query(monitorPoint) :\n return s.query(monitorPoint)", "def pp_query(query):\n print(format_query(query))", "def queryFlux(source, freq=0.0, deltafreq=0.0, daysback=0.0) :\n return s.queryFlux(source, freq, deltafreq, daysback)", "def _run_query(self):", "def visit_query(self, query):\n return query", "def user_query_stats_helper(request, search_query, base_brand):\n\n # print(\"Got: request %r\" % request)\n print(\"Got search_query %r\" % search_query)\n print(\"Got base_brand %r\" % base_brand)\n\n mongo_utils.track_visit(request)\n\n # first prettify the query for mandrill, intercom, and slack\n try:\n only_setup_params = find_non_default_query(search_query)\n if only_setup_params is None or only_setup_params == [{}]:\n only_setup_params = {}\n query_formatted = format_query_for_displaying(only_setup_params)\n print \"only_setup_params = [%r] query_formatted = [%r]\" % (only_setup_params, query_formatted)\n except:\n a = json.dumps(search_query, sort_keys=True, indent=4, separators=(',', ': '))\n query_formatted = 'Problem in formatting %r' % a\n pass\n\n mongo_utils.track_query(\"brand-search-query\", query_formatted, {\"user_id\": 
request.visitor[\"auth_user\"].id})\n\n account_helpers.intercom_track_event(request, \"brand-search-query\", {\n 'query': query_formatted,\n })\n\n if base_brand:\n user = User.objects.get(id=request.user.id)\n if base_brand.flag_trial_on and not account_helpers.internal_user(user):\n slack_msg = \"\\n**************\\nBrand = \" + base_brand.domain_name + \" User: \" + request.user.email + \"\\n\" + query_formatted\n account_helpers.send_msg_to_slack('brands-trial-activity', slack_msg)\n\n base_brand.saved_queries.create(query=json.dumps(search_query), user=request.user)", "def handleQuery(query) -> list: # noqa\n results = []\n\n if query.isTriggered:\n try:\n # be backwards compatible with v0.2\n if \"disableSort\" in dir(query):\n query.disableSort()\n\n results_setup = setup(query)\n if results_setup:\n return results_setup\n\n query_parts = query.string.strip().split()\n name = None\n if query_parts:\n name = query_parts.pop(0)\n subtext = f'Name: {name if name else \"Not given\"}'\n\n results.extend(\n [\n v0.Item(\n id=__prettyname__,\n icon=countdown_path,\n text=\"Create countdown\",\n subtext=f'{subtext}{\" - <u>Please provide a duration</u>\" if not query_parts else \"\"}',\n completion=__trigger__,\n actions=[\n v0.FuncAction(\n \"Create countdown\",\n lambda name=name, query_parts=query_parts: create_countdown(\n name, *query_parts\n ),\n )\n ],\n ),\n v0.Item(\n id=__prettyname__,\n icon=stopwatch_path,\n text=\"Create stopwatch\",\n subtext=subtext,\n completion=__trigger__,\n actions=[\n v0.FuncAction(\n \"Create stopwatch\",\n lambda name=name, query_parts=query_parts: create_stopwatch(\n name, *query_parts\n ),\n )\n ],\n ),\n ]\n )\n\n # cleanup watches that are done\n for li in [countdowns, stopwatches]:\n for watch in li:\n if watch.to_remove():\n li.remove(watch)\n\n results.extend([get_as_item(item) for item in all_watches()])\n\n except Exception: # user to report error\n if dev_mode: # let exceptions fly!\n print(traceback.format_exc())\n raise\n\n results.insert(\n 0,\n v0.Item(\n id=__prettyname__,\n icon=countdown_path,\n text=\"Something went wrong! 
Press [ENTER] to copy error and report it\",\n actions=[\n v0.ClipAction(\n f\"Copy error - report it to {__homepage__[8:]}\",\n f\"{traceback.format_exc()}\",\n )\n ],\n ),\n )\n\n return results", "async def on_query_response(self, query_type, response):\n pass", "def api():\n query = dict(request.args)\n socket_io.emit('log', dict(data=str(query)), broadcast=True)\n return jsonify(dict(success=True, message='Received'))", "def query(self, session, query):\n\t\ttry:\n\t\t\tstart = time.time()\n\t\t\tevent_docs = []\n\t\t\tfor event in self.model.events.query(**query):\n\t\t\t\tif event.PUBLIC:\n\t\t\t\t\tdoc = event.serialize()\n\t\t\t\t\tdoc['id'] = None\n\t\t\t\t\tevent_docs.append(doc)\n\t\t\t\t\n\t\t\t\n\t\t\tend = time.time()\n\t\texcept Exception:\n\t\t\tlogger.error(traceback.format_exc())\n\t\t\treturn responses.database_error(\"getting a set of events with query %s\" % query)\n\t\t\n\t\tquery['after'] = max(\n\t\t\tquery.get('after', 0), \n\t\t\ttime.time() - configuration.snuggle['changes_synchronizer']['max_age']\n\t\t)\n\t\t\n\t\ttry:\n\t\t\tsnuggler, data = user_data()\n\t\t\tevent = types.EventsQueried(\n\t\t\t\tquery,\n\t\t\t\tend-start,\n\t\t\t\tlen(event_docs),\n\t\t\t\tsnuggler,\n\t\t\t\tdata\n\t\t\t)\n\t\t\tself.model.events.insert(event)\n\t\texcept Exception as e:\n\t\t\tlogger.error(traceback.format_exc())\n\t\t\t\n\t\t\n\t\treturn responses.success(event_docs)", "def handleQuery(self,query):\n results = None\n return results", "def report_old():\n try:\n path = flask.request.args['q']\n except:\n app.logger.info('Unable to decode valid domains from q GET param')\n return flask.redirect('/error/1')\n\n return flask.redirect('/search/{}'.format(path))", "def trackingQuery(self, node, REQUEST=None, **kw):\n node = self.getQueryAnchor(node)\n return self.localQuery(node, REQUEST, **kw)", "def snapshot(snapshot_type, result_q, time_delta):", "def query_request():\n query_data = request.get_json()\n print(query_data)\n example_response = []\n \n # First we need to check if the request is for table or time series data\n if query_data and query_data == 'table':\n # send back columns and rows\n pass\n elif query_data:\n # send back value/clock pairs for timeseries charts\n example_response = generate_fake_timeseries(query_data.get('range', {}).get('from'),\n query_data.get('range', {}).get('to'),\n interval=query_data.get('intervalMs', 60000),\n create=4)\n return make_response(jsonify(example_response))", "def get(self):\n return_status = None\n result = {}\n try:\n log.debug(\"Summary info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": sql }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n temp_d={}\n temp_d.update(rec['tags'])\n temp_d.update(dict(zip(rec['columns'],element)))\n result_d.append(temp_d)\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching aggregate data')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while aggregating the data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching aggregate data'\n finally:\n resp = Response(json.dumps(result), status=return_status, 
mimetype=\"application/json\")\n return resp", "def query_loggings(self, query: str) -> Tuple[List[dict], list]:\n query_data = {'query': self.add_instance_id_to_query(query),\n 'language': 'csql'}\n demisto.debug('Query being executed in CDL: {}'.format(str(query_data)))\n query_service = self.initial_query_service()\n response = query_service.create_query(query_params=query_data, enforce_json=True)\n query_result = response.json()\n\n if not response.ok:\n status_code = response.status_code\n try:\n # For some error responses the messages are in 'query_result['errors'] and for some they are simply\n # in 'query_result\n errors = query_result.get('errors', query_result)\n error_message = ''.join([message.get('message') for message in errors])\n except AttributeError:\n error_message = query_result\n\n raise DemistoException(f'Error in query to Cortex Data Lake XSOAR Connector [{status_code}] - {error_message}')\n\n try:\n raw_results = [r.json() for r in query_service.iter_job_results(job_id=query_result.get('jobId'),\n result_format='valuesDictionary',\n max_wait=2000)]\n except exceptions.HTTPError as e:\n raise DemistoException(f'Received error {str(e)} when querying logs.')\n\n extended_results: List[Dict] = []\n for result in raw_results:\n page = result.get('page', {})\n data = page.get('result', {}).get('data', [])\n if data:\n extended_results.extend(data)\n\n return extended_results, raw_results", "async def monquery(self, ctx, *, query):\r\n\r\n self.connect()\r\n cursor = self.database.entries.find({\"entry\": {\"$regex\": query}})\r\n count = 0\r\n\r\n await ctx.send('Results for \"**{}:**\"'.format(query))\r\n\r\n for entry in cursor:\r\n count += 1\r\n await ctx.send('{}) {}'.format(count, entry['entry']))", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def query(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"query\")", "def process_query(self, action):\n query_terms = action[1]\n screen = ResultScreen(['snip1', 'snip2', 'snip3'])\n screen.query = query_terms\n\n return screen", "def refine_search():\n obj = {}\n\n # query : will be event kit in case of triage information\n uid = flask.request.args.get(\"uid\", None)\n qpositive = flask.request.args.get(\"positive\", \"[]\") # json array\n qnegative = flask.request.args.get(\"negative\", \"[]\") # json array\n\n if uid == None:\n obj[\"error\"] = \"Missing search ID\"\n\n positive = []\n negative = []\n\n # Convert from HVC to non HVC\n for apos in json.loads(qpositive):\n if len(apos) == 9:\n positive.append(int(apos[3:]))\n else:\n positive.append(int(apos))\n\n for apos in json.loads(qnegative):\n if len(apos) == 9:\n negative.append(int(apos[3:]))\n else:\n negative.append(int(apos[3:]))\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uid\n obj[\"query\"][\"positive\"] = positive\n obj[\"query\"][\"negative\"] = negative\n\n try:\n ret = backend.refine_iqr_search(uid,positive,negative,[],[])\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"host\"] = ret[0].host\n obj[\"port\"] = ret[0].port\n obj[\"name\"] = ret[0].name\n obj[\"collection\"] = ret[0].collection\n obj[\"state\"] = \"http://localhost:5003/iqr/search_state?\" + urllib.urlencode({\"uid\" : uid})\n obj[\"results\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid})\n\n return jsonify(obj)", "def audit_remediation_history(self, query=None):\n return self.select(RunHistory).where(query)", "def 
process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata):\n query_metadata = gquery.get_yaml_decorators(query_text)\n\n tags = query_metadata['tags'] if 'tags' in query_metadata else []\n glogger.debug(\"Read query tags: \" + ', '.join(tags))\n\n summary = query_metadata['summary'] if 'summary' in query_metadata else \"\"\n glogger.debug(\"Read query summary: \" + summary)\n\n description = query_metadata['description'] if 'description' in query_metadata else \"\"\n glogger.debug(\"Read query description: \" + description)\n\n method = query_metadata['method'].lower() if 'method' in query_metadata else \"get\"\n if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:\n method = \"get\"\n\n pagination = query_metadata['pagination'] if 'pagination' in query_metadata else \"\"\n glogger.debug(\"Read query pagination: \" + str(pagination))\n\n endpoint = query_metadata['endpoint'] if 'endpoint' in query_metadata else \"\"\n glogger.debug(\"Read query endpoint: \" + endpoint)\n\n # If this query allows pagination, add page number as parameter\n params = []\n if pagination:\n params.append(pageUtils.getSwaggerPaginationDef(pagination))\n\n item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata)\n\n return item", "def request(query):", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-parameterize\",\n action=\"store_true\",\n help=\"Generate a query without parameters\",\n )\n parser.add_argument(\n \"--source-table\",\n type=str,\n help=\"Name of Glean table\",\n default=\"org_mozilla_fenix_stable.metrics_v1\",\n )\n args = parser.parse_args()\n\n # If set to 1 day, then runs of copy_deduplicate may not be done yet\n submission_date = (\n \"date_sub(current_date, interval 2 day)\"\n if args.no_parameterize\n else \"@submission_date\"\n )\n header = (\n \"-- Query generated by: python3 -m \"\n \"bigquery_etl.glam.clients_daily_scalar_aggregates \"\n f\"--source-table {args.source_table}\"\n + (\" --no-parameterize\" if args.no_parameterize else \"\")\n )\n\n schema = get_schema(args.source_table)\n unlabeled_metric_names = get_scalar_metrics(schema, \"unlabeled\")\n labeled_metric_names = get_scalar_metrics(schema, \"labeled\")\n unlabeled_metrics = get_unlabeled_metrics_sql(unlabeled_metric_names).strip()\n labeled_metrics = get_labeled_metrics_sql(labeled_metric_names).strip()\n\n if not unlabeled_metrics and not labeled_metrics:\n print(header)\n print(\"-- Empty query: no probes found!\")\n sys.exit(1)\n print(\n render_main(\n header=header,\n source_table=args.source_table,\n submission_date=submission_date,\n attributes=ATTRIBUTES,\n unlabeled_metrics=unlabeled_metrics,\n labeled_metrics=labeled_metrics,\n ping_type=ping_type_from_table(args.source_table),\n )\n )", "def filter_query(self, query, request, resource):\n raise NotImplementedError()", "def query(\n region: str,\n profile: str,\n query_params: ConfigType,\n quiet: bool = False,\n interval: float = 0.05,\n) -> QueryResultResponse:\n\n client = InsightsClient(region, profile)\n client.start_query(**query_params)\n\n counter = 0\n progress = Progress(\n processing_msg=\"Search for matching logs...\",\n end_msg=\"Search completed!\",\n quiet=quiet,\n )\n\n try:\n while True:\n progress.show(counter)\n\n if (results := client.fetch_result()) is not None:\n progress.done()\n return cast(QueryResultResponse, results)\n\n counter += 1\n sleep(interval)\n\n except (\n QueryNotYetStartError,\n 
NotFetchQueryResultError,\n QueryTimeoutError,\n QueryAlreadyCancelled,\n QueryUnknownError,\n ) as err:\n sys.exit(err)\n\n except KeyboardInterrupt:\n client.end_query()\n sys.exit(\"\\nAbort\")", "def executeQuery(payload, newQuery):\r\n\tq = newQuery.format(**payload)\r\n\tdb.query(q)\r\n\tdata = db.fetchall()\r\n\treturn data", "async def query(self, metric):\n raise NotImplementedError()", "def query(self):\n pass", "def queryProcessor(self, query):\n # Check chatterbot first\n response = self.cc.get_response(query)\n attachments = None\n if response.confidence > 0.5:\n # process this response\n response = self.cc.processResponse(response.text)\n else:\n # probably not that good so head to Wolfram alpha\n (response, attachments) = self.processWolframAlphaQuery(query)\n if response == None:\n response = \"I don't know that.\"\n return (response, attachments)", "def post_le_search(query_config, auth):\n logs = query_config.get('logs').split(\":\")\n to_ts = int(round(time.time() * 1000))\n from_ts = to_ts - int(query_config.get('query_range')) * 1000\n statement = query_config.get('statement')\n\n payload = {\"logs\": logs,\n \"leql\": {\"during\": {\"from\": from_ts, \"to\": to_ts},\n \"statement\": statement}}\n\n LOGGER.info(\"Making request. Json: %s\", json.dumps(payload))\n return do_post_json_to_le(QUERY_URL, payload, get_le_api_key(auth))", "async def run_query(query):\n async with httpx.AsyncClient(timeout=None) as client:\n response = await client.post(\n BLAZEGRAPH_URL,\n headers=BLAZEGRAPH_HEADERS,\n data=query,\n )\n assert response.status_code < 300\n return response.json()['results']['bindings']", "def get_query():\n query = \"\"\"{\n repository(name: \"flux\", owner: \"fluxcd\") {\n forkCount\n issues {\n totalCount\n }\n pullRequests {\n totalCount\n }\n releases {\n totalCount\n }\n stargazers {\n totalCount\n }\n watchers {\n totalCount\n }\n }\n}\n \"\"\"\n return query", "def get_any_sample_query(ctrl, srvid, database, queryid, _from, _to):\n has_pgqs = ctrl.has_extension_version(srvid, \"pg_qualstats\", \"0.0.7\")\n example_query = None\n if has_pgqs:\n rs = list(ctrl.execute(text(\"\"\"\n SELECT pg_qualstats_example_query(:queryid)\n LIMIT 1\n \"\"\"), params={\"queryid\": queryid}, srvid=srvid, remote_access=True))\n if len(rs) > 0:\n example_query = rs[0][0]\n if example_query is not None:\n unprepared = unprepare(example_query)\n if example_query == unprepared:\n return example_query\n return get_unjumbled_query(ctrl, srvid, database, queryid,\n _from, _to, 'most executed')", "def cli_saved_queries_get(query_name):\n query_data = None\n try:\n query_data = api.saved_queries_get(query_name=query_name)\n except NoRecordsFound as error:\n print \"%(error)s\" % locals()\n return \n print \" \".join(query_data)", "def _reset_query(self):\n self.query = pysnow.QueryBuilder()\n self.desired_response_fields = list()", "def query(self):\r\n records = self.input()\r\n if self.to_investigate:\r\n records = self.investigate(records)\r\n post.log.info(\"Caching {} records for {}\".format(len(records), self.name))\r\n self.cache_records(records)", "def execute_query():\n start_time = time.time()\n\n queries = request.json[\"queries\"]\n random_command = request.json[\"random_command\"]\n\n \"\"\" Running the queries against the pre-loaded index. \"\"\"\n output_dict = runner.run_queries(queries, random_command)\n\n \"\"\" Dumping the results to a JSON file. 
\"\"\"\n with open(output_location, 'w') as fp:\n json.dump(output_dict, fp)\n\n response = {\n \"Response\": output_dict,\n \"time_taken\": str(time.time() - start_time),\n \"username_hash\": username_hash\n }\n return flask.jsonify(response)", "def handler(event, context):\n logger.info(json.dumps(event))\n params = event.get(\"queryStringParameters\")\n\n if params and \"ride-id\" in params:\n response = query_db(params[\"ride-id\"])\n return create_http_response(200, response)\n\n all_results = scan_db()\n time_results = strip_old_records(all_results)\n results = sort_results(time_results, params)\n if not params:\n return create_http_response(200, results)\n origin = params.get(\"origin\", \"\")\n destination = params.get(\"destination\", \"\")\n results = search_routes(results, origin, destination)\n\n return create_http_response(200, results)", "def handleQuery(self, message, protocol, address):\n\n # Add transport to each query\n for query in message.queries:\n query.device_addr = self._get_addr(protocol, address)\n \n server.DNSServerFactory.handleQuery(self, message, protocol, address)", "def query(self):", "def get(self):\n args = search_parser.parse_args()\n return_status = None\n deviceid = request.args['deviceid']\n start_time=request.args['start_time']\n end_time=request.args['end_time']\n log.debug(request.args)\n result = {}\n try:\n start_time=start_time.replace(\"T\", \" \")\n end_time=end_time.replace(\"T\", \" \")\n log.debug(\"deviceId searched for : \" + deviceid+ \" Start Time:\"+start_time+\" end_time:\"+end_time)\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n \n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": \"SELECT * FROM \\\"ttd_devices\\\" WHERE deviceId=\\'%s\\' AND time >= '%s' AND time <= '%s' \"%(deviceid,start_time,end_time)}\n response = requests.request(\"GET\", url, params=querystring) \n D=json.loads(response.text)\n #log.debug('------------------------------------------')\n #log.debug(D)\n #log.debug('------------------------------------------')\n response_dict=[]\n for element in D['results'][0]['series'][0]['values']:\n temp_dict=dict(zip(D['results'][0]['series'][0]['columns'],element))\n processed_dict=dict()\n for key,value in temp_dict.items():\n if value is not None and value != np.nan:\n if key == 'tStamp':\n timestamp = datetime.fromtimestamp(eval(value))\n value=timestamp.strftime('%Y-%m-%d %H:%M:%S')\n elif key == 'ipAddress':\n value=eval(value)\n elif key == 'time':\n value=str(pd.to_datetime(value, format=\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n processed_dict[key]=value \n response_dict.append(processed_dict)\n #log.debug('------------------------------------------')\n #log.debug(response_dict)\n #log.debug('------------------------------------------')\n result['status'] = 1\n result['message']=response_dict\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while processing the request for search')\n result['status'] = 0\n return_status = 400\n result['message'] = e.args[0]\n except :\n result = {}\n log.exception('Exception while doing search')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while processing the request for search'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def query(self):\r\n raise NotImplementedError", "def inspect_query(querystring: str) -> dict:\n return _parse_query(querystring)", "def search():\n def 
eventStream():\n while True:\n try:\n data = q.get(timeout=0.5)\n except:\n data = {'msg': 'Cloud Run cold start'}\n yield \"event: images\\ndata: {}\\n\\n\".format(json.dumps(data))\n return Response(eventStream(), mimetype=\"text/event-stream\")", "def query(self, query):\n self._query = query", "def post(self, args):\n try:\n analyzer = Analyzer()\n key = args['key'] if 'key' in args.keys() else None\n results = analyzer.run_analysis(args['query'], key)\n estimated_intent = results['classification'][0]['intent']\n estimated_confidence = results['classification'][0]['confidence']\n if(args['save_expression']):\n db = get_db('expressions')\n db.add_unlabeled_expression(args['query'], estimated_intent, estimated_confidence)\n resp = jsonify(results)\n return resp\n except DatabaseError as e:\n logger.exception(e.value)\n resp = jsonify(e.value)\n resp.status_code = 500\n return resp\n except AnalyzerError as e:\n logger.exception(e.value)\n resp = jsonify(e.value)\n resp.status_code = 500\n return resp", "def get_audit(self, query, session):\n raise NotImplementedError()", "def query(self):\n return self.event.get('queryStringParameters', dict())", "def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result", "def query():\n query = request.json.get('query')\n variables = request.json.get('variables') # Todo: add handling variables\n logger.debug('Query: %s', request.json)\n result = schema.execute(query)\n result_hash = format_result(result)\n return result_hash", "def query(self, query, initNs=None, initBindings=None, queryGraph=None, **kwargs):\n\n# r_queryType = pattern.search(query).group(\"prefixes\").upper()\n# print(r_queryType)\n uri = self.rest_services[\"repository\"]\n infer = kwargs.get('infer',None)\n #timeout = kwargs.get('timeout',\"0\")\n payload = {\"$\"+k: v.n3() for k,v in initBindings.items()}\n\n payload[\"infer\"] = self.infer if infer is None else infer\n payload[\"infer\"] = str(payload[\"infer\"]).lower()\n #payload[\"$\"+timeout]=0\n payload[\"query\"] = query\n r = requests.post(uri, data=payload,\n stream=True,\n headers= {\"Accept\" : \"application/sparql-results+json,application/trix\",\n 'connection': 'keep-alive',\n 'Accept-Encoding': 'gzip,deflate',\n \"Content-Type\" :\"application/x-www-form-urlencoded\"})\n\n r.raw.decode_content = True\n if r.headers['Content-Type'] == 'application/sparql-results+json;charset=UTF-8':\n return self.__make_result(r)\n elif r.headers['Content-Type'] == 'application/trix;charset=UTF-8':\n return self.__make_trix_generator__(r)\n else:\n raise ValueError(\"Response content type not parsable {r}\".format(r=r.text))", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def amtool_silence_query(self, mess, expired=None, within=None, matchers=[]):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n filters = helper.get_filters_by_terms(matchers)\n self.log.info(\"Expired {0} within {1} filtered {2}\".format(expired, within, filters))\n result = helper.get_silences(filter=filters, expired=expired, within=within)\n return 
{\"silences\": result}", "def listQueries():\n for query_name in elasticsearch_queries.queries.keys():\n logger.info(\"Query name: %s\" % query_name)", "def inlinequery(bot, update):\n #So I type \"@Mc125_Bot\" followed by a space...\n query = update.inline_query.query\n\n #...and these are the things that pop out!\n results = [\n\n InlineQueryResultArticle(\n id=uuid4(),\n title=\"The Howard Diner\",\n input_message_content=\n InputTextMessageContent(\"https://hamilton.cafebonappetit.com/cafe/the-howard-diner/\",parse_mode=ParseMode.MARKDOWN),\n description=\"Diner Food!\",\n thumb_url=\"https://hamilton.cafebonappetit.com/content/themes/bamco/img/theme/cafe_bamco_logo-new.png\"),\n\n InlineQueryResultArticle(\n id=uuid4(),\n title=\"McEwen\",\n input_message_content=\n InputTextMessageContent(\"https://hamilton.cafebonappetit.com/cafe/mcewens-green-cafe/\",parse_mode=ParseMode.MARKDOWN),\n description=\"McEwen Food!\",\n thumb_url=\"https://hamilton.cafebonappetit.com/content/themes/bamco/img/theme/cafe_bamco_logo-new.png\"),\n\n InlineQueryResultArticle(\n id=uuid4(),\n title=\"Commons\",\n input_message_content=\n InputTextMessageContent(\"https://hamilton.cafebonappetit.com/cafe/soper-commons-cafe/\",parse_mode=ParseMode.MARKDOWN),\n description=\"Commons Food!\",\n thumb_url=\"https://hamilton.cafebonappetit.com/content/themes/bamco/img/theme/cafe_bamco_logo-new.png\"),\n\n InlineQueryResultArticle(\n id=uuid4(),\n title=\"Hill Card Refill\",\n input_message_content=\n InputTextMessageContent(\"https://hamilton-sp.blackboard.com/eaccounts/AnonymousHome.aspx\",parse_mode=ParseMode.MARKDOWN),\n description=\"It's as if Hamilton hasn't taken enough of our money yet!\",\n thumb_url=\"https://upload.wikimedia.org/wikipedia/commons/0/00/Hamilton_Continentals_logo.png\"),\n\n InlineQueryResultArticle(\n id=uuid4(),\n title=\"Blackboard\",\n input_message_content=\n InputTextMessageContent(\"https://blackboard.hamilton.edu/\",parse_mode=ParseMode.MARKDOWN),\n description=\"See your classes and stuff!\",\n thumb_url=\"https://en.wikipedia.org/wiki/Blackboard_Inc.#/media/File:Blackboard_Inc._logo.png\")\n ]\n\n #Once the user clicks enter, the results will pop out!\n update.inline_query.answer(results)", "def query(self, query):\n # if self._send > self._reset:\n # self.reconnect()\n query = format_query(query)\n try:\n self.socket.send(query)\n except socket.error:\n raise ConnectionError('Server is not running.')\n answer = self._stream()\n self._send += 1\n answer = format_answer(answer)\n return answer", "def issue_query(self):\n if self.queries_issued < len(self.query_list):\n query_text = self.query_list[self.queries_issued]\n\n print \"ISSUING QUERY '{0}' to DRIVER\".format(query_text)\n response = self.driver.perform_action((10, query_text))\n self.action_history.append((10, response))\n\n self.queries_issued = self.queries_issued + 1\n self.documents_examined = 0\n\n return response\n else:\n self.driver.perform_action((00, None))\n return None", "def convert_old_catalog_query(query):\n for k, v in query.items():\n q_field = q_type = q_param = None\n if '_usage' in k:\n q_field = k.replace('_usage', '')\n usage = v.split(':')\n q_type = usage[0].strip()\n q_param = ':'.join(usage[1:]).strip()\n elif '_operator' in k:\n q_field = k.replace('_operator', '')\n q_type = 'operator'\n q_param = v\n if q_field:\n new_val = query[q_field]\n if not isinstance(v, dict):\n new_val = {'query': new_val}\n new_val[q_type] = q_param\n query[q_field] = new_val\n del query[k]\n return query", "def 
send_query(self, query_str):\n failed_query_message = f\"The following query failed:\\n{query_str}\"\n try:\n response = client.query_workspace(\n workspace_id = ws_id,\n query = query_str,\n timespan = (start_time, end_time)\n )\n if response.status == LogsQueryStatus.PARTIAL:\n self.fail(f\"Got partial response for the following query:\\n{query_str}\")\n elif response.status == LogsQueryStatus.FAILURE:\n self.fail(failed_query_message)\n elif response.tables == None or len(response.tables) == 0:\n self.fail(\"No data tables were returned in the response for the query\")\n else:\n return response\n except HttpResponseError as err:\n self.fail(failed_query_message)", "def process_database(db_config):\n client = InfluxDBClient(\n host=db_config['host'],\n database=db_config['database'],\n port=db_config['port']\n )\n\n state = get_database_state(client, db_config)\n existing_policies, existing_queries, policy_info, query_info = state\n\n for policy in policy_info:\n if policy not in existing_policies:\n logger.info(\"Creating {}\".format(policy))\n client.query(policy_info[policy][\"create\"])\n else:\n current = existing_policies[policy]\n desired = policy_info[policy]\n if current[\"duration\"] != desired[\"retention\"]:\n logger.info(\"Updating policy {}\".format(policy))\n client.query(desired[\"update\"])\n\n for policy in existing_policies:\n if policy not in policy_info:\n logger.info(\"Deleting policy {}\".format(policy))\n query = \"DROP RETENTION POLICY \\\"{}\\\" ON \\\"{}\\\"\".format(\n policy, db_config['database']\n )\n client.query(query)\n\n for query in query_info:\n if query not in existing_queries:\n logger.info(\"Creating query {}\".format(query))\n client.query(query_info[query][\"query\"])\n\n for query in existing_queries:\n if query in query_info:\n current = existing_queries[query]\n desired = query_info[query]\n if current != desired[\"query\"]:\n logger.info(\"Re-Creating query {}\".format(query))\n client.query(\n \"DROP CONTINUOUS QUERY {} ON {}\"\n \"\".format(query, db_config['database']))\n client.query(desired[\"query\"])", "def _format_queries(self, body):\n for query in body:\n if \"bindVars\" in query:\n query[\"bind_vars\"] = query.pop(\"bindVars\")\n if \"runTime\" in query:\n query[\"runtime\"] = query.pop(\"runTime\")\n return body", "def interrogate(self, query_type, target):\n report = \"I didn't understand your request. 
Try asking for help!\\n\"\n if query_type == \"server_report\":\n report = self.tasks.report_server_formatted.delay(target)\n elif query_type == \"group_report\":\n report = self.tasks.report_group_formatted.delay(target)\n elif query_type == \"ip_report\":\n report = self.get_ip_report(target)\n elif query_type == \"all_servers\":\n report = self.tasks.list_all_servers_formatted.delay()\n elif query_type == \"all_groups\":\n report = self.tasks.list_all_groups_formatted.delay()\n elif query_type == \"group_firewall_report\":\n img_tag = os.getenv('FIREWALL_GRAPH_VERSION', 'v0.2')\n image = \"docker.io/halotools/firewall-graph:%s\" % img_tag\n env_literal = {\"TARGET\": target}\n env_expand = {\"HALO_API_KEY\": \"HALO_API_KEY\",\n \"HALO_API_SECRET_KEY\": \"HALO_API_SECRET_KEY\",\n \"HALO_API_HOSTNAME\": \"HALO_API_HOSTNAME\",\n \"HTTPS_PROXY\": \"HTTPS_PROXY\"}\n report = self.tasks.generic_containerized_task.delay(image,\n env_literal,\n env_expand,\n False)\n elif query_type == \"servers_in_group\":\n report = self.tasks.servers_in_group_formatted.delay(target)\n elif query_type == \"servers_by_cve\":\n report = self.tasks.search_server_by_cve(target)\n elif query_type == \"ec2_halo_footprint_csv\":\n img_tag = os.getenv('EC2_HALO_DELTA_VERSION', 'v0.2')\n image = \"docker.io/halotools/ec2-halo-delta:%s\" % img_tag\n env_literal = {\"OUTPUT_FORMAT\": \"csv\"}\n # Set optional args\n optional_fields = [\"AWS_ROLE_NAME\", \"AWS_ACCOUNT_NUMBERS\"]\n for field in optional_fields:\n if os.getenv(field, \"\") != \"\":\n env_literal[field] = os.getenv(field)\n env_expand = {\"HALO_API_KEY\": \"HALO_API_KEY\",\n \"HALO_API_SECRET_KEY\": \"HALO_API_SECRET_KEY\",\n \"HALO_API_HOSTNAME\": \"HALO_API_HOSTNAME\",\n \"AWS_ACCESS_KEY_ID\": \"AWS_ACCESS_KEY_ID\",\n \"AWS_SECRET_ACCESS_KEY\": \"AWS_SECRET_ACCESS_KEY\",\n \"HTTPS_PROXY\": \"HTTPS_PROXY\"}\n report = self.tasks.generic_containerized_task.delay(image,\n env_literal,\n env_expand,\n False)\n elif query_type == \"tasks\":\n report = self.list_tasks_formatted(self.flower_host)\n elif query_type == \"selfie\":\n report = Halo.take_selfie()\n elif query_type == \"help\":\n report = Halo.help_text()\n elif query_type == \"version\":\n report = Halo.version_info(self.product_version) + \"\\n\"\n elif query_type == \"config\":\n report = self.running_config()\n elif query_type == \"health\":\n report = self.health_string\n return(report)", "def _refresh_query(session, query_id):\n resp = session.post('{}/api/queries/{}/refresh'.format(REDASH_HOST, query_id))\n return resp", "def make_query(self):", "def __generate_search_query(self) -> None:\n if self.query_accuracy < 100:\n if self.title is not None and self.title != '' and self.artist is not None and self.artist != '':\n # Use the title and the artist name to find more information about the song.\n query: str = self.title + ' ' + self.artist\n query = re.sub(self.__get_filter_regex(), '', query)\n self.query = query\n # Remove unnecessary information in order to get a simpler query version.\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 100\n return\n if self.query_accuracy < 50:\n # No title nor artist name available, use the filename as search query.\n filename: str = os.path.basename(self.original_path)\n filename = os.path.splitext(filename)[0]\n query: str = filename.lower()\n query = re.sub(self.__get_filter_regex(), '', query)\n query = query.replace('_', ' ')\n query = query.strip()\n self.query = query\n self.minimal_query = 
re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 50", "def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):\r\n index_analysis = None\r\n recommendation = None\r\n namespace = parsed_query['ns']\r\n indexStatus = \"unknown\"\r\n\r\n index_cache_entry = self._ensure_index_cache(db_uri,\r\n db_name,\r\n collection_name)\r\n\r\n\r\n query_analysis = self._generate_query_analysis(parsed_query,\r\n db_name,\r\n collection_name)\r\n if ((query_analysis['analyzedFields'] != []) and\r\n query_analysis['supported']):\r\n index_analysis = self._generate_index_analysis(query_analysis,\r\n index_cache_entry['indexes'])\r\n indexStatus = index_analysis['indexStatus']\r\n if index_analysis['indexStatus'] != 'full':\r\n recommendation = self._generate_recommendation(query_analysis,\r\n db_name,\r\n collection_name)\r\n # a temporary fix to suppress faulty parsing of $regexes.\r\n # if the recommendation cannot be re-parsed into yaml, we assume\r\n # it is invalid.\r\n if not validate_yaml(recommendation['index']):\r\n recommendation = None\r\n query_analysis['supported'] = False\r\n\r\n\r\n # QUERY REPORT\r\n return OrderedDict({\r\n 'queryMask': parsed_query['queryMask'],\r\n 'indexStatus': indexStatus,\r\n 'parsed': parsed_query,\r\n 'namespace': namespace,\r\n 'queryAnalysis': query_analysis,\r\n 'indexAnalysis': index_analysis,\r\n 'recommendation': recommendation\r\n })", "def main(argv):\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n args = parse_args(argv[1:])\n database = parse_database(args.dict_file)\n\n server_socket = socket.socket(type=socket.SOCK_DGRAM)\n server_socket.bind(args.servent_address)\n\n query_creator = utils.QueryCreator()\n queries_seen = set()\n while True:\n message_data, message_origin = server_socket.recvfrom(\n utils.MAX_SERVER_MESSAGE_SIZE)\n message_type = utils.extract_message_type(message_data)\n\n if message_type == utils.MessageType.CLIREQ:\n key = utils.unpack_clireq(message_data)\n query = query_creator.new_query(key, message_origin)\n logging.info('Received clireq, new query: %s', repr(query))\n elif message_type == utils.MessageType.QUERY:\n query = utils.unpack_query(message_data)\n logging.info('Received query: %s', repr(query))\n else:\n logging.error('Server received RESPONSE message from %s',\n message_origin)\n continue\n\n if query.content in queries_seen:\n logging.info('Query already seen: %s', repr(query))\n continue\n queries_seen.add(query.content)\n\n if query.ttl > 0 and args.neighbor_addresses:\n packed_query = utils.pack_query(query)\n for neighbor_address in args.neighbor_addresses:\n if neighbor_address != message_origin:\n logging.info('Forwarding query to %s', neighbor_address)\n client_socket = socket.socket(type=socket.SOCK_DGRAM)\n client_socket.sendto(packed_query, neighbor_address)\n\n if query.content.key in database:\n response = utils.pack_response(query.content.key,\n database[query.content.key])\n logging.info('Sending response to %s', query.content.address)\n client_socket = socket.socket(type=socket.SOCK_DGRAM)\n client_socket.sendto(response, query.content.address)", "def show_last_watched(self, query):\n alias = query.lower()\n if alias in ['day', 'week', 'month']:\n self.show_last_watched_by_date(alias)\n else:\n self.show_last_watched_by_alias(query)", "def updateView(request, query, exquery, wild_card_str):\n query = copy.deepcopy(query)\n exquery = copy.deepcopy(exquery)\n\n if 'modificationtime__castdate__range' in query:\n 
query['creationdate__castdate__range'] = query['modificationtime__castdate__range']\n del query['modificationtime__castdate__range']\n if 'workinggroup' in query and 'preset' in request.session['requestParams'] and \\\n request.session['requestParams']['preset'] == 'MC' and ',' in query['workinggroup']:\n # excludeWGList = list(str(wg[1:]) for wg in request.session['requestParams']['workinggroup'].split(','))\n # exquery['workinggroup__in'] = excludeWGList\n try:\n del query['workinggroup']\n except:\n pass\n if 'status' in request.session['requestParams'] and request.session['requestParams']['status'] == '':\n try:\n del query['status']\n except:\n pass\n if 'site' in request.session['requestParams'] and request.session['requestParams']['site'] == 'hpc':\n try:\n del query['site']\n except:\n pass\n exquery['site__isnull'] = True\n if 'currentpriority__gte' in query and 'currentpriority__lte' in query:\n query['priority__gte'] = query['currentpriority__gte']\n query['priority__lte'] = query['currentpriority__lte']\n del query['currentpriority__gte']\n del query['currentpriority__lte']\n\n if 'runnumber' in request.session['requestParams'] and request.session['requestParams']['runnumber']:\n try:\n query['runnumber'] = int(request.session['requestParams']['runnumber'])\n except:\n _logger.exception('Provided runnumber is not valid. It should be int')\n\n jedi_tasks_fields = [field.name for field in JediTasks._meta.get_fields() if field.get_internal_type() == 'CharField']\n running_prod_fields = (set([\n field.name for field in RunningProdTasksModel._meta.get_fields() if field.get_internal_type() == 'CharField'\n ])).difference(set(jedi_tasks_fields))\n\n for f in running_prod_fields:\n if f in request.session['requestParams'] and request.session['requestParams'][f] and f not in query and f not in wild_card_str:\n if f == 'hashtags':\n wild_card_str += ' and ('\n wildCards = request.session['requestParams'][f].split(',')\n currentCardCount = 1\n countCards = len(wildCards)\n for card in wildCards:\n if '*' not in card:\n card = '*' + card + '*'\n elif card.startswith('*'):\n card = card + '*'\n elif card.endswith('*'):\n card = '*' + card\n wild_card_str += preprocess_wild_card_string(card, 'hashtags')\n if currentCardCount < countCards:\n wild_card_str += ' and '\n currentCardCount += 1\n wild_card_str += ')'\n elif f == 'scope' and (\n '!' 
in request.session['requestParams'][f] or '*' in request.session['requestParams'][f]):\n wild_card_str += ' and ({})'.format(preprocess_wild_card_string(request.session['requestParams'][f], f))\n else:\n query[f] = request.session['requestParams'][f]\n\n return query, exquery, wild_card_str", "async def query(self, metric):\n metric_name = metric.spec.provider.metric\n\n url = self.metrics_provider.spec.influx.url\n token = self.metrics_provider.spec.influx.token\n org = self.metrics_provider.spec.influx.org\n bucket_name = self.metrics_provider.spec.influx.bucket\n\n client = InfluxDBClient(url=url, token=token, org=org)\n query_api = client.query_api()\n\n query = f'''\n from(bucket:\"{bucket_name}\")\n |> range(start: -1h)\n |> filter(fn: (r) => r._measurement == \"{metric_name}\")\n |> last()\n '''\n\n try:\n loop = asyncio.get_event_loop()\n result = await loop.run_in_executor(None, query_api.query, query)\n for table in result:\n for record in table.records:\n response = record.values['_value']\n return float(response)\n\n except Exception as err:\n metric_provider_name = self.metrics_provider.metadata.name\n raise MetricsProviderError(\n f\"Failed to query InfluxDB with provider {metric_provider_name!r}\"\n ) from err\n\n raise MetricError(f\"Metric {metric_name!r} not in InfluxDB response\")", "def postQuery(self):\n pass", "def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict", "def when_query_pipeline(context):\n result = context.stage.runTest('testing stage endpoint')\n print('Result = {}'.format(result))\n context.result = result", "def query(self) -> None:\n raise NotImplementedError()", "def index(self, q):\n # take the query from url encoded format to a string\n qstr = unquote(q)\n langs = detect_langs(qstr)\n best = langs[0]\n prob, iso_code = best.prob, best.lang\n is_relieable = prob >= RELIABILITY_THRESHOLD\n language_name = to_name(iso_code)\n shortend_query = qstr[0: min(len(qstr), 16)] # send back the first 32 characters.\n return json.dumps({\"query_short\": shortend_query,\n \"prob\": math.floor(prob * 100),\n \"reliable\": is_relieable,\n \"iso_code\": iso_code,\n \"lang\": language_name}) + \"\\n\"", "def _report_intermediates_and_final(query_result: list[Any], metric: str, query: str, scale: float = 1.) -> tuple[float, list[float]]:\n if not query_result:\n raise ValueError('Invalid query. 
Results from benchmark is empty: ' + query)\n if len(query_result) > 1:\n query_result = random.choice(query_result)\n else:\n query_result = query_result[0]\n query_dict = cast(dict, query_result)\n for i in query_dict.get('intermediates', []):\n if i[metric] is not None:\n nni.report_intermediate_result(i[metric] * scale)\n nni.report_final_result(query_dict[metric] * scale)\n return query_dict[metric]", "def query(self, query):\n queryFile = self.__cacheLocation + \"/\" + query + \".json\"\n if os.path.isfile(queryFile):\n reply = json.load(open(queryFile, \"r\"))\n return reply\n reply = self.__fmqlIF.query(query)\n jreply = json.loads(reply)\n jcache = open(self.__cacheLocation + \"/\" + query + \".json\", \"w\")\n json.dump(jreply, jcache)\n jcache.close()\n # logging.info(\"Cached \" + query)\n return jreply", "def retrieve_values(query, metric):\n utc_now = datetime.utcnow()\n\n query = \"\"\"sum(\n max(kube_pod_labels{label_ow_action!=\"\"}) by (label_ow_action, pod, label_vim_id)\n *\n on(pod)\n group_right(label_ow_action, label_vim_id)\n label_replace(\n sum by (pod_name) (%(metric_function)s(%(metric_name)s{namespace=\"%(namespace)s\"}[1m])), \n \"pod\", \n \"$1\", \n \"pod_name\", \n \"(.+)\"\n )\n ) by (pod, label_ow_action, label_vim_id)\"\"\" \\\n % {\"metric_function\": apply_function_per_metric(metric), \"metric_name\": metric,\n \"namespace\": \"default\"}\n\n url_query = urllib.parse.quote(query)\n from_dt = utc_now - timedelta(seconds=int(SCHEDULER_SECONDS))\n from_time = from_dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n to_time = utc_now.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n return query.get(url_query, from_time=from_time, to_time=to_time, step=PROMETHEUS_POLLING_STEP)", "def get_raw_data(report, bucket, replay_path, query):\n\n logger = logging.getLogger(\"SimpleReplayLogger\")\n s3_client = boto3.client('s3')\n try:\n response = s3_client.get_object(Bucket=bucket.get('bucket_name'), Key=f\"{replay_path}/raw_data/{query}000\")\n except Exception as e:\n logger.error(f\"Unable to get raw data from S3. Results for {query} not found. 
{e}\")\n df = pd.read_csv(response.get(\"Body\")).fillna(0)\n logger.debug(f\"Parsing results from '{query}' query.\")\n if query == 'latency_distribution':\n report.feature_graph = df\n else:\n for t, vals in report.tables.items():\n if vals.get('sql') == query:\n vals['data'] = read_data(t, df, vals.get('columns'), report)", "def event_search_v1_command(client: Client, args: Dict[str, Any], return_v1_output: bool) -> Union[CommandResults, Dict]:\n new_args = {\n 'query': args.get('query'),\n 'limit': args.get('limit', '100'),\n 'time_range_unit': args.get('time-range-unit'),\n 'time_range_value': args.get('time-range-value'),\n 'time_range_date_from': args.get('time-range-date-from'),\n 'time_range_date_to': args.get('time-range-date-to'),\n }\n\n command_results = event_search_command(client, new_args)\n if return_v1_output:\n response = command_results.raw_response\n command_results = command_results.to_context()\n command_results['EntryContext'].update({'Redlock.Event(val.id == obj.id)': response}) # type: ignore[index]\n return command_results", "def _profile_query(collection, query=None, op=None, safe=None, result=None, docs_affected=None):\n\n report_kvs = {}\n _add_connection_info(report_kvs, collection.database)\n report_kvs['Collection'] = collection.name\n\n if isinstance(query, dict):\n report_kvs['Query'] = _to_json(query)\n if op:\n if op == 'find' and collection.name == '$cmd':\n report_kvs['QueryFingerprint'] = '%s.command(%s)' % (collection.database.name,\n _command_fingerprint(query))\n else:\n report_kvs['QueryFingerprint'] = '%s.%s.%s(%s)' % (collection.database.name,\n collection.name, op, _query_fingerprint(query))\n\n if op:\n report_kvs['QueryOp'] = op\n\n if safe is not None:\n report_kvs['SafeMode'] = safe\n # We only get document count if safe mode is true\n if safe and docs_affected is None and result is not None and 'n' in result:\n report_kvs['NumDocumentsAffected'] = result['n']\n\n if docs_affected is not None: # Used for insert\n report_kvs['NumDocumentsAffected'] = docs_affected\n\n return report_kvs", "def _send_query(self, query) -> None:\n Cli3App.instance().session.send_query(query)", "def test_unfilteredQuery(self):\n message = self._queryTest(False)\n self.assertIsInstance(message, Message)\n self.assertEqual(message.queries, [])\n self.assertEqual(\n message.answers,\n [RRHeader(b'foo.example.com', payload=Record_A('5.8.13.21', ttl=0))])\n self.assertEqual(message.authority, [])\n self.assertEqual(message.additional, [])", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def event_query(timestamp):\n graphql_client = GraphQLClient('https://api.thegraph.com/subgraphs/name/miracle2k/all-the-keeps')\n members = GqlQuery().fields(['address']).query('members').generate()\n bondedECDSAKeep = GqlQuery().fields([members]).query('bondedECDSAKeep').generate()\n deposit = GqlQuery().fields(['id', bondedECDSAKeep]).query('deposit').generate()\n\n queries = []\n for event in event_queries.values():\n queries.append(GqlQuery()\n .fields(['id', 'timestamp', deposit])\n .query(event, input={'where': '{timestamp_gt: ' + str(timestamp) + '}'})\n .generate())\n final_query = GqlQuery().fields(queries).generate()\n\n result = jsonpickle.decode(graphql_client.execute(final_query))\n final_events = {}\n for event_type, events in result['data'].items():\n if not events:\n continue\n final_events.update({event_type: events_parser(events)})\n return final_events", "def 
__on_query_edited(self):\n self.__refresh_search_results()", "def query(\n c7n_config: C7nCfg,\n data_dir: PathLike = Path(\"data\").joinpath(\"query\"),\n telemetry_disabled: bool = True,\n):\n run(\n c7n_config, data_dir=data_dir, telemetry_disabled=telemetry_disabled, dryrun=True,\n )", "def query(self):\n self._measurements[self.KEY_USAGE].df = self.fetch_data_usage()" ]
[ "0.57339936", "0.56814665", "0.5500225", "0.54920375", "0.5458039", "0.5432182", "0.53636396", "0.5292462", "0.52455795", "0.5230611", "0.5201298", "0.51619875", "0.51121604", "0.5105648", "0.50829905", "0.50792825", "0.5070931", "0.50500315", "0.5038517", "0.5031934", "0.50188744", "0.501349", "0.4998475", "0.49821755", "0.49595734", "0.49544862", "0.4931842", "0.49263203", "0.49263203", "0.49233907", "0.49221435", "0.49189475", "0.49156", "0.491321", "0.48861608", "0.48858953", "0.4884205", "0.48769632", "0.4875355", "0.48727617", "0.48417318", "0.48317185", "0.482566", "0.48118624", "0.4807122", "0.47985667", "0.47951654", "0.4794558", "0.47928107", "0.4792359", "0.47891003", "0.47870672", "0.47832388", "0.4772637", "0.47642303", "0.47547507", "0.47527426", "0.47398788", "0.47369736", "0.47316515", "0.47266236", "0.4724317", "0.47240132", "0.47210225", "0.4718412", "0.47139433", "0.47110394", "0.47093078", "0.4708856", "0.47063208", "0.46971503", "0.4689528", "0.46877468", "0.46867535", "0.46674812", "0.46661204", "0.46658477", "0.46652535", "0.46642122", "0.46579295", "0.46505517", "0.46496424", "0.46483573", "0.4642995", "0.46397933", "0.46394002", "0.46381393", "0.4637158", "0.4636587", "0.46363717", "0.46294397", "0.46279317", "0.4620372", "0.46125025", "0.46118274", "0.46104515", "0.46062902", "0.46054897", "0.46049806", "0.46033648" ]
0.5192731
11
Will be used in DataGenerator
def _read(path, desired_size):
    dcm = pydicom.dcmread(path)
    slope, intercept = dcm.RescaleSlope, dcm.RescaleIntercept
    try:
        img = (dcm.pixel_array * slope + intercept)
    except:
        img = np.zeros(desired_size[:2])-1

    if img.shape != desired_size[:2]:
        img = cv2.resize(img, desired_size[:2], interpolation=cv2.INTER_LINEAR)

    img = _normalize(img)
    # return np.stack((img,)*3, axis=-1)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate(self):", "def gen_values(self):", "def data(self):", "def prepare_data(self):", "def generate(self):\r\n raise NotImplementedError", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n raise NotImplementedError", "def get_data(self):", "def data(self):\r\n raise NotImplementedError", "def _get_data(self):\n raise NotImplementedError()", "def get_data(self):\r\n pass", "def _dataset_split_generators(self):\n raise NotImplementedError()", "def generate():", "def _read_data(self):", "def load_data(self):", "def data(self):\n pass", "def data(self):\n pass", "def data_shapes(self):", "def get_data(self):\n pass", "def get_data(self):\n pass", "def transform(self, data):", "def before_dataobj_create(self, dataobj):", "def transform(self):", "def _data_process(self, v):\n pass", "def use(self):", "def get_data():\n pass", "def Data(self) -> int:", "def __init__(self, dataset: Dataset):\n self.dataset = dataset", "def test_process_data(self):\n pass", "def data_group():\n ...", "def preprocess(self):", "def fetch_data(self):", "def __call__(self):\n raise NotImplementedError", "def result(self):", "def result(self):", "def sample(self):", "def make_data(self, limit: int):", "def __init__(self):\n self.__dataset = None", "def load_data(self) -> None:", "def datasets(self):\n pass", "def _generate_output(self):\n raise NotImplementedError()", "def GetValues(self):", "def feed_dict_generator(self):\n pass", "def feed_dict_generator(self):\n pass", "def output_data(self):\n pass", "def test_data_source_soaps_id_dynamic_datas_get(self):\n pass", "def values():", "def iterate(self):", "def _fetch_data(self):\n pass", "def on_dataobj_create(self, dataobj):", "def __init__(self):\n self.data = []\n self.idx = {}", "def DM(self):", "def __init__(self):\r\n super(DataTarget, self).__init__()", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def generate(self, di):\n raise NotImplementedError", "def testGeneratorType(self):", "def value(self):", "def normalize_dataset(self):", "def generate(self) -> Dict[str, Any]:\n raise NotImplementedError", "def __init__(self, dataset):\n self._dataset = dataset", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n # Converted to numpy array by _validate. 
Simply assign to correct attribute\n self._samples = self.parameter_schema['parameter_samples']\n super()._generate()", "def get_objects_data(self):\n pass", "def build(self):", "def build(self):", "def build(self):", "def _fetch_data(self, samples):\n pass", "def process(self):", "def process(self):", "def process(self):", "def before_each(self, dataset: pydicom.dataset.Dataset) -> None:", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def __init__(self, dat):\n self.data = dat", "def parameters(self):", "def rosterRowData(self):", "def generateCsvData(self, context, obj, entity):\n raise NotImplementedError()", "def _transform(self, dataset):\n raise NotImplementedError()", "def GetValues(self):\n ...", "def GetValues(self):\n ...", "def create_observation(self):", "def create_observation(self):", "def post_processor(self):", "def apply(self):", "def array(self):", "def __call__(self):\n return self.generate()", "def sequence_params(self):", "def get_data():\n return", "def generator(self, data):\n for instance in data:\n yield (0, [str(instance.string)])", "def gen_graph(self):", "def _get_to_actual_data(raw):\n raise NotImplemented", "def readOneData(self):\n\t\tpass", "def _do_mapping(self):\n pass", "def test_ds(self, obj):\n pass", "def generate(self):\n self.training_data.gen_x(self.x_func)\n self.training_data.gen_a(self.a_func)\n self.training_data.gen_y(self.y_func)\n \n self.testing_data.gen_x(self.x_func)\n self.testing_data.gen_ys(self.y_func)\n self.testing_data.gen_azero(self.ytotal_func)", "def schema(self):", "def _prepare(self):" ]
[ "0.71645045", "0.7069964", "0.68903464", "0.68166614", "0.6782682", "0.6600323", "0.6600323", "0.6600323", "0.65556085", "0.6542308", "0.6518862", "0.6419387", "0.63396436", "0.6294505", "0.62797654", "0.6270805", "0.6239118", "0.6204579", "0.61918163", "0.61918163", "0.61851364", "0.6158682", "0.6158682", "0.61275405", "0.6112194", "0.6097302", "0.6089823", "0.6072874", "0.60611737", "0.5994479", "0.597594", "0.59563637", "0.5951295", "0.5942914", "0.59366107", "0.59310013", "0.5927378", "0.5927378", "0.59268683", "0.5923907", "0.5902667", "0.58885014", "0.5881877", "0.58795047", "0.5871719", "0.58654904", "0.58654904", "0.58539706", "0.58446866", "0.5832946", "0.582287", "0.5819928", "0.5808995", "0.5807461", "0.5795479", "0.5794757", "0.5786678", "0.5779616", "0.5770042", "0.5763996", "0.5758102", "0.5752482", "0.5749293", "0.57449806", "0.57449806", "0.57449806", "0.57387066", "0.57369745", "0.5732887", "0.5732887", "0.5732887", "0.57313436", "0.5711729", "0.5711729", "0.5711729", "0.5706319", "0.5700383", "0.5699538", "0.5684044", "0.56815594", "0.5668886", "0.5666909", "0.56665856", "0.56665856", "0.5659312", "0.5659312", "0.565469", "0.5647709", "0.5641495", "0.56386036", "0.5636331", "0.5635647", "0.5629976", "0.56245613", "0.562334", "0.56230265", "0.56151354", "0.56097806", "0.56082994", "0.5607843", "0.560601" ]
0.0
-1
Note the camelcase name and unused variable. Bad bad bad.
def camelCaseFunc():
    unused = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))", "def not_capitalized(): # noqa: D416", "def nice_name():\n\n pass", "def name():\n\n pass", "def unbound(name):", "def test_instance_vars_have_valid_names(question):\n instance = question[\"instance\"]\n for name in instance.get(\"variables\", {}).keys():\n assert CAMEL_CASE_PATTERN.match(\n name\n ), \"variable {} not slouchingCamelCase\".format(name)", "def name():\n pass", "def name():\n pass", "def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))", "def _NiceNameToPreventCompilerErrors(self, attrname):\n # only emit the rhs of a multi part name e.g. undo.UndoItem will appear only as UndoItem\n if attrname.find(\".\") != -1:\n attrname = attrname.split(\".\")[-1] # take the last\n # Prevent compiler errors on the java side by avoiding the generating of java keywords as attribute names\n if attrname in javakeywords:\n attrname = \"_\" + attrname\n return attrname", "def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')", "def name(self) -> str: # pragma: no cover", "def var_name ( self , name ) :\n if name in self.__var_names and not NameDuplicates.allowed() :\n self.warning ( 'The variable name \"%s\" is already defined!' 
% name )\n \n self.__var_names.add ( name )\n self.__local_names.add ( name )\n return name", "def lower_case_really():", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def __init__(self):\n self.__name = 'name'", "def name(self):\n ...", "def __getattribute__(self, name):\n if name in ('_special_names', '__dict__'):\n return super().__getattribute__(name)\n if hasattr(self, '_special_names'):\n if name in self._special_names:\n raise AttributeError(\n f\"{name} is a reserved variable name and it cannot be read\")\n return super().__getattribute__(name)", "def dummy(self):\n pass", "def verify_naming(self, reserved):\n for w in reserved:\n if w in self.decisions:\n raise ParseError('Duplicate variable/block name \"{}\"'.format(w))", "def __init__(self):\n FooBar = None\n Foo = None\n FOO = None\n foo_bar = None", "def var(self, name):\n raise NotImplementedError", "def my_name(self):\n# different block has different namespace\n# local namesapce for a function is created when the function is called, \n# is deleted when the function exit or expception happens and not be handled\n# by the function.\n local_name = 'a'\n my_life = \"alive\"\n print local_name", "def name(self, name):\n pass", "def verif_unused(sv):\r\n if Unused in sv.Object and sv.Object[Unused].value: # check presence and integrity of unused list\r\n unusedlist=[applied (x, Unused) for x in sv.Object[Unused].value]\r\n for nam in unusedlist: # check each unused declaration\r\n nod=sv.Object[nam]\r\n if sv.Namedpinlist.get(nam)==[nod.effects]: continue # pin is just named\r\n elif applied(nam, Output):\r\n if len(nod.effects)==1: # only effect is output list\r\n if len(nod.causes)<=2: continue\r\n if len(nod.causes)<=4 and Faux in nod.causes and Ewent in nod.causes: continue # allow 'take event'\r\n elif nod.causes or nod.effects: # object should have no cause and no effect\r\n print(Err_unused_obj) \r\n print(str(nam))\r\n sv.Current_clause=None, None, None\r\n raise ReferenceError", "def variable(self):", "def __init__(self, a):\n self.__name__ = a", "def undefined(self, ident, args):\n return \"\"", "def test_deprecated_private_variables(attr):\n with pytest.warns(AstropyDeprecationWarning):\n resolve_name(\"astropy\", \"cosmology\", \"flrw\", attr)", "def _get_arg_name(self, arg, variable_name):", "def name(self):\r\n pass", "def test_3_redeclared(self):\n\t\tinput = \"\"\"var i:integer;\n\t\tprocedure main(); begin end\n\t\tprocedure p(i:boolean); begin with y:real;y:real; do begin end end\n\t\t\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,403))", "def fix_name(self):\n self._name_fixed = True", "def test_mapping(self):\n vark = VarKeyword()\n assert vark.name in vark\n assert '{}_'.format(vark.name) not in vark\n assert len(vark) == 1\n assert list(vark) == [vark.name]", "def test_strings_without_foo(self):\n write this test!", "def exported(*variables):\n ignored(variables)", "def disableIncorrectNameWarning(*args, **kwargs)->None:\n pass", "def wantsNametag(self):\n return 0", "def _dontChange(s, *args, **kwds):\n return {'name': s}", "def __init__(self, name):\n self.name = name.replace(\" \", \"-\").lower()", "def create_simplenamespace():\n obj1 = _(foo=1)\n obj1.random = \"Whoa\"\n print(obj1)\n obj2 = _(foo=2, bar=\"Yipee!\")\n print(obj2)\n obj3 = _(foo=5, bar=4.0, boo=[\"list\", \"with\", \"strings\"])\n print(obj3)", "def question_2(name: str) -> str:\n return \"Hello my name is\" + \" \" + name.capitalize()", "def 
_variable(self, name, vars_set):\n if not re.match(r\"[_a-zA-Z][_a-zA-Z0-9]*$\", name):\n self._syntax_error(\"Not a valid name\", name)\n vars_set.add(name)", "def _temp_prefix(cls) -> str:\n pass", "def cvarname(name):\n\treturn re.sub(r'[^\\w\\s]', '_', name)", "def test_name_properties_on_function():\n assert not Function(name=\"b\", path=\"a.b\", file_path=\"a.py\").name_properties\n assert \"private\" in Function(name=\"_b\", path=\"a._b\", file_path=\"a.py\").name_properties\n assert not Function(name=\"__b\", path=\"a.__b\", file_path=\"a.py\").name_properties\n assert not Function(name=\"__b__\", path=\"a.__b__\", file_path=\"a.py\").name_properties", "def test_issue_91():\n assert is_identifier(\"_results_bag\")\n assert is_identifier(\"hello__bag\")", "def disp_unknown(self, name, a, b):\n return name, a, b", "def name(self):\n raise NotImplementedError", "def __call__(self, name = None):\r\n return utils.add_tag_trace(self.make_variable(name))", "def test_94_misc(self):\n\t\tinput = \"\"\"var a:integer;procedure foo1();\n\t\tbegin putIntLn(4); end\n\t\tprocedure main();\n\t\tbegin a := foo1 + 1; end\"\"\"\n\t\texpect = \"Undeclared Identifier: foo1\"\n\t\tself.assertTrue(TestChecker.test(input,expect,494))", "def get_name(cls, unused_provider_details):\r\n return None", "def question_4(name: str) -> str:\n return \"My first name is\" + \" \" + name.capitalize()", "def get_name():", "def name(self):\n pass", "def test_add_var_desc():\n v = dd.vars['WGT']\n \n assert add_var_desc('Housing ', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing'\n\n \"\"\" Test add second line \"\"\"\n assert add_var_desc(' Unit Weight', dd, 'WGT') == 'WGT'\n assert v.vardesc == 'Housing Unit Weight'\n\n \"\"\" Test prevention against duplication \"\"\"\n assert add_var_desc('Housing Unit Weight', dd, 'WGT') == 'WGT'\n assert add_var_desc('HousingUnit Weight', dd, 'WGT') == 'WGT'\n\n assert add_var_desc('Person', dd, 'PWGT') == None", "def get_nothing():\n return \"\" # intentional non-existent variable", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def test_nonreserved_name(self):\n try:\n field_name_validator('_identifier')\n except ValidationError:\n self.fail('Field name raised ValidationError unexpectedly')", "def test_unnamed_parameter(self):\n\n m = Mothur(**self.init_vars)\n m.help('summary.seqs')\n\n return", "def as_variable(identifier: str) -> str:\n return identifier.lower()", "def bad_default(var, default=unknown2): # [undefined-variable]\n print(var, default)\n print(xxxx) # [undefined-variable]\n augvar += 1 # [undefined-variable]\n del vardel # [undefined-variable]", "def printing_vars(self):\n print(\"Name is \", self.name)", "def clean(self):\n pass\n #TODO check whether short name is really clean and short!", "def _formatSolutionExportVariableNames(self, acceptable):\n return acceptable", "def visit_Ignored(self, attrs):\n name = attrs.get('name', None)\n if name is None:\n name = attrs.get('mangled', None)\n if name is None:\n name = 'UNDEFINED'\n else:\n name = MAKE_NAME(name)\n return c_ast.Ignored(name)", "def variable(self, val):", "def tname(self) -> str:", "def test_name_false(self):\r\n self.name = False", "def non_local_name(self, name):\n if \"!\" in name:\n return name[:name.find(\"!\")+1]\n else:\n return name", "def rename_var(self, old_id, new_id): # to be overriden in subclasses when necessary\n pass", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return 
self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name", "def _get_name(self):\n return self.__name" ]
[ "0.6454768", "0.63859814", "0.61285913", "0.6127803", "0.5935644", "0.59143764", "0.58966666", "0.58966666", "0.58946407", "0.58621436", "0.5839409", "0.5818854", "0.5737015", "0.5730995", "0.57293755", "0.57293755", "0.57293755", "0.57293755", "0.57196563", "0.57148516", "0.5700287", "0.56968695", "0.56788254", "0.56771505", "0.567612", "0.5672481", "0.56635886", "0.5631864", "0.560034", "0.55763716", "0.5558683", "0.5555824", "0.55485034", "0.5547667", "0.55442977", "0.55301225", "0.5524829", "0.5503409", "0.5487947", "0.548204", "0.54799527", "0.5466915", "0.5464524", "0.54550964", "0.5454651", "0.54478824", "0.54415125", "0.54388976", "0.54346335", "0.5432373", "0.5428675", "0.5421812", "0.540711", "0.539787", "0.5396186", "0.5385634", "0.53780615", "0.5372843", "0.536846", "0.5349625", "0.53494024", "0.53494024", "0.53494024", "0.53494024", "0.53494024", "0.53417027", "0.53383183", "0.5331401", "0.53130037", "0.5312232", "0.5308515", "0.53070545", "0.5305538", "0.53010094", "0.5294063", "0.5287974", "0.5285637", "0.528472", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888", "0.5283888" ]
0.6955739
0
Extract an internal value from a string
def getfield(self, pkt, s): class_id = getattr(pkt, self._entity_class) entity_class = omci_entities.entity_id_to_class_map.get(class_id) data = {} for attribute in entity_class.attributes: if AttributeAccess.SetByCreate not in attribute.access: continue if attribute.field.name == 'managed_entity_id': continue fld = attribute.field s, value = fld.getfield(pkt, s) data[fld.name] = value return s, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def get_value_from_str(value_str):\n try:\n return gdb.parse_and_eval(value_str)\n except RuntimeError:\n return None", "def value_from_str(self, s):\n raise ValueError()", "def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"", "def extract_subs_value(text):\n parts = text.split()\n value = float(parts[-1])\n\n return value", "def _extract_by_key(self, line, key):\n search = r'{0}=.+?,'.format(key) # lazy match to first ,\n attr_match = re.search(search, line)\n if attr_match:\n # grab just the value of the attribute from attr_key=value,\n value = attr_match.group()[len(key) + 1 : len(attr_match.group()) - 1]\n return value\n else:\n return \"notfound\"", "def parse(s):\n return s", "def parse_value(string: str) -> Union[str, dict, bool, int, float]:\n unesc_str = unescape(string)\n stripped = string.strip()\n if REGEX_RE.match(stripped):\n return {\"regex\": unesc_str.strip()[7:-2]}\n elif BOOL_RE.match(stripped):\n return stripped.lower() == \"true\"\n elif INT_RE.match(stripped):\n return int(stripped)\n elif FLOAT_RE.match(stripped):\n return float(stripped)\n else:\n return unesc_str[1:-1]", "def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None", "def extractVal(value):\n assert value is not None, \"Value is None\"\n \n trimmed = value.strip()\n try:\n return int(trimmed)\n except ValueError:\n try:\n return float(trimmed)\n except ValueError:\n return str(trimmed)", "def parse_mask(string):\n return string.split(' = ')[1]", "def extract(self, str):\n\n ips = re.match( r'^[0-9]+(?:\\.[0-9]+){3}', str)\n\n if ips:\n return ips.group(0)", "def key_value_string_value(key_value_string, key):\n if key_value_string is None or key is None:\n return None\n words = key_value_string.split(' ')\n for i in range(0, len(words)-1):\n if words[i] == key + ':':\n return words[i+1]\n return None", "def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")", "def get_attrs(str):\n return _scanner.scan(str)[0]", "def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)", "def get_record_value(record_entry, key):\n value = record_entry[key][\"value\"]\n return value[value.rfind(\"#\") + 1:]", "def get_ref_value(self, s):\n return eval(self.ekv_ref)", "def str_to_value(s):\n s = s.strip()\n if _int_matcher.match(s):\n return int(s)\n elif _float_matcher.match(s):\n return float(s)\n elif _bool_matcher.match(s):\n return (s.lower() == 'true')\n return s", "def decode_string(self, value):\r\n return value", "def get(self):\n return self.match.group(\"value\")", "def extract_sub(s: str):\n subject = re.search(r'sub-\\d+', s)[0]\n return subject", "def get_part_value(part:str, data:dict) -> int:\n return int(data.get(part).get(\"value\"))", "def value_from_str(self, s):\n ddict = defaultdict(lambda: self.default)\n ddict['TRUE'] = True\n ddict['FALSE'] = False\n return ddict[s.upper()]", "def parse_token(bn,token):\n return 
bn.split(token)[1].split('_')[0]", "def _parse(val: str):\n\n if not isinstance(val, str):\n raise TypeError(\"Method requires string input\")\n\n value = re.findall(r'^([-+]?\\d*\\.\\d*(?=\\s)|\\d+(?=\\s))', val)\n if not (value and val[:len(value[0])] == value[0]):\n return val, None\n\n # string starts with value\n value = value[0]\n val = val[len(value):]\n\n val = val.strip()\n if val:\n unit = val\n else:\n unit = 'dimensionless'\n\n return value, unit", "def decode_extra_field(self, string):\n\n if isinstance(string, str):\n try:\n decode = int(string)\n except ValueError:\n return string\n return decode\n else:\n return string", "def parse_string_value(str_value: Text) -> Any:\n try:\n return ast.literal_eval(str_value)\n except ValueError:\n return str_value\n except SyntaxError:\n # e.g. $var, ${func}\n return str_value", "def _get_string_from_packing(self, string_to_unpack):\n return string_to_unpack[4:]", "def xml_value_from_key(xml,match,matchNumber=1):\n for i in range(1,10):\n if match.endswith(\"~%d~\"%i):\n match=match.replace(\"~%d~\"%i,'')\n matchNumber=i\n if not match in xml:\n return None\n else:\n val=xml.split(match)[1].split(\"value=\",3)[matchNumber]\n val=val.split('\"')[1]\n try:\n val=float(val)\n except:\n val=str(val)\n return val", "def get_instance(string):\n row = string.split(\".\")\n\n # handles \"f.eid\" case\n if len(row) < 4:\n return \"0\"\n\n # the number is somewhat arbitrary... \n # it is determined by Joeri's UK Phenotypes script.\n # (which is \"get_UKphenotypes.r\" --- thanks Joeri!)\n return row[2]", "def getValue(splits, featureName):\n for split in splits:\n if split.startswith(featureName):\n return split[split.find(\"=\")+1:]\n \n return None", "def extractValue(line, attribute):\n\t\n\tmyValue = ''\n\n\t#to avoid attributes in javascript or normal text\n\tif attribute + '=\"' in line or attribute + \"='\" in line:\n\t\taIndex = line.index(attribute)\n\telse:\n\t\taIndex = None\n\t\n\t#attribute exists and it's a tag\n\tif aIndex != None:\n\t\n\t\t#traverse upto the value\n\t\tch = line[aIndex]\n\t\twhile ch != '\"' and ch != \"'\":\n\t\t\taIndex += 1\n\t\t\tch = line[aIndex]\n\n\t\taIndex += 1\n\t\tch = line[aIndex]\n\t\t\n\t\t# extract the value\n\t\twhile ch != \"'\" and ch != '\"':\n\t\t\tmyValue += ch\n\t\t\taIndex += 1\n\t\t\tch = line[aIndex]\n\n\treturn myValue", "def _parseSingle(string):\n string = string.strip()\n \n if len(string) == 0:\n return ''\n \n pattern = re.compile(r'[^0-9]')\n if not pattern.search(string):\n return int(string)\n pattern = re.compile(r'[^0-9\\.eE]')\n if not pattern.search(string):\n if (string.count('.') <= 1 and \n (string.count('e') + string.count('E') <= 1)):\n return float(string)\n \n boolValue = _bool(string)\n if boolValue is not None:\n return boolValue\n \n if string[0] == string[-1]:\n if string[0] == '\"' or string[0] == \"'\":\n return string[1:-1]\n elif string[1] == string[-1]:\n if ((string[0] == 'u' or string[0] == 'r') and \n (string[1] == '\"' or string[1] == \"'\")):\n return string[2:-1]\n \n if string == 'None':\n return None\n \n return string", "def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n 
return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n else:\n return data", "def _value(token):\n result = re.match(r'\\d*', '0' + token)\n return int(result.group(0))", "def _get_number_from_string(x):\n try:\n return float(x)\n except ValueError:\n raise ValueError('Unknown element')", "def get_key_value(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[line.find('=') + 1:]", "def extract_value(k, d, f=''):\n if k in d:\n if f != '':\n p = f(d[k])\n else:\n p = d[k]\n\n if type(p) == str:\n v = unicode_decode(p)\n else:\n v = p\n else:\n v = unicode_decode('')\n return v", "def get_value(self, str):\r\n base = len(self.chars)\r\n base_placement = len(str) - 1\r\n value = 0\r\n for symbol in str:\r\n valueChar = self.chars.find(symbol)\r\n value += valueChar * (base ** base_placement)\r\n base_placement -= 1\r\n return value", "def version_get(self, string, prefix):\n\n regex = r\"[/_.]{}\\d+\".format(prefix)\n matches = re.findall(regex, string, re.IGNORECASE)\n\n if not len(matches):\n msg = \"No '_{}#' found in '{}'\".format(prefix, string)\n raise ValueError(msg)\n return matches[-1:][0][1], re.search(r\"\\d+\", matches[-1:][0]).group()", "def parse_input_string(self, string_name):\n list_of_parts = string_name.split(\".\")\n if list_of_parts[0] == \"inputs\":\n return string_name\n else:\n # return only the integer part\n return int(list_of_parts[1])", "def from_string (cls, string, access=DEFAULT_ACCESS, accept_value=True):\n hKey, moniker, value = cls._from_string (string, access, accept_value)\n if value is None:\n return cls (moniker, access)\n else:\n return cls (moniker, access).get_value (value)", "def selector(string,key,lkey,lval):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip+lkey:ip+lkey+lval]\n print 'velue = ',value\n else:\n value = 'none'\n \n return value", "def strToCardValue(self, str):\n CARD_REPRESENTATION = {v: k for k, v in Card.ENGLISH_REPRESENTATION.items()}\n return CARD_REPRESENTATION[str]", "def parse(type_str: str) -> \"ConfigurationVariable\":\n try:\n return ConfigurationVariable[type_str.upper()]\n except KeyError as e:\n raise ValueError(f\"Unknown configuration variable: {type_str}. 
{e}\")", "def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n try:\n return bool(s)\n except ValueError:\n return self.default", "def FromString(cls, value: str):\n for _, member in cls.__members__.items():\n if member.value == value:\n return member\n raise LookupError('Invalid component: ' + value)", "def extract_from_taxa_string(tag, taxa_string):\n if tag in taxa_string:\n pieces = taxa_string.split(';')\n for piece in pieces:\n if tag in piece:\n return piece.replace(tag, '')\n return None", "def parseString(self, s):\n pass", "def parse_mem(string):\n m = re.search(r\"\\[([0-9]+)]\", string)\n memaddress = m.group(1)\n memvalue = string.split(' = ')[1]\n return int(memaddress), int(memvalue)", "def parse_pint_string(self, pint_string):\n val = pint_string.split(' ')[0]\n units = pint_string.split(val+' ')[-1]\n return val, units", "def netflix_read(string):\n val = -1\n ind = -1\n string = string.strip()\n if string.isdigit():\n val = int(string)\n ind = 0\n elif string:\n val = int(string.strip(':'))\n ind = 1\n return (val, ind)", "def parse_value(cls, value):\n return value", "def parse_value(cls, value):\n return value", "def find_value(code, value):\n value_pattern = re.compile(rf\"{re.escape(value)} ?= ?([^=][a-zA-Z0-9\\.'/_)(]*)\")\n\n target = None\n for line in code:\n if value_pattern.search(line):\n target = re.findall(value_pattern, line)\n break\n\n return target[0] if target is not None else value", "def ld8_extract(self, text):\n return re.search('\\d{5}_\\d{8}', text).group(0)", "def _rval(self, s):\n if common.is_num(s):\n return float(s)\n elif s.startswith('#'):\n return self.parent.constants[s[1:].lower()]\n else: # time-based ycomp code\n return s.lower()", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def value(self, s):\n #TODO Add more complex evaluation than just True and False...\n if s == \"False\":\n return False\n elif s == \"True\":\n return True\n elif s.startswith('\"') and s.endswith('\"'):\n return s[1:-1]\n else:\n return s", "def obtain_partner(cls, partner_string):\n return string.split(partner_string, ':')[1]", "def parse_var(s):\n items = s.split(\"=\")\n key = items[0].strip() # we remove blanks around keys, as is logical\n value = \"\"\n if len(items) > 1:\n # rejoin the rest:\n value = \"=\".join(items[1:])\n return key, value", "def __segVal(self, string):\r\n return {\r\n \"local\": \"LCL\",\r\n \"argument\": \"ARG\",\r\n \"this\": \"THIS\",\r\n \"that\": \"THAT\",\r\n \"temp\": Consts.SEG_TEMP,\r\n 0: \"THIS\",\r\n 1: \"THAT\"\r\n }[string]", "def parse_value(cls, value):\n choice, value = value.split('=')\n value = cls.VALUES_MAP[value]\n\n return choice, value", "def _get_bib_element(bibitem, element):\n lst = [i.strip() for i in bibitem.split(\"\\n\")]\n for i in lst:\n if i.startswith(element):\n value = i.split(\"=\", 1)[-1]\n value = value.strip()\n while value.endswith(','):\n value = value[:-1]\n while value.startswith('{') or value.startswith('\"'):\n value = value[1:-1]\n return value\n return None", "def parseString(self, s):\n return self.parser.parseString(s)", "def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]", "def loc2val(x):\n if x=='Not Sched':\n return 0\n elif x=='Tel':\n return 1\n elif x=='Loc A':\n return 2\n elif x=='Loc B':\n return 
3\n elif x=='Loc C':\n return 4\n elif x=='Vaca':\n return 5\n else:\n return 6", "def _findIdentifierValue (self, identifier : String) -> String:\n\n Logging.trace(\">>: %s\", identifier)\n cls = self.__class__\n\n if identifier not in self._keyToValueMap:\n # leave identifier as is (it might be some value name like\n # wahr or false\n Logging.traceError(\"no expansion found\")\n result = identifier\n else:\n result = self._keyToValueMap[identifier]\n\n if not isString(result):\n result = repr(result)\n else:\n result = (cls._doubleQuoteCharacter + result\n + cls._doubleQuoteCharacter)\n\n Logging.trace(\"<<: expanded %s into %r\", identifier, result)\n return result", "def parse_id(string):\n return string.split('/')[-1]", "def get_value_from_string(text):\n if len(text.strip()) == 0:\n return None\n\n try:\n if '-' in text or '+' in text:\n tl = [ti for ti in text.split('-')]\n for i in range(1, len(tl)):\n tl[i] = '-' + tl[i]\n ntl = []\n for ti in tl:\n ntl = ntl + ti.split('+')\n ntl = [ti.replace(' ', '') for ti in ntl]\n values = [float(ti) for ti in ntl if len(ti) > 0]\n value = sum(values)\n else:\n value = float(text)\n return value\n\n except Exception:\n return None", "def get_value(value: str, registers: dict):\n\n if value in registers:\n return registers[value]\n\n return int(value)", "def get_index(s):\n return int(s[s.find(\"[\")+1:s.find(\"]\")])", "def _parse_addr(self, addr: str):\n addr = addr.upper()\n return self._registers_list.get(addr, None)", "def extract_string(begin, end, string):\n b = string.find(begin) + len(begin)\n e = string.find(end, b)\n\n return string[b:e]", "def _extract_from_arn(arn, position):\n\n return re.findall(\"(.*?):\", arn)[position]", "def extract_int_substr(int_str: str, substr: str) -> int | None:\n\n match = re.search(f\"({substr})\\\\d+\", int_str)\n\n if not match:\n return None\n\n int_str = match[0].strip(substr)\n\n return int(int_str)", "def _parse_value(value):\n # Check if it is a boolean, int, or float value\n try:\n value = json.loads(value.lower())\n return value\n except ValueError:\n return value", "def get_key_and_value_from_line(line):\n if line.find(\"#\") != 0 or line.find(\"!\") != 0:\n index_key_end = line.find(\"=\")\n while (index_key_end > 0) and (line[index_key_end - 1] == \"\\\\\"):\n index_key_end = line.find(\"=\", index_key_end + 1)\n if index_key_end > 0:\n return line[0:index_key_end].strip(), line[index_key_end + 1:].strip()\n return None, None", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def resolve_value(obj, _):\n return obj.value.decode()", "def parse_atom_value(atom_value_str: str) -> AtomValue:\n return AtomValue(atom_value_str, atom_value_str.isupper())", "def parse_var(s):\n items = s.split('=')\n key = items[0].strip() # we remove blanks around keys, as is logical\n if len(items) > 1:\n # rejoin the rest:\n value = '='.join(items[1:])\n return (key, value)", "def parse_feature_value(s,next_index=0):\n next_index = jump_over_space(s,next_index)\n start_index = next_index\n while True:\n if not s[next_index].isspace():\n next_index += 1\n else:\n break\n feature_value = s[start_index:next_index]\n if feature_value == '':\n feature_value = None\n feature_value = feature_value.split('/')\n return (feature_value,next_index)", "def value_of(char: str) -> str:\n for value, c in _ELEMENTS.items():\n if char == c:\n return value\n else:\n raise 
AttributeError(\"No such Element: {}\".format(char))", "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))", "def hax(string):\r\n hax_dict = { }\r\n return hax_dict.get(string, string)", "def parse_value(value: str) -> Tuple[str, str, str]:\n value_pattern = r'^(usb|pci)\\(([^:]{4}):([^:]{4})\\)$'\n matches = re.match(value_pattern, value)\n assert matches, value\n ilk, vendor, device = matches.group(1), matches.group(2), matches.group(3)\n return ilk, vendor, device", "def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == -1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end", "def _GetVersion(version_str):\n return int(version_str.split('.')[1])", "def _get_value(self, node):\n val = None\n if isinstance(node, ast.Str):\n val = node.s\n elif isinstance(node, ast.BinOp):\n if pairwise_isinstance(\n (node.op, ast.Mod), (node.left, ast.Str),\n (node.right, ast.Name)):\n val = node.left.s % self.globals_[node.right.id]\n elif pairwise_isinstance(\n (node.op, ast.Add), (node.left, ast.Name),\n (node.right, ast.Str)):\n val = self.globals_[node.left.id] + node.right.s\n elif isinstance(node, ast.Name):\n val = self.globals_[node.id]\n\n if val is None:\n raise ValueError(\n \"Unable to find value in %s, only the following are parsed: \"\n \"GLOBAL, 'pkg.foobar', '%%s.foobar' %% GLOBAL or 'GLOBAL + \"\n \"'.foobar'\"\n % ast.dump(node))\n\n return val", "def reference_to_id(value):\n m = re.search(r\"<@(U[A-Z0-9]+)>\", value)\n return m.group(1) if m else None", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def get_variable_string_key(value):\n if not isinstance(value, basestring):\n return None\n matches = _property_string_pattern.findall(value)\n if len(matches) > 0:\n return matches[0][1]\n return None", "def eval_one(self, s, a):\n return self.eval(s)[a]", "def extract_int(text):\n m = re.search(r\"\\d+\", text)\n if m is not None:\n return m.group(0)", "def ExtractProperty(line, name):\n line = line.replace('\"', '')\n line = line.replace(name, '')\n return line.strip()", "def parse(self, val):\n # type: (bytes) -> Any\n return val.decode()", "def parse(self,value):\r\n\t\treturn str(value)", "def _parse_field(self, buf):\n\n delim = buf.find(b'=')\n if delim == -1:\n raise FIXParserError('Incorrect format: missing \"=\"')\n\n tag_id = 0\n try:\n tag_id = int(buf[:delim])\n except ValueError as err:\n raise FIXParserError(f'Incorrect format: ID:{str(buf[:delim])}') \\\n from err\n\n return (tag_id, buf[delim+1:])" ]
[ "0.6899292", "0.6899292", "0.6757842", "0.63956285", "0.63543457", "0.6290516", "0.6245474", "0.6231597", "0.6227444", "0.61673063", "0.61501807", "0.60651314", "0.6053713", "0.60217947", "0.598673", "0.59774673", "0.59718746", "0.5971522", "0.5945698", "0.5924044", "0.59235114", "0.5890824", "0.5872135", "0.584399", "0.58391833", "0.5825631", "0.5813502", "0.58134913", "0.58003396", "0.5791444", "0.57896763", "0.5787093", "0.57825553", "0.5767282", "0.5761552", "0.574505", "0.57421046", "0.57383883", "0.57301396", "0.57100207", "0.5707424", "0.5680945", "0.56806904", "0.56551576", "0.56441647", "0.5638233", "0.5637824", "0.5629473", "0.5618231", "0.56077605", "0.560436", "0.5604001", "0.5603694", "0.56005234", "0.55982953", "0.55982953", "0.55971444", "0.5593704", "0.5577474", "0.55773026", "0.55653214", "0.55609006", "0.5554352", "0.5537005", "0.55359596", "0.55296296", "0.5517236", "0.5515232", "0.5508087", "0.55056024", "0.55037177", "0.54941875", "0.54916596", "0.54818076", "0.54808146", "0.5477865", "0.54759854", "0.54723746", "0.5432797", "0.5415373", "0.541437", "0.5411034", "0.541072", "0.54050386", "0.54030377", "0.54023033", "0.53995615", "0.5392645", "0.5389881", "0.538657", "0.53829837", "0.53778684", "0.5377328", "0.5376688", "0.5360181", "0.5358255", "0.5355128", "0.5354667", "0.53415644", "0.53361654", "0.5332761" ]
0.0
-1
Extract an internal value from a string
def getfield(self, pkt, s): class_id = getattr(pkt, self._entity_class) attribute_mask = getattr(pkt, self._attributes_mask) entity_class = omci_entities.entity_id_to_class_map[class_id] indices = entity_class.attribute_indices_from_mask(attribute_mask) data = {} table_attribute_mask = 0 for index in indices: try: fld = entity_class.attributes[index].field except IndexError as e: log.error("attribute-decode-failure", attribute_index=index, entity_class=entity_class, e=e) continue try: s, value = fld.getfield(pkt, s) except Exception as _e: raise if isinstance(pkt, OmciGetResponse) and isinstance(fld, OmciTableField): data[fld.name + '_size'] = value table_attribute_mask = table_attribute_mask | (1 << (16 - index)) else: data[fld.name] = value if table_attribute_mask: data['table_attribute_mask'] = table_attribute_mask return s, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def value_from_str(self, s):\n if is_quoted_str(s):\n return s[1:-1]\n return super().value_from_str(s)", "def get_value_from_str(value_str):\n try:\n return gdb.parse_and_eval(value_str)\n except RuntimeError:\n return None", "def value_from_str(self, s):\n raise ValueError()", "def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"", "def extract_subs_value(text):\n parts = text.split()\n value = float(parts[-1])\n\n return value", "def _extract_by_key(self, line, key):\n search = r'{0}=.+?,'.format(key) # lazy match to first ,\n attr_match = re.search(search, line)\n if attr_match:\n # grab just the value of the attribute from attr_key=value,\n value = attr_match.group()[len(key) + 1 : len(attr_match.group()) - 1]\n return value\n else:\n return \"notfound\"", "def parse(s):\n return s", "def parse_value(string: str) -> Union[str, dict, bool, int, float]:\n unesc_str = unescape(string)\n stripped = string.strip()\n if REGEX_RE.match(stripped):\n return {\"regex\": unesc_str.strip()[7:-2]}\n elif BOOL_RE.match(stripped):\n return stripped.lower() == \"true\"\n elif INT_RE.match(stripped):\n return int(stripped)\n elif FLOAT_RE.match(stripped):\n return float(stripped)\n else:\n return unesc_str[1:-1]", "def get_value(value):\n if value:\n return value.split('\\n')[0]\n else:\n return None", "def extractVal(value):\n assert value is not None, \"Value is None\"\n \n trimmed = value.strip()\n try:\n return int(trimmed)\n except ValueError:\n try:\n return float(trimmed)\n except ValueError:\n return str(trimmed)", "def parse_mask(string):\n return string.split(' = ')[1]", "def extract(self, str):\n\n ips = re.match( r'^[0-9]+(?:\\.[0-9]+){3}', str)\n\n if ips:\n return ips.group(0)", "def key_value_string_value(key_value_string, key):\n if key_value_string is None or key is None:\n return None\n words = key_value_string.split(' ')\n for i in range(0, len(words)-1):\n if words[i] == key + ':':\n return words[i+1]\n return None", "def _str_to_val(self, value):\n kind, value = value.split(': ', 1)\n\n # Lists and dictionaries are special case\n if kind in ('L', 'D'):\n return eval(value)\n\n if kind in TYPE_MAPPING.keys():\n if kind == 'B':\n if value != 'True':\n return False\n\n value = TYPE_MAPPING[kind](value)\n\n return value\n else:\n raise ValueError(\"An Unknown type of setting was found!\")", "def get_attrs(str):\n return _scanner.scan(str)[0]", "def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)", "def get_record_value(record_entry, key):\n value = record_entry[key][\"value\"]\n return value[value.rfind(\"#\") + 1:]", "def get_ref_value(self, s):\n return eval(self.ekv_ref)", "def str_to_value(s):\n s = s.strip()\n if _int_matcher.match(s):\n return int(s)\n elif _float_matcher.match(s):\n return float(s)\n elif _bool_matcher.match(s):\n return (s.lower() == 'true')\n return s", "def decode_string(self, value):\r\n return value", "def get(self):\n return self.match.group(\"value\")", "def extract_sub(s: str):\n subject = re.search(r'sub-\\d+', s)[0]\n return subject", "def get_part_value(part:str, data:dict) -> int:\n return int(data.get(part).get(\"value\"))", "def value_from_str(self, s):\n ddict = defaultdict(lambda: self.default)\n ddict['TRUE'] = True\n ddict['FALSE'] = False\n return ddict[s.upper()]", "def parse_token(bn,token):\n return 
bn.split(token)[1].split('_')[0]", "def _parse(val: str):\n\n if not isinstance(val, str):\n raise TypeError(\"Method requires string input\")\n\n value = re.findall(r'^([-+]?\\d*\\.\\d*(?=\\s)|\\d+(?=\\s))', val)\n if not (value and val[:len(value[0])] == value[0]):\n return val, None\n\n # string starts with value\n value = value[0]\n val = val[len(value):]\n\n val = val.strip()\n if val:\n unit = val\n else:\n unit = 'dimensionless'\n\n return value, unit", "def decode_extra_field(self, string):\n\n if isinstance(string, str):\n try:\n decode = int(string)\n except ValueError:\n return string\n return decode\n else:\n return string", "def parse_string_value(str_value: Text) -> Any:\n try:\n return ast.literal_eval(str_value)\n except ValueError:\n return str_value\n except SyntaxError:\n # e.g. $var, ${func}\n return str_value", "def _get_string_from_packing(self, string_to_unpack):\n return string_to_unpack[4:]", "def xml_value_from_key(xml,match,matchNumber=1):\n for i in range(1,10):\n if match.endswith(\"~%d~\"%i):\n match=match.replace(\"~%d~\"%i,'')\n matchNumber=i\n if not match in xml:\n return None\n else:\n val=xml.split(match)[1].split(\"value=\",3)[matchNumber]\n val=val.split('\"')[1]\n try:\n val=float(val)\n except:\n val=str(val)\n return val", "def get_instance(string):\n row = string.split(\".\")\n\n # handles \"f.eid\" case\n if len(row) < 4:\n return \"0\"\n\n # the number is somewhat arbitrary... \n # it is determined by Joeri's UK Phenotypes script.\n # (which is \"get_UKphenotypes.r\" --- thanks Joeri!)\n return row[2]", "def getValue(splits, featureName):\n for split in splits:\n if split.startswith(featureName):\n return split[split.find(\"=\")+1:]\n \n return None", "def extractValue(line, attribute):\n\t\n\tmyValue = ''\n\n\t#to avoid attributes in javascript or normal text\n\tif attribute + '=\"' in line or attribute + \"='\" in line:\n\t\taIndex = line.index(attribute)\n\telse:\n\t\taIndex = None\n\t\n\t#attribute exists and it's a tag\n\tif aIndex != None:\n\t\n\t\t#traverse upto the value\n\t\tch = line[aIndex]\n\t\twhile ch != '\"' and ch != \"'\":\n\t\t\taIndex += 1\n\t\t\tch = line[aIndex]\n\n\t\taIndex += 1\n\t\tch = line[aIndex]\n\t\t\n\t\t# extract the value\n\t\twhile ch != \"'\" and ch != '\"':\n\t\t\tmyValue += ch\n\t\t\taIndex += 1\n\t\t\tch = line[aIndex]\n\n\treturn myValue", "def _parseSingle(string):\n string = string.strip()\n \n if len(string) == 0:\n return ''\n \n pattern = re.compile(r'[^0-9]')\n if not pattern.search(string):\n return int(string)\n pattern = re.compile(r'[^0-9\\.eE]')\n if not pattern.search(string):\n if (string.count('.') <= 1 and \n (string.count('e') + string.count('E') <= 1)):\n return float(string)\n \n boolValue = _bool(string)\n if boolValue is not None:\n return boolValue\n \n if string[0] == string[-1]:\n if string[0] == '\"' or string[0] == \"'\":\n return string[1:-1]\n elif string[1] == string[-1]:\n if ((string[0] == 'u' or string[0] == 'r') and \n (string[1] == '\"' or string[1] == \"'\")):\n return string[2:-1]\n \n if string == 'None':\n return None\n \n return string", "def _decode_value(data):\n\n if type(data) is tuple:\n data = data[0]\n\n # Key does not exist\n if data == '0' or data == \"\":\n return None\n \n elif data[0] == _PREFIX:\n\n encoding = data[:2]\n value = data[2:]\n\n if encoding == _TYPE_DOUBLE or encoding == _TYPE_DOUBLE_C:\n return float(value)\n elif encoding == _TYPE_STRING or encoding == _TYPE_STRING_C:\n return value\n elif encoding == _TYPE_INT or encoding == _TYPE_INT_C:\n 
return int(value)\n elif encoding == _TYPE_BOOL or encoding == _TYPE_BOOL_C:\n return value == \"true\"\n else:\n return data\n\n elif data.startswith(\"<elsystem.collections.vector>\"):\n return _decode_vector(data)\n elif data.startswith(\"<elsystem.collections.dictionary>\"):\n return _decode_dictionary(data)\n else:\n return data", "def _value(token):\n result = re.match(r'\\d*', '0' + token)\n return int(result.group(0))", "def _get_number_from_string(x):\n try:\n return float(x)\n except ValueError:\n raise ValueError('Unknown element')", "def get_key_value(line: str) -> str:\n if line.find('=') == -1:\n raise Exception(\"Error: Key line must have equal sign seperating name and value\")\n return line[line.find('=') + 1:]", "def extract_value(k, d, f=''):\n if k in d:\n if f != '':\n p = f(d[k])\n else:\n p = d[k]\n\n if type(p) == str:\n v = unicode_decode(p)\n else:\n v = p\n else:\n v = unicode_decode('')\n return v", "def get_value(self, str):\r\n base = len(self.chars)\r\n base_placement = len(str) - 1\r\n value = 0\r\n for symbol in str:\r\n valueChar = self.chars.find(symbol)\r\n value += valueChar * (base ** base_placement)\r\n base_placement -= 1\r\n return value", "def version_get(self, string, prefix):\n\n regex = r\"[/_.]{}\\d+\".format(prefix)\n matches = re.findall(regex, string, re.IGNORECASE)\n\n if not len(matches):\n msg = \"No '_{}#' found in '{}'\".format(prefix, string)\n raise ValueError(msg)\n return matches[-1:][0][1], re.search(r\"\\d+\", matches[-1:][0]).group()", "def parse_input_string(self, string_name):\n list_of_parts = string_name.split(\".\")\n if list_of_parts[0] == \"inputs\":\n return string_name\n else:\n # return only the integer part\n return int(list_of_parts[1])", "def from_string (cls, string, access=DEFAULT_ACCESS, accept_value=True):\n hKey, moniker, value = cls._from_string (string, access, accept_value)\n if value is None:\n return cls (moniker, access)\n else:\n return cls (moniker, access).get_value (value)", "def selector(string,key,lkey,lval):\n print string\n ip = string.find(key)\n print 'key =',key, 'position =',ip\n if ip > -1:\n value = string[ip+lkey:ip+lkey+lval]\n print 'velue = ',value\n else:\n value = 'none'\n \n return value", "def strToCardValue(self, str):\n CARD_REPRESENTATION = {v: k for k, v in Card.ENGLISH_REPRESENTATION.items()}\n return CARD_REPRESENTATION[str]", "def parse(type_str: str) -> \"ConfigurationVariable\":\n try:\n return ConfigurationVariable[type_str.upper()]\n except KeyError as e:\n raise ValueError(f\"Unknown configuration variable: {type_str}. 
{e}\")", "def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n try:\n return bool(s)\n except ValueError:\n return self.default", "def FromString(cls, value: str):\n for _, member in cls.__members__.items():\n if member.value == value:\n return member\n raise LookupError('Invalid component: ' + value)", "def extract_from_taxa_string(tag, taxa_string):\n if tag in taxa_string:\n pieces = taxa_string.split(';')\n for piece in pieces:\n if tag in piece:\n return piece.replace(tag, '')\n return None", "def parseString(self, s):\n pass", "def parse_mem(string):\n m = re.search(r\"\\[([0-9]+)]\", string)\n memaddress = m.group(1)\n memvalue = string.split(' = ')[1]\n return int(memaddress), int(memvalue)", "def parse_pint_string(self, pint_string):\n val = pint_string.split(' ')[0]\n units = pint_string.split(val+' ')[-1]\n return val, units", "def netflix_read(string):\n val = -1\n ind = -1\n string = string.strip()\n if string.isdigit():\n val = int(string)\n ind = 0\n elif string:\n val = int(string.strip(':'))\n ind = 1\n return (val, ind)", "def parse_value(cls, value):\n return value", "def parse_value(cls, value):\n return value", "def find_value(code, value):\n value_pattern = re.compile(rf\"{re.escape(value)} ?= ?([^=][a-zA-Z0-9\\.'/_)(]*)\")\n\n target = None\n for line in code:\n if value_pattern.search(line):\n target = re.findall(value_pattern, line)\n break\n\n return target[0] if target is not None else value", "def ld8_extract(self, text):\n return re.search('\\d{5}_\\d{8}', text).group(0)", "def _rval(self, s):\n if common.is_num(s):\n return float(s)\n elif s.startswith('#'):\n return self.parent.constants[s[1:].lower()]\n else: # time-based ycomp code\n return s.lower()", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def value(self, s):\n #TODO Add more complex evaluation than just True and False...\n if s == \"False\":\n return False\n elif s == \"True\":\n return True\n elif s.startswith('\"') and s.endswith('\"'):\n return s[1:-1]\n else:\n return s", "def obtain_partner(cls, partner_string):\n return string.split(partner_string, ':')[1]", "def parse_var(s):\n items = s.split(\"=\")\n key = items[0].strip() # we remove blanks around keys, as is logical\n value = \"\"\n if len(items) > 1:\n # rejoin the rest:\n value = \"=\".join(items[1:])\n return key, value", "def __segVal(self, string):\r\n return {\r\n \"local\": \"LCL\",\r\n \"argument\": \"ARG\",\r\n \"this\": \"THIS\",\r\n \"that\": \"THAT\",\r\n \"temp\": Consts.SEG_TEMP,\r\n 0: \"THIS\",\r\n 1: \"THAT\"\r\n }[string]", "def parse_value(cls, value):\n choice, value = value.split('=')\n value = cls.VALUES_MAP[value]\n\n return choice, value", "def _get_bib_element(bibitem, element):\n lst = [i.strip() for i in bibitem.split(\"\\n\")]\n for i in lst:\n if i.startswith(element):\n value = i.split(\"=\", 1)[-1]\n value = value.strip()\n while value.endswith(','):\n value = value[:-1]\n while value.startswith('{') or value.startswith('\"'):\n value = value[1:-1]\n return value\n return None", "def parseString(self, s):\n return self.parser.parseString(s)", "def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]", "def loc2val(x):\n if x=='Not Sched':\n return 0\n elif x=='Tel':\n return 1\n elif x=='Loc A':\n return 2\n elif x=='Loc B':\n return 
3\n elif x=='Loc C':\n return 4\n elif x=='Vaca':\n return 5\n else:\n return 6", "def _findIdentifierValue (self, identifier : String) -> String:\n\n Logging.trace(\">>: %s\", identifier)\n cls = self.__class__\n\n if identifier not in self._keyToValueMap:\n # leave identifier as is (it might be some value name like\n # wahr or false\n Logging.traceError(\"no expansion found\")\n result = identifier\n else:\n result = self._keyToValueMap[identifier]\n\n if not isString(result):\n result = repr(result)\n else:\n result = (cls._doubleQuoteCharacter + result\n + cls._doubleQuoteCharacter)\n\n Logging.trace(\"<<: expanded %s into %r\", identifier, result)\n return result", "def parse_id(string):\n return string.split('/')[-1]", "def get_value_from_string(text):\n if len(text.strip()) == 0:\n return None\n\n try:\n if '-' in text or '+' in text:\n tl = [ti for ti in text.split('-')]\n for i in range(1, len(tl)):\n tl[i] = '-' + tl[i]\n ntl = []\n for ti in tl:\n ntl = ntl + ti.split('+')\n ntl = [ti.replace(' ', '') for ti in ntl]\n values = [float(ti) for ti in ntl if len(ti) > 0]\n value = sum(values)\n else:\n value = float(text)\n return value\n\n except Exception:\n return None", "def get_value(value: str, registers: dict):\n\n if value in registers:\n return registers[value]\n\n return int(value)", "def get_index(s):\n return int(s[s.find(\"[\")+1:s.find(\"]\")])", "def _parse_addr(self, addr: str):\n addr = addr.upper()\n return self._registers_list.get(addr, None)", "def extract_string(begin, end, string):\n b = string.find(begin) + len(begin)\n e = string.find(end, b)\n\n return string[b:e]", "def _extract_from_arn(arn, position):\n\n return re.findall(\"(.*?):\", arn)[position]", "def extract_int_substr(int_str: str, substr: str) -> int | None:\n\n match = re.search(f\"({substr})\\\\d+\", int_str)\n\n if not match:\n return None\n\n int_str = match[0].strip(substr)\n\n return int(int_str)", "def _parse_value(value):\n # Check if it is a boolean, int, or float value\n try:\n value = json.loads(value.lower())\n return value\n except ValueError:\n return value", "def get_key_and_value_from_line(line):\n if line.find(\"#\") != 0 or line.find(\"!\") != 0:\n index_key_end = line.find(\"=\")\n while (index_key_end > 0) and (line[index_key_end - 1] == \"\\\\\"):\n index_key_end = line.find(\"=\", index_key_end + 1)\n if index_key_end > 0:\n return line[0:index_key_end].strip(), line[index_key_end + 1:].strip()\n return None, None", "def parse_from_placeholder(string,pattern,encloser='%',matcher='(.+)'):\n pattern,fields = placeholder_to_regex(pattern,encloser,matcher)\n return parse_from_regex(string,pattern,fields)", "def resolve_value(obj, _):\n return obj.value.decode()", "def parse_atom_value(atom_value_str: str) -> AtomValue:\n return AtomValue(atom_value_str, atom_value_str.isupper())", "def parse_var(s):\n items = s.split('=')\n key = items[0].strip() # we remove blanks around keys, as is logical\n if len(items) > 1:\n # rejoin the rest:\n value = '='.join(items[1:])\n return (key, value)", "def parse_feature_value(s,next_index=0):\n next_index = jump_over_space(s,next_index)\n start_index = next_index\n while True:\n if not s[next_index].isspace():\n next_index += 1\n else:\n break\n feature_value = s[start_index:next_index]\n if feature_value == '':\n feature_value = None\n feature_value = feature_value.split('/')\n return (feature_value,next_index)", "def value_of(char: str) -> str:\n for value, c in _ELEMENTS.items():\n if char == c:\n return value\n else:\n raise 
AttributeError(\"No such Element: {}\".format(char))", "def string_to_index(s):\n s = Unquote(s)\n if s == \".\":\n return ()\n return tuple(s.split(\"/\"))", "def hax(string):\r\n hax_dict = { }\r\n return hax_dict.get(string, string)", "def parse_value(value: str) -> Tuple[str, str, str]:\n value_pattern = r'^(usb|pci)\\(([^:]{4}):([^:]{4})\\)$'\n matches = re.match(value_pattern, value)\n assert matches, value\n ilk, vendor, device = matches.group(1), matches.group(2), matches.group(3)\n return ilk, vendor, device", "def extract_string(line, idx, result):\n\n begin = line.find(resource_string_prefix, idx)\n if begin == -1:\n return -1\n \n begin = begin + len(resource_string_prefix)\n end = -1\n for i in range(begin, len(line)):\n if not is_valid_char(line[i]):\n end = i\n break\n\n result.add(line[begin:end])\n return end", "def _GetVersion(version_str):\n return int(version_str.split('.')[1])", "def _get_value(self, node):\n val = None\n if isinstance(node, ast.Str):\n val = node.s\n elif isinstance(node, ast.BinOp):\n if pairwise_isinstance(\n (node.op, ast.Mod), (node.left, ast.Str),\n (node.right, ast.Name)):\n val = node.left.s % self.globals_[node.right.id]\n elif pairwise_isinstance(\n (node.op, ast.Add), (node.left, ast.Name),\n (node.right, ast.Str)):\n val = self.globals_[node.left.id] + node.right.s\n elif isinstance(node, ast.Name):\n val = self.globals_[node.id]\n\n if val is None:\n raise ValueError(\n \"Unable to find value in %s, only the following are parsed: \"\n \"GLOBAL, 'pkg.foobar', '%%s.foobar' %% GLOBAL or 'GLOBAL + \"\n \"'.foobar'\"\n % ast.dump(node))\n\n return val", "def reference_to_id(value):\n m = re.search(r\"<@(U[A-Z0-9]+)>\", value)\n return m.group(1) if m else None", "def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)", "def get_variable_string_key(value):\n if not isinstance(value, basestring):\n return None\n matches = _property_string_pattern.findall(value)\n if len(matches) > 0:\n return matches[0][1]\n return None", "def eval_one(self, s, a):\n return self.eval(s)[a]", "def extract_int(text):\n m = re.search(r\"\\d+\", text)\n if m is not None:\n return m.group(0)", "def ExtractProperty(line, name):\n line = line.replace('\"', '')\n line = line.replace(name, '')\n return line.strip()", "def parse(self, val):\n # type: (bytes) -> Any\n return val.decode()", "def parse(self,value):\r\n\t\treturn str(value)", "def _parse_field(self, buf):\n\n delim = buf.find(b'=')\n if delim == -1:\n raise FIXParserError('Incorrect format: missing \"=\"')\n\n tag_id = 0\n try:\n tag_id = int(buf[:delim])\n except ValueError as err:\n raise FIXParserError(f'Incorrect format: ID:{str(buf[:delim])}') \\\n from err\n\n return (tag_id, buf[delim+1:])" ]
[ "0.6899292", "0.6899292", "0.6757842", "0.63956285", "0.63543457", "0.6290516", "0.6245474", "0.6231597", "0.6227444", "0.61673063", "0.61501807", "0.60651314", "0.6053713", "0.60217947", "0.598673", "0.59774673", "0.59718746", "0.5971522", "0.5945698", "0.5924044", "0.59235114", "0.5890824", "0.5872135", "0.584399", "0.58391833", "0.5825631", "0.5813502", "0.58134913", "0.58003396", "0.5791444", "0.57896763", "0.5787093", "0.57825553", "0.5767282", "0.5761552", "0.574505", "0.57421046", "0.57383883", "0.57301396", "0.57100207", "0.5707424", "0.5680945", "0.56806904", "0.56551576", "0.56441647", "0.5638233", "0.5637824", "0.5629473", "0.5618231", "0.56077605", "0.560436", "0.5604001", "0.5603694", "0.56005234", "0.55982953", "0.55982953", "0.55971444", "0.5593704", "0.5577474", "0.55773026", "0.55653214", "0.55609006", "0.5554352", "0.5537005", "0.55359596", "0.55296296", "0.5517236", "0.5515232", "0.5508087", "0.55056024", "0.55037177", "0.54941875", "0.54916596", "0.54818076", "0.54808146", "0.5477865", "0.54759854", "0.54723746", "0.5432797", "0.5415373", "0.541437", "0.5411034", "0.541072", "0.54050386", "0.54030377", "0.54023033", "0.53995615", "0.5392645", "0.5389881", "0.538657", "0.53829837", "0.53778684", "0.5377328", "0.5376688", "0.5360181", "0.5358255", "0.5355128", "0.5354667", "0.53415644", "0.53361654", "0.5332761" ]
0.0
-1
Sets the size and position of the main window.
def _setup(self, width=turtle._CFG["width"], height=turtle._CFG["height"], startx=turtle._CFG["leftright"], starty=turtle._CFG["topbottom"]): if not hasattr(self._root, "set_geometry"): return sw = self._root.win_width() sh = self._root.win_height() if isinstance(width, float) and 0 <= width <= 1: width = sw*width if startx is None: startx = (sw - width) / 2 if isinstance(height, float) and 0 <= height <= 1: height = sh*height if starty is None: starty = (sh - height) / 2 self._root.set_geometry(width, height, startx, starty) self.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def initialise_window(self):\n self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)\n self.imageLabel.setScaledContents(True)\n self.scrollArea.setWidget(self.imageLabel)\n self.setCentralWidget(self.scrollArea)\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable horizontal scrollbar.\n self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) # Disable vertical scrollbar.\n self.setWindowTitle(\"Robot Map\") # Set title.\n self.showFullScreen() # Make fullscreen.", "def setWindowGeometry(x,y,width,height):\n dislin.window(x,y,width,height)", "def window(main):\r\n main.title(\"BinCryptor 1.0\")\r\n main.update_idletasks()\r\n width = main.winfo_width() #Width of the current screen\r\n height = main.winfo_height() #Height of the current screen\r\n x = (main.winfo_screenwidth() // 2) - (width // 2)\r\n y = (main.winfo_screenheight() // 2) - (height // 2)\r\n main.geometry(f'{width}x{height}+{x}+{y}') #Adjusts the height and width\r", "def SetWindowSize(self, size):\n self.WINDOW_SIZE = size", "def init_window(self, size, screen=None):\n # enforce minimum size\n (mw, mh), (w, h) = config.minsize, size\n if w < mw or h < mh:\n size = mw, mh\n\n # init view surface and pass it to screen\n self.view = pygame.display.set_mode(size, pygame.RESIZABLE)\n self.view.fill((0, 0, 0))\n if screen is not None:\n screen.resize_view()", "def setwinsize(self, rows, cols):", "def set_igv_window_size(self, width=800, height=600):\n self.set_igv_window_width(width)\n self.set_igv_window_height(height)", "def setup(self, width=_CFG[\"width\"], height=_CFG[\"height\"],\n startx=_CFG[\"leftright\"], starty=_CFG[\"topbottom\"]):\n if not hasattr(self._root, \"set_geometry\"):\n return\n sw = self._root.win_width()\n sh = self._root.win_height()\n if isinstance(width, float) and 0 <= width <= 1:\n width = sw*width\n if startx is None:\n startx = (sw - width) / 2\n if isinstance(height, float) and 0 <= height <= 1:\n height = sh*height\n if starty is None:\n starty = (sh - height) / 2\n self._root.set_geometry(width, height, startx, starty)\n self.update()", "def setupWindow(self):\n\n\t\tself.main_menu_window = MenuFrame.MainMenuFrame(self.uiCoordinator)\n\t\tself.menu_window = self.main_menu_window._mf\n\t\tself.score_window = self.main_menu_window._hf\n\t\tself.instructions_window = self.main_menu_window._if\n\t\tself.menu_window.playButton.focus_set()", "def _prep_window(self, parent=None):\n self.toolkit.app.initialize()\n if not self.initialized:\n 
self.setup(parent)\n self.resize_to_initial()\n self.update_minimum_size()\n self.update_maximum_size()", "def setUp(self):\r\n self.caption = \"mirra extending classes\" # window name\r\n self.size = 640, 480 #window size\r\n self.pos = 100,100 # window top left location\r\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\r\n self.frameRate = 15 # set refresh framerate\r", "def center(self):\n # get the compute screen's size\n screen = QDesktopWidget().screenGeometry()\n # get the app windows' size\n size = self.geometry()\n self.move(int((screen.width() - size.width()) / 2), int((screen.height() - size.height()) / 2))", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def align_window(self):\n self.parent.update()\n\n # get screen info\n screen_width = self.parent.winfo_screenwidth()\n screen_height = self.parent.winfo_screenheight()\n\n # get window info\n window_width = self.parent.winfo_width()\n window_height = self.parent.winfo_height()\n\n # determine position of the window\n x = screen_width - window_width/2 - 120\n y = screen_height - window_height/2 - 60\n\n # move the window to determined position\n self.parent.geometry('+%d+%d' % (x, y))", "def set_size(self, width, height):\n # Combine the height and width to single string to be passed to root\n set_str = '{}x{}'.format(str(width), str(height))\n self.root.geometry(set_str)", "def main():\n root = Tk()\n if high_dpi:\n root.call('tk', 'scaling', 4)\n if fullscreen:\n root.attributes('-fullscreen', True)\n root.configure(bg=yellow)\n root.grid_columnconfigure(2, weight=1)\n root.title('NS Fietsenstalling')\n\n MainScreen(root)\n root.mainloop()", "def centre(self):\n self.top.update_idletasks()\n # The horizontal position is calculated as (screenwidth - window_width)/2\n hpos = int((self.top.winfo_screenwidth() - self.top.winfo_width())/2)\n # And vertical position the same, but with the height dimensions\n vpos = int((self.top.winfo_screenheight() - self.top.winfo_height())/2)\n # And the move call repositions the window\n self.top.geometry('+{x}+{y}'.format(x=hpos, y=vpos))", "def __init__(self):\n self.app = qt.QApplication(sys.argv)\n self.window = qt.QMainWindow()\n self.screenSize = qt.QDesktopWidget().screenGeometry(-1)\n self.window.setGeometry(self.getDims()[1]/4, self.getDims()[0]/4, self.getDims()[1]/2, self.getDims()[0]/2)", "def SetWindow(self, w):\r\n\r\n self.window = w", "def set_geometry(self, width, height, fullscreen=False):\n self.root.tk.call(\"tk\", \"scaling\", self.scaling_factor)\n if fullscreen:\n initial_dimensions = (self.root.winfo_screenwidth(), self.root.winfo_screenheight())\n else:\n initial_dimensions = (round(width * self.scaling_factor),\n round(height * self.scaling_factor))\n\n if fullscreen and sys.platform == \"win32\":\n self.root.state('zoomed')\n elif fullscreen:\n self.root.attributes('-zoomed', True)\n else:\n self.root.geometry(\"{}x{}+80+80\".format(str(initial_dimensions[0]),\n str(initial_dimensions[1])))\n logger.debug(\"Geometry: %sx%s\", *initial_dimensions)", "def set_window_position(self, left, top, right, bottom, state, is_floating):\n self._set_window_position(left, top, 
right, bottom, state, is_floating)", "def center(self):\n self.root.update_idletasks()\n w = self.root.winfo_screenwidth()\n h = self.root.winfo_screenheight()\n size = tuple(int(_) for _ in self.root.geometry().split('+')[0].split('x'))\n x = w/2 - size[0]/2\n y = h/2 - size[1]/2\n self.root.geometry(\"240x80+%d+%d\" % (x, y))", "def centerWindow(self):\n framegeo = self.frameGeometry()\n center = QtGui.QDesktopWidget().availableGeometry().center()\n framegeo.moveCenter(center)\n self.move(framegeo.topLeft())", "def position_window(self):\n x, y = self.get_position()\n root_x = self.anchor_widget.winfo_rootx() + x\n root_y = self.anchor_widget.winfo_rooty() + y\n self.tipwindow.wm_geometry(\"+%d+%d\" % (root_x, root_y))", "def __init__( self, window_size=QSize( DEFAULT_H_SIZE, DEFAULT_V_SIZE ) ):\n super().__init__()\n\n self.centralWidget = None\n self.window_size = window_size\n\n self.create_models()\n self.create_widgets()\n self.create_layout()\n self.create_menus()\n self.set_state()", "def setup(self):\n cv2.namedWindow(Renderer.WINDOW_TITLE)\n # Allow window to be fullsized by both the OS window controls and OpenCV\n cv2.setWindowProperty(Renderer.WINDOW_TITLE,\n cv2.WND_PROP_AUTOSIZE,\n cv2.WINDOW_NORMAL)", "def set_screen(self, size):\r\n self.screen = size", "def init(self):\n sg.theme(gui.app_theme)\n self.window = sg.Window(\n gui.app_title,\n gui.create_layout(),\n **gui.window_config,\n )\n gui.after_window_init(self.window)", "def cb_main_window(self, event):\n self.main_frame.Show()", "def _configure_main(self, client):\n width = self._get_main_width()\n height = self.screen_rect.height\n left = self.screen_rect.x\n top = self.screen_rect.y\n\n if self.main_centered and len(self.clients) > 2:\n left += (self.screen_rect.width - width) // 2\n\n self._place_client(client, left, top, width, height)", "def set_resolution(self, width, height):\n self.driver.set_window_size(width, height, self.driver.window_handles[0])", "def _on_start(self):\n desktop = QtGui.QApplication.instance().desktop()\n available_geometry = desktop.screenGeometry(QtGui.QCursor().pos())\n self.setGeometry(available_geometry.x(), 0, 100, 100)", "def OnSize(self, event):\r\n\r\n self.Layout()", "def size_with_window(self, size_with_window):\n\n self.container['size_with_window'] = size_with_window", "def updatesize(frame):\n winwid, winhgt = frame.winfo_width(), frame.winfo_height()\n scrwid, scrhgt = frame.winfo_screenwidth(), frame.winfo_screenheight()\n newx, newy = math.floor(scrwid * 0.99) - winwid, math.floor(scrhgt * 0.01)\n frame.master.geometry(\"{}x{}+{}+{}\".format(winwid, winhgt, newx, newy))", "def setSize(self, width, height):\n frameWidth = width\n frameHeight = height\n repaint()", "def main():\r\n root = tk.Tk()\r\n app = Home(root)\r\n root.geometry(app.resize())\r\n root.configure(background = jt.color_background)\r\n root.mainloop()", "def setup_render_window(self):\n\n # Set camera\n self.__camera.SetPosition(0.0, 0.0, 20.0)\n self.__camera.SetFocalPoint(0.0, 0.0, 0.0)\n\n # Set renderer\n self.renderer.SetActiveCamera(self.__camera)\n self.renderer.SetBackground(0.6, 0.6, 0.6)\n\n # Set render window\n self.__render_window.AddRenderer(self.renderer)\n self.__render_window.SetSize(1000, 600)\n\n # Set render window interactor\n self.__render_window_interactor.SetRenderWindow(self.__render_window)\n self.__render_window_interactor.SetInteractorStyle(self.__interactor_style_trackball_camera)", "def initUI(self) -> None:\n ratio = 70\n width_to_set = (ratio * 
self.get_current_window_info()[0]) / 100.0\n height_to_set = (ratio * self.get_current_window_info()[1]) / 100.0\n self.setGeometry(200, 100, width_to_set, height_to_set)\n self.createTable()\n # Add box layout, add table to box layout and add box layout to widget\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.tableWidget)\n self.setLayout(self.layout)\n self.setWindowTitle('View files')\n self.show()", "def center(win):\n win.update_idletasks()\n width = 1120\n frm_width = win.winfo_rootx() - win.winfo_x()\n win_width = width + 2 * frm_width\n height = 630\n titlebar_height = win.winfo_rooty() - win.winfo_y()\n win_height = height + titlebar_height + frm_width\n x = win.winfo_screenwidth() // 2 - win_width // 2\n y = win.winfo_screenheight() // 2 - win_height // 2\n win.geometry(\"{}x{}+{}+{}\".format(width, height, x, y))\n win.deiconify()", "def setup_window(self, fullscreen, dual):\n cv2.startWindowThread()\n if fullscreen:\n cv2.namedWindow(self.wname, cv2.WINDOW_NORMAL)\n else:\n cv2.namedWindow(self.wname)\n cv2.namedWindow(self.wname)\n cv2.setWindowProperty(self.wname, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n if dual:\n # Move is to make sure it's on the right monitor\n cv2.moveWindow(self.wname, 1920, 0)\n cv2.namedWindow(self.wname + ' Small View')\n cv2.resizeWindow(self.wname + ' Small View', 960, 540)", "def center_window(self):\n\n\t\tframe_geo = self.frameGeometry()\n\t\tcursor_pos = QtWidgets.QApplication.desktop().cursor().pos()\n\t\tscreen = QtWidgets.QApplication.desktop().screenNumber(cursor_pos)\n\t\tcenter_point = QtWidgets.QApplication.desktop().screenGeometry(screen).center()\n\t\tframe_geo.moveCenter(center_point)\n\t\tself.move(frame_geo.topLeft())", "def _configureWindow(self):\n if self._win_type == WindowType.IMMERSIVE:\n pg.setConfigOptions(\n foreground='d',\n background=(_DARK_COLOUR if self._dark else _LIGHT_COLOUR))\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n self._plt.setAspectLocked(True, 1)\n self._plt.hideAxis('left')\n self._plt.hideAxis('bottom')\n else: # DEFAULT\n pg.setConfigOptions(foreground='k', background='w')\n self._win = pg.plot(title=\"Abstact Map Visualisation\")\n self._plt = self._win.plotItem\n\n # Set up the overlay objects as they are static\n self._overlay_items = [\n QtWidgets.QGraphicsRectItem(-_OVERLAY_WIDTH / 2,\n -_OVERLAY_HEIGHT / 2, _OVERLAY_WIDTH,\n _OVERLAY_HEIGHT)\n ]\n self._overlay_items[0].setBrush(pg.mkBrush(_OVERLAY_COLOUR))\n self._overlay_items[0].setZValue(1000)\n self._win.addItem(self._overlay_items[0])\n self.toggleOverlay(enable=False)\n\n # Do any last settings in the window\n # self._win.parentWidget().showMaximized()\n limit = 30\n self._win.setRange(xRange=[-limit, limit], yRange=[-limit, limit])", "def resize(self, win, width:int, height:int):\r\n\r\n\t\tglViewport(0, 0, width, height)", "def run(self):\n self.root.title(\"Etymology relations\")\n self.root.geometry(\"1080x600\")\n self.root.deiconify()\n self.root.mainloop()", "def __init__(self, size, class_to_use, master, row, column, report=None):\n try:\n master.master.geometry(size)\n except AttributeError:\n pass\n self.window = class_to_use(master=master, borderwidth=0, relief=tk.GROOVE)\n self.window.grid(row=row, column=column, padx=10, pady=20)", "def initUI(self):\n \n self.setWindowTitle(\"Intecol Flir camera\")\n self.setGeometry(300, 100, 1012, 622)", "def defaultWindowSize(self):\n self.resize(self.defaultWindowWidth, self.defaultWindowHeight)", "def resize(self):\r\n 
del self.win\r\n self.__create_win()", "def create_main_enviroment(self):\n # self.layout=QGridLayout()\n self.resize(900, 900)\n self.centralWidget = CentralWidget(self) # CentralWidget(self)\n self.setCentralWidget(self.centralWidget)\n\n # self.toolbar = QToolBar(self)\n # self.addToolBar(self.toolbar)\n\n # self.setLayout(self.layout)\n self.setWindowTitle(\"Fitting elastic constants\")", "def __init__(self, root):\n self.root = root\n w, h = root.winfo_screenwidth(), self.root.winfo_screenheight()\n self.root.geometry(\"%dx%d+0+0\" % (w, h))\n self.root.protocol(\"WM_DELETE_WINDOW\", self.end_program)\n self.program_running = True", "def set_window(self, handle):\n pass", "def resize(self, width, height):\n\n\t\tself._window.resize(width, height)", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def __set_properties(self): \n self.SetTitle(str(self.friend))\n self.SetSize((653, 467))\n self.chat_log.SetMinSize((635, 400))\n self.text_send.SetMinSize((635, -1))\n self.text_send.SetFocus()", "def setBounds(self, x, y, width, height):\n frameWidth = width\n frameHeight = height\n setLocation(x, y)", "def setWindowSize(width,height):\n dislin.winsiz(width,height)", "def draw_final_screen(self):\r\n root = Tk()\r\n MapGUI(root, self)\r\n root.geometry('710x540')\r\n root.mainloop()", "def create_main_window():\n main_win = MainWindow()\n main_windows.append(main_win)\n available_geometry = app.desktop().availableGeometry(main_win)\n main_win.resize(available_geometry.width() * 2 / 3,\n available_geometry.height() * 2 / 3)\n main_win.show()\n return main_win", "def __ev_resize(self, event):\n\n new_size = event.dict['size']\n surface_size = self.__screen.get_size()\n old_center = self.__screen.get_rect().center\n if new_size != surface_size:\n self.__screen = pygame.display.set_mode(new_size,\n self.__screen.get_flags(),\n self.__screen.get_bitsize())\n self.init(offset=vect_diff(self.__screen.get_rect().center,\n old_center))\n self.__screen_width, self.__screen_height = self.__screen.get_size()", "def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. 
Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def center_on_screen(self):\n window_frame = self.frameGeometry()\n screen_center = QtGui.QDesktopWidget().availableGeometry().center()\n window_frame.moveCenter(screen_center)\n self.move(window_frame.topLeft())", "def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)", "def __init_window(self) -> pygame.Surface:\n pygame.display.set_caption(CAPTION)\n win = pygame.display.set_mode((WIDTH, HEIGHT))\n \n return win", "def center_window(top):\n screen_width = top.winfo_screenwidth()\n screen_height = top.winfo_screenheight()\n\n width, height, old_x, old_y = get_geometry(top)\n\n new_x = (screen_width - width) // 2\n new_y = (screen_height - height) // 2\n geom = '{}x{}+{}+{}'.format(width, height, new_x, new_y)\n print(\"new geometry:\", geom)\n top.geometry(geom)", "def setup(self):\n self.ui.setup_window()", "def set_size(self, width, height):\n cairo.cairo_xcb_surface_set_size(self._pointer, width, height)\n self._check_status()", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def window_size(self, window_size):\n\n self._window_size = window_size", "def create_window_constants() -> None:\r\n\r\n self.WIDTH = 1000\r\n self.HEIGHT = 600\r\n\r\n self.WIDGET_PAD = 5 # Widget padding\r\n self.MAIN_BG = '#eeeeee' # Main background\r\n\r\n self.FONT_LARGE = ('Courier',24)\r\n self.FONT_NORMAL = ('Courier', 12)\r\n self.FONT_SMALL = ('Courier', 10)", "def setSize(self, width, height):\n dw = (width - self.width()) / 2.0\n dh = (height - self.height()) / 2.0\n rect = self.sceneRect()\n rect.adjust(-dw, -dh, dw, dh)\n self.setSceneRect(rect)", "def inicialUI(self):\r\n\r\n self.setGeometry(500, 500, 500, 500)\r\n self.setWindownTitle(\"Pesquisa\")\r\n self.displayWidgets()\r\n\r\n self.show()", "def center(window):\n window.update_idletasks()\n\n # Find the screen resolution\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n\n # Find new (x, y) coordinates\n size = tuple(int(_) for _ in window.geometry().split('+')[0].split('x'))\n x = screen_width/2 - 7 * size[0] / 13\n y = screen_height/2 - 6 * size[1] / 11\n\n # Apply new coordinates\n window.geometry(\"+%d+%d\" % (x, y))", "def setMinSize(self,width,height):\n assert (type(width) == int), \"width %s is not an int\" % `width`\n assert (width > 0), \"width %s is negative\" % `width`\n assert (type(height) == int), \"height %s is not an int\" % `height`\n assert (height > 0), \"height %s is negative\" % `height`\n self._frame._root.minsize(width,height)", "def center_screen(self, window_width, window_height):\n offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 
2)\n\n self.geometry('+{}+{}'.format(offset_right, offset_down))", "def center_screen(self, window_width, window_height):\n offset_right = int(self.winfo_screenwidth()/2 - window_width/2)\n offset_down = int((self.winfo_screenheight()-40)/2 - window_height / 2)\n\n self.geometry('+{}+{}'.format(offset_right, offset_down))", "def InitializeWindow(self):\n \n win_height = 600\n win_width = 900\n \n # 'x' and 'y' coordinates place window in the center of the screen\n y = int((self.winfo_screenheight() / 2) - (win_height / 2))\n x = int((self.winfo_screenwidth() / 2) - (win_width / 2))\n self.geometry(f'{win_width}x{win_height}+{x}+{y}')\n self.resizable(False, False)\n self.title('Log In')\n \n # Initialize the background template frame and canvas\n self.main_frame = Widgets.CreateFrame(self)\n self.main_frame.pack(fill='both', expand='true')\n self.main_canvas = Widgets.CreateCanvas(self.main_frame)\n self.main_canvas.pack(fill='both', expand='true')\n \n # Create a window in the center of the screen to hold widgets\n top_left_x = win_width / 4\n top_left_y = win_height / 4\n bottom_right_x = win_width - top_left_x\n bottom_right_y = win_height - top_left_y\n self.main_canvas.create_rectangle(top_left_x, top_left_y,\n bottom_right_x, bottom_right_y,\n fill='#f8f8ff')\n self.canvas_window = self.main_canvas.create_window(win_width / 2,\n win_height / 2)\n \n # Function to save user data if the window is exited\n self.protocol('WM_DELETE_WINDOW', self.OnClose)", "def open_window(self,size):\n # Window\n self.root = Tk()\n self.root.geometry(size)\n self.root.resizable(0, 0)\n\n\n # Tree\n self.tree = ttk.Treeview(self.root, heigh=20)\n self.tree.grid(row=4, column=0, padx=20)\n self.tree.grid(columnspan=5)\n\n hsb = ttk.Scrollbar(self.root, orient=\"horizontal\")\n hsb.configure(command=self.tree.xview)\n self.tree.configure(xscrollcommand=hsb.set)\n hsb.grid(row=5, column=0, padx=20, pady=20, columnspan=5, sticky=(W + E))", "def main(self):\n\n self.window.show_all()\n gtk.main()", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def setup():\r\n #this happens just once\r\n size(width, height) #instead of create_canvas\r", "def set_mode(self, size, *args, **kwargs):\n if env.japplet:\n self.jframe = env.japplet\n else:\n self.jframe = Frame(self.caption, size)\n if self.icon:\n self.jframe.setIconImage(self.icon)\n env.jframe = self.jframe\n self.jpanel = self.jframe.jpanel\n self.surface = self.jpanel.surface\n self.surface._display = self\n self._surfaceRect = self.surface.get_rect()\n self._surface_rect = [self._surfaceRect]\n self._rect_list = None\n self.jframe.setLocationRelativeTo(None)\n self.jframe.setVisible(True)\n self._warmup()\n return self.surface", "def widgets(self):\r\n self.setWindowTitle(\"PyCrypt\")\r\n self.setMinimumSize(QSize(500, 500))\r\n self.setMaximumSize(QSize(500, 500))\r\n# Adding the sub def for widgets etc\r\n self.add_menus_and_status()\r\n self.add_buttons()", "def main(self):\n self.root.mainloop()", "def SetupView(self):\r\n size = self.GetClientSizeTuple()\r\n height = self.maxtop - self.maxbottom\r\n width = self.maxright - self.maxleft\r\n \r\n #The ratio of the width to the height in the client-area\r\n screenratio = float(size[0]) / float(size[1])\r\n \r\n #The ratio of the world window. 
Because of divide-by-0, we have to make a special-case assignment\r\n if height == 0 or width == 0:\r\n ratio = screenratio\r\n else:\r\n ratio = width / height\r\n\r\n #Should seem familiar, since we did it in class...\r\n if ratio > screenratio:\r\n glViewport(0, (size[1] - (size[0] / ratio)) / 2, size[0], size[0] / ratio)\r\n if ratio < screenratio:\r\n glViewport((size[0] - size[1] * ratio) / 2, 0, size[1] * ratio, size[1])\r\n \r\n \r\n #I need to find an appropriate border value. It's scaled by the client-area because the world-window zooms, thus skewing any normal border given.\r\n if width == 0 or height == 0:\r\n xborder = 1\r\n yborder = 1\r\n else:\r\n xscale = size[0] / width\r\n xborder = 10 / xscale\r\n yscale = size[1] / height\r\n yborder = 10 / yscale\r\n \r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n gluOrtho2D(self.maxleft - xborder, self.maxright + xborder, self.maxbottom - yborder, self.maxtop + yborder)", "def OnSize(self, event):\r\n\r\n s = event.GetSize()\r\n self.SetTabRect(wx.Rect(0, 0, s.GetWidth(), s.GetHeight()))", "def __init__(self):\n self.size = width, height = pygame.display.Info().current_w, pygame.display.Info().current_h\n self.screen = pygame.display.set_mode(self.size)\n self.x = int((width - 910) / 2)\n self.y = int((height - 675) / 2)", "def relayout(self): \n\t\t#self.urmaswin.Layout()\n\t\t#wx.CallAfter(self.urmaswin.Layout)\n\t\t#wx.CallAfter(self.visualizer.OnSize)", "def _UpdateParent(self):\n self.Parent.Layout()\n self.Parent.SendSizeEvent()", "def DoSetSize(self, x, y, width, height, flags=wx.SIZE_AUTO):\r\n\r\n self._rect = wx.Rect(x, y, max(1, width), max(1, height))\r\n self.DoSizing()", "def __init__(self, title, width, height):\n super(BaseApp, self).__init__()\n self.title(title)\n self.__geometry(width, height)", "def showBasic(self):\n self.setWindowIcon(QIcon(self.icon))\n self.setWindowTitle(self.title)\n self.setGeometry(*self.posXY, *self.windowSize)\n self.show()", "def SetInitialSize(self, size=None):\n\n if size is None:\n size = wx.DefaultSize\n\n wx.Control.SetInitialSize(self, size)", "def DoSetViewport(self):\n size = self.size = self.GetClientSize()\n self.SetCurrent(self.context)\n glViewport(0, 0, size.width, size.height)" ]
[ "0.7444846", "0.74311256", "0.74113226", "0.7338753", "0.7217333", "0.7147651", "0.7128095", "0.7085407", "0.7039381", "0.69641066", "0.6941526", "0.6933898", "0.6916558", "0.6905236", "0.68072456", "0.67224926", "0.6704145", "0.666833", "0.66558856", "0.6651923", "0.66315264", "0.6628062", "0.66164577", "0.661176", "0.6606651", "0.65882736", "0.6539608", "0.65110385", "0.6502696", "0.645803", "0.6446009", "0.6445936", "0.6438267", "0.64295477", "0.6424715", "0.6399091", "0.6398647", "0.63961464", "0.63867515", "0.63865644", "0.63817775", "0.63687885", "0.635048", "0.63460594", "0.6326441", "0.6313632", "0.63119924", "0.6301869", "0.6295968", "0.62763005", "0.6275154", "0.62744606", "0.6273012", "0.6253665", "0.6247105", "0.6241412", "0.6236471", "0.62360585", "0.6235002", "0.6230756", "0.62141585", "0.61853844", "0.61793345", "0.61751616", "0.6171722", "0.61627257", "0.6159607", "0.61299497", "0.6127041", "0.6124443", "0.6114285", "0.61006844", "0.6097934", "0.60780996", "0.60668254", "0.6063442", "0.60629797", "0.6057844", "0.604207", "0.60335094", "0.6031357", "0.6031357", "0.6027301", "0.6013211", "0.59888923", "0.59846747", "0.5982843", "0.5969942", "0.59527546", "0.5952087", "0.5945269", "0.59369445", "0.59367883", "0.59271634", "0.5924055", "0.59219396", "0.59094155", "0.5909324", "0.59084606", "0.5902965" ]
0.6319304
45
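The highest-scoring snippets in the record above all follow the same recipe for placing a window: read the screen size with winfo_screenwidth/winfo_screenheight, subtract the window size, halve the result, and hand the offsets to geometry(). A minimal standalone sketch of that pattern follows; the default 600x300 size mirrors the first snippet, while the function name center_window is an illustrative choice not taken from any of them.

import tkinter as tk

def center_window(root, width=600, height=300):
    # Screen dimensions as reported by the Tk root.
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    # Offsets that place the top-left corner so the window is centered.
    x = (screen_w - width) // 2
    y = (screen_h - height) // 2
    root.geometry(f"{width}x{height}+{x}+{y}")

if __name__ == "__main__":
    root = tk.Tk()
    center_window(root)
    root.mainloop()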
Destroys this window and its associated assets
def _destroy(self):
    root = self._root
    turtle.Turtle._pen = None
    turtle.Turtle._screen = None
    self._root = None
    self._canvas = None
    turtle.TurtleScreen._RUNNING = True
    root.destroy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy(self):\n for window in self.windows:\n try:\n destroy_window(window)\n except:\n pass", "def destroy_window(self) -> None:\n self.master.destroy()\n self.master.master.create_right_left_containers()", "def destroy(self):\n self.window.destroy_output_panel(self.name)", "def __onclosing(self):\n self.window.destroy()", "def onCloseWindow(self, event):\r\n\r\n self.Destroy()", "def delwin(self):\n\t\tfor c in self.components:\n\t\t\tc.delwin()\n\t\tself.win = None", "def destroy(self):\n\n sceneOpts = self.sceneOpts\n contentPanel = self.contentPanel\n\n sceneOpts .removeListener('showXCanvas', self.name)\n sceneOpts .removeListener('showYCanvas', self.name)\n sceneOpts .removeListener('showZCanvas', self.name)\n sceneOpts .removeListener('labelSize', self.name)\n sceneOpts .removeListener('fgColour', self.name)\n sceneOpts .removeListener('showLabels', self.name)\n self.displayCtx .removeListener('location', self.name)\n self.displayCtx .removeListener('bounds', self.name)\n self.displayCtx .removeListener('selectedOverlay', self.name)\n self.displayCtx .removeListener('displaySpace', self.name)\n self.displayCtx .removeListener('radioOrientation', self.name)\n self.overlayList.removeListener('overlays', self.name)\n\n self.__labelMgr.destroy()\n self.__xcanvas.destroy()\n self.__ycanvas.destroy()\n self.__zcanvas.destroy()\n self.__removeEditMenu()\n\n contentPanel.Unbind(wx.EVT_SIZE)\n\n self.__xcanvas = None\n self.__ycanvas = None\n self.__zcanvas = None\n self.__focusedCanvas = None\n self.__labelMgr = None\n\n canvaspanel.CanvasPanel.destroy(self)", "def close(self):\n self.window.destroy()\n self.buttons_window.destroy()", "def DeleteWindow(self):\r\n\r\n if self._wnd:\r\n self._wnd.Destroy()\r\n self._wnd = None", "def destroy(self):\n tk.Frame.destroy(self)", "def destroy_on_close(self):\n self.deleteLater()", "def destroy(self):\r\n self._tidy()\r\n self.stop()\r\n try:\r\n self.opengl.destroy(self)\r\n except:\r\n pass\r\n if self.external_mouse:\r\n try:\r\n self.external_mouse.stop()\r\n except:\r\n pass_\r\n try:\r\n self.mouse.stop()\r\n except:\r\n pass\r\n try:\r\n self.tkwin.destroy()\r\n except:\r\n pass\r\n Display.INSTANCE = None", "def _close_window(self):\n render_window = self._iren.GetRenderWindow()\n render_window.Finalize()\n self._iren.TerminateApp()\n\n del render_window, self._iren, self._ren, self._renWin", "def destroy(self):\r\n if self.cur_message is not None:\r\n self.cur_message.destroy()\r\n self.cur_message = None\r\n if self.board is not None:\r\n self.board.destroy()\r\n self.board = None\r\n hold_sub_displays = True\r\n if not hold_sub_displays and self.game_control is not None:\r\n self.game_control.destroy()\r\n self.game_control = None\r\n if not hold_sub_displays and self.player_control is not None:\r\n self.player_control.destroy()\r\n self.player_control = None\r\n if not hold_sub_displays and self.score_window is not None:\r\n self.score_window.destroy()\r\n self.score_window = None", "def destroy(self):\r\n self.__destroy()", "def destroy_view(self): \n\n self.canvas.destroy()\n self.scrollbar.destroy()\n self.header_frame.destroy()\n self.button_frame.destroy()\n self.twitter_canvas.destroy()\n self.twitter_scrollbar.destroy()", "def finalizeExit(self) -> None:\n base.graphicsEngine.removeAllWindows()\n if self.win is not None:\n print(\"Exiting KarelCraft app, bye!\")\n self.closeWindow(self.win)\n self.win = None\n self.destroy()\n sys.exit()", "def destroy(self):\n self.root.stop()", "def unload(self):\n 
main.msgQ.removeEvent(Constants.CMSG_CHANGE_AVATAR_TYPE)\n main.msgQ.removeEvent(Constants.CMSG_CHANGE_TEAM_PVP)\n main.msgQ.removeEvent(Constants.CMSG_START_TO_READY_GAME)\n main.msgQ.removeEvent(Constants.CMSG_CANCEL_TO_JOIN_GAME)\n main.msgQ.removeEvent(Constants.CMSG_START_SIXTY_SECONDS_COUNTER)\n self.mainFrame.destroy()", "def destroy(self):\n gameengine.GameEngine().game_objects.remove(self)", "def image_window_destroy(self, widget, data=None):\n self._quit()", "def destroy(self):\n self.unbindAllWidgets()\n self.__func = None\n self.__instance = None", "def cleanup(self, window):\n if self._components:\n for component in self._components:\n component.cleanup(window)", "def destructor(self):\n cv2.destroyAllWindows()", "def destroy(self):\n bullet_tools.tear_down_scene()", "def close(self):\n self._screen = None\n pygame.display.quit()", "def destroy(self, *args):\n logger.debug(\"WarningSc.destroy called\")\n if self.manageGTK:\n if self.quit:\n sys.exit(0)\n else:\n self.gui.get_object(self.window).destroy()\n while gtk.events_pending():\n gtk.main_iteration()", "def cleanup(self):\n pygame.quit()", "def destroy (self,event=None):\n \n self.top.withdraw() # Don't allow this window to be destroyed.", "def delete_window(self):\r\n self.mw.eval('::ttk::CancelRepeat')\r\n SlTrace.lg(\"Closing windows\")\r\n ''' \r\n ActiveCheck.clear_active() # Disable activities\r\n if self.score_win is not None:\r\n self.score_win.destroy()\r\n self.score_win = None\r\n if self.mw is not None and self.mw.winfo_exists():\r\n self.mw.quit()\r\n self.mw.destroy()\r\n self.mw = None\r\n '''\r\n if self.on_exit is not None:\r\n self.on_exit()\r\n \r\n sys.exit() # Else quit\r", "def quit(self):\n\n self.main_window.destroy()", "def close(self):\n\n cv2.destroyWindow(winname=self.title)", "def destroy(self):\n widget = self.widget\n if widget:\n # On Windows, it's not sufficient to simply destroy the\n # widget. It appears that this only schedules the widget \n # for destruction at a later time. 
So, we need to explicitly\n # unparent the widget as well.\n widget.setParent(None)\n if widget.isWidgetType():\n widget.destroy()\n self.widget = None", "def destroy(self):\n self.__overlayList.removeListener('overlays', self.__name)\n base.Action.destroy(self)", "def destroy(self):\r\n self.visible = False", "def DestroyHintWindow(self):\r\n\r\n if self._hint_window:\r\n\r\n self._hint_window.Destroy()\r\n self._hint_window = None", "def destroy(self):\n\n self.cmapTexture.destroy()\n\n for tex in (self.modulateTexture,\n self.clipTexture,\n self.colourTexture):\n tex.deregister(self.name)\n glresources.delete(tex.getTextureName())\n\n self.removeListeners()\n self.deregisterAuxImage('modulate')\n self.deregisterAuxImage('clip')\n self.deregisterAuxImage('colour')\n\n self.modulateTexture = None\n self.clipTexture = None\n self.colourTexture = None\n self.modulateImage = None\n self.clipImage = None\n self.colourImage = None\n self.modulateOpts = None\n self.clipOpts = None\n self.colourOpts = None\n\n glimageobject.GLImageObject.destroy(self)", "def destroy_all(self):\n\n for k in self.widgets:\n self.widgets[k].destroy()\n self.widgets = {}\n self.window.destroy()\n self.window = tk.Frame(self.root)\n self.window.pack(side=\"top\", fill=\"both\", expand=True)", "def OnExit(self, event):\n \n print 'Cleaning up...'\n self.Destroy()", "def leave_page(self):\n self.window.destroy()", "def __destroy_ui(self):\n # Remove the viewable area from Gedit's side panel\n self.__side_panel.remove_item(self.__view_port)\n\n # Empty class's properties\n self.__tree_view = None\n self.__side_panel = None\n\n self.__view_port.destroy()\n self.__view_port = None", "def close(self):\n self.destroy()", "def done(self):\n self.root.destroy()", "def callback_destroy( self ):\r\n self.winRunning = False\r\n self.rootWin.destroy()\r\n exit()", "def destroy(self):\r\n self._obj.destroy()\r\n self._obj = None", "def close_window(self):\n # Window - END\n self.root.destroy()", "def close(self):\n \n self.renderer.RemoveActor(self._crosshair.actor)\n self.renderer.RemoveActor(self._scalar_bar_actor)\n self.renderer.RemoveActor(self._orientation_annotation)\n self.renderer.RemoveActor(self._corner_annotation)\n \n for layer in self._layers :\n self.renderer.RemoveActor(layer.actor)\n \n for gui_annotation in self._gui_annotations.values() :\n self.renderer.RemoveActor(gui_annotation.shape_actor)\n self.renderer.RemoveActor(gui_annotation.text_actor)", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Convert to 3D'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def destroy(self):\n self.context.destroy()", "def unload(self):\n if self.material_background:\n self.parent.removeItem(self.material_background)\n self.material_background = None\n if self.mod_background:\n self.parent.removeItem(self.mod_background)\n self.mod_background = None\n if self.material_foreground:\n self.parent.removeItem(self.material_foreground)\n self.material_foreground = None\n if self.mod_foreground:\n self.parent.removeItem(self.mod_foreground)\n self.mod_foreground = None\n if self.liquid:\n self.parent.removeItem(self.liquid)\n self.liquid = None", "def quit(self):\n self.window.quit()\n self.window.destroy()", "def delete_win(self, *args):\n if cmds.window(self.win_name, ex=1):\n cmds.deleteUI(self.win_name)", "def emit_and_destroy(self):\n self.f1_frame.scene.stop_animation()\n self.drone_frame.scene.stop_animation()\n 
self.roomba_frame.scene.stop_animation()\n self.car_frame.scene.stop_animation()\n self.turtle_frame.scene.stop_animation()\n self.pepper_frame.scene.stop_animation()\n\n self.switch_window.emit()\n self.f1_frame.scene.view.deleteLater()\n self.drone_frame.scene.view.deleteLater()\n self.roomba_frame.scene.view.deleteLater()\n self.car_frame.scene.view.deleteLater()\n self.turtle_frame.scene.view.deleteLater()\n self.pepper_frame.scene.view.deleteLater()", "def clean_up(self):\n cv2.destroyAllWindows()\n # self.vs.release()", "def exitGame(self):\n self.myBoard.clearFrame()\n for tileRow in self.myBoard.tiles:\n for tile in tileRow:\n tile.destroy()\n del Tile.images[:]\n del self.myBoard.images[:]\n self.myBoard.destroy()\n self.destroy()\n exit(0)", "def destroy(self):\n\n self.renderTexture .destroy()\n self.cmapTexture .destroy()\n self.negCmapTexture.destroy()\n self.lutTexture .destroy()\n\n self.removeListeners()\n self.deregisterLut()\n\n globject.GLObject.destroy(self)\n\n if self.flatShader is not None: self.flatShader.destroy()\n if self.dataShader is not None: self.dataShader.destroy()\n\n self.dataShader = None\n self.flatShader = None\n self.activeShader = None\n\n self.lut = None\n self.renderTexture = None\n self.cmapTexture = None\n self.negCmapTexture = None\n self.lutTexture = None", "def destroy():\n if QFGUI.__instance is not None:\n QFGUI.__instance.__running = False\n try:\n QFGUI.__instance.__gui_app.terminate()\n except socket.error: # ignore\n pass\n QFGUI.__instance.__gui_app = None\n del QFGUI.__instance.__gui_server_thread # clean up resource\n QFGUI.__instance.__gui_server_thread = None\n QFGUI.__instance.__qf = None\n QFGUI.__instance = None\n #\n # DO NOT COMMENT: important to tell unittest that GUI is destroyed\n print \"*** Destroyed QF GUI ***\"", "def destroyWindow(windowType):\n if windowType == \"volume\":\n os.remove(MY_VOL_PATH)\n\n elif windowType == \"backlight\":\n os.remove(MY_BACKLIGHT_PATH)\n Gtk.main_quit()", "def __del__(self):\n self.destroy()", "def clean(self):\n for i in self.winfo_children():\n i.destroy()", "def reset(self, window):\n self.__close_preview(window)\n self.__clear_context()", "def close_app(self):\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n db_path = os.path.join(BASE_DIR, \"..\", \"DATA\", \"AIRCRAFT_COLLISION_FORECAST_SYSTEM.db\")\n clean_table(db_path, 'AIRPLANES')\n\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n img_path = os.path.join(BASE_DIR, \"..\", \"GUI\", \"IMAGE\")\n\n # img_path = 'GUI\\\\IMAGE\\\\'\n img_file_names = [file_name for file_name in listdir(img_path) if isfile(join(img_path, file_name))]\n for file_name in img_file_names:\n if file_name not in ('map_marker.png', 'airplane_marker.png', 'collision_marker.png'):\n os.remove(os.path.join(img_path, file_name))\n print('Closing app')\n self.app.root_window.close()", "def cleanup(self):\r\n\r\n # Remove strip from window.\r", "def close(self):\n\n\t\tself._window.close()", "def exit(self):\n if self.window:\n self.window.close()", "def destroy(self):\n pass # Nothing for now", "def endWindow(self):\n\t\tself.vidcap.release()\n\t\tcv2.destroyWindow(\"show\")", "def save_before_close(self):\n if self.db_window:\n self.db_window.destroy()\n self.destroy()", "def close(self):\n self._close_viewer_window()\n self.env.close()", "def on_cleanup(self):\n\n pygame.quit()", "def quit_click(self):\n\n self.parent.destroy()", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n 
pass", "def cancel(self):\n self.top.destroy()", "def deinit(self):\n self._font.close()", "def destructor(self):\n print(\" [SB Live] Terminating...\\n\",\n \"[SB Live] View most recent cache in \\'cache/replay.mov\\'\")\n self.killThread = True\n self.t.join()\n self.root.destroy()\n self.vs.release() # release web camera\n self.cache.release()\n self.replayStream.release()\n cv2.destroyAllWindows() # it is not mandatory in this application", "def destroy(self):\n\n pass", "def _destroy(self):\n # FIXME: Arrange for a more controlled shutdown through the credentials\n\n self.replay_window_persisted = True\n self.sequence_number_persisted = self.sender_sequence_number\n self._store()\n\n del self.sender_key\n del self.recipient_key\n\n os.unlink(self.lockfile.lock_file)\n self.lockfile.release()\n\n self.lockfile = None", "def statDestroy():\n root.destroy()\n statView()", "def close(self):\n if(screen == self):\n screen = None", "def destroy(self):\n if self._ptr is not None:\n # run and remove destructor on c data\n _global_destroy(self._display, self._ptr)\n ffi.gc(self._ptr, None)\n self._ptr = None\n self._display = None", "def exit(self):\n self.root.grab_release()\n self.root.destroy()", "def onClose(self, event): \n \n self.Destroy()\n return", "def exit(self):\n GRobot.exit_lock.acquire()\n if not self._deleted:\n if self.inspector:\n self.inspector.close()\n sip.delete(self.inspector)\n\n if self.display:\n self.webview.close()\n sip.delete(self.webview)\n\n if self.page and not sip.isdeleted(self.page):\n sip.delete(self.page)\n\n GRobot._liveRobot -= 1\n\n if GRobot._liveRobot == 0 and GRobot._loop is not None:\n GRobot._kill_loop=gevent.spawn_later(20,self.kill_loop)\n\n\n self._deleted = True\n\n GRobot.exit_lock.release()", "def removeScene(self):\n del self.scene, self.imgPixmapItem", "def remove_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n self.removeSubWindow(window)\n self._logger.debug(\"done\")", "def unload(self):\n self.iface.removePluginRasterMenu(self.menu, self.action)\n self.iface.removeRasterToolBarIcon(self.action)", "def unload(self):\n self.iface.pluginToolBar().removeAction(self.openDialogAction)", "def exit(self):\n \t\troot.destroy()\n \t\tpass", "def destroy_mat(self):\n\n if self._mat is None:\n return\n\n if self._shell:\n destroy_shell_context(self._mat)\n\n self._mat.destroy()\n self._mat = None", "def clear_screen(self):\r\n lst_grid = self.root.grid_slaves()\r\n for widget in lst_grid:\r\n widget.destroy()\r\n lst_pack = self.root.pack_slaves()\r\n for widget in lst_pack:\r\n widget.destroy()", "def bye(self):\n self._frame._destroy()\n self._turtles = []\n self._gpens = []\n del self._frame", "def __window_close(self):\n pass", "def unload(self):\n for action in self.actions:\n self.iface.removePluginRasterMenu(\n self.tr(u'&Hybriddekning'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&PacSafe'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&PolygonByPolarCoordinates'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&CHOUCAS'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n 
del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Create xyzrgb from Mosaic/DSM'),\n action)\n self.iface.removeToolBarIcon(action)" ]
[ "0.7758888", "0.7666368", "0.76103795", "0.7549023", "0.75396657", "0.749664", "0.74601287", "0.7457397", "0.7421807", "0.7339096", "0.73000395", "0.72744155", "0.7256992", "0.72431695", "0.7238555", "0.7213867", "0.7204563", "0.71423733", "0.7119873", "0.7090933", "0.7061171", "0.70511407", "0.70408297", "0.7016762", "0.6986384", "0.6978324", "0.69704646", "0.6966765", "0.6949641", "0.694166", "0.6935619", "0.69206995", "0.69110984", "0.69050163", "0.6903453", "0.69018126", "0.6892647", "0.68895936", "0.68729335", "0.6861786", "0.6844735", "0.6841508", "0.679743", "0.6776919", "0.6774244", "0.6767136", "0.6766207", "0.67652464", "0.6723242", "0.671631", "0.671461", "0.67083395", "0.67077255", "0.67066544", "0.67059255", "0.6669194", "0.6658357", "0.6655379", "0.6632422", "0.66317", "0.661518", "0.66119826", "0.66105473", "0.66093993", "0.6585281", "0.6577487", "0.6575614", "0.65641606", "0.6557198", "0.6546722", "0.65424603", "0.65297335", "0.65297335", "0.65297335", "0.65297335", "0.6525651", "0.6517546", "0.6509867", "0.6495265", "0.64862686", "0.6468969", "0.6459824", "0.6459395", "0.64532495", "0.6450933", "0.64463353", "0.6445482", "0.64443797", "0.64426374", "0.64418244", "0.64390063", "0.64364475", "0.6431333", "0.64250183", "0.6423568", "0.64188594", "0.6416926", "0.64154136", "0.6414408", "0.64059997" ]
0.6874586
38
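The _destroy method above tears down a turtle-backed window: it clears the turtle module's class-level references (Turtle._pen, Turtle._screen), drops the instance's own references, restores TurtleScreen._RUNNING, and only then destroys the Tk root it cached in a local variable. The sketch below keeps just that teardown order — take a local handle, clear the instance's references, then destroy — for a plain Tk wrapper with no turtle state; the class name and widget layout are invented for illustration.

import tkinter as tk

class CanvasWindow:
    # Plain Tk wrapper used only to illustrate the teardown order above.
    def __init__(self, title="window"):
        self._root = tk.Tk()
        self._root.title(title)
        self._canvas = tk.Canvas(self._root, width=400, height=300)
        self._canvas.pack()

    def destroy(self):
        # Keep a local handle so the root can still be destroyed after
        # the instance's own references have been cleared.
        root = self._root
        self._canvas = None
        self._root = None
        root.destroy()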
The x coordinate for top left corner of window
def x(self): return self._x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_x_position(self):\n return self.rect.x", "def get_x(self):\n return self.posX", "def get_pos_x(self):\n return self.__pos_x", "def content_box_x(self):\n return self.position_x + self.margin_left + self.padding_left + \\\n self.border_left_width", "def border_box_x(self):\n return self.position_x + self.margin_left", "def _get_x(self):\n return self.position.x", "def get_x_position(self):\n return self.actual_coordinates[0]", "def centerx(self):\n return self.left + self.width / 2", "def offset_x(self) -> int:\n self.tk_ref.update()\n return self.tk_ref.winfo_x()", "def top_left(self):\n return Point(self.left, self.top)", "def get_x(self):\n return self.coords[0]", "def origin_x(self):\n return self._origin[0]", "def x_origin(self):\n return self._x_origin", "def get_origin_x_position(self):\n return self.origin_coordinates[0]", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def position(self):\n\n\t\treturn self._window.position", "def calculate_window_position(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2", "def getlefttop(self,xnum,ynum):\n left = self.xmargin + xnum*CELLSIZE\n top = self.ymargin + ynum*CELLSIZE\n return (left,top)", "def left(self):\n return self.points['topLeft'].x", "def left(self):\n return self.points['topLeft'].x", "def layout_x(self):\n return self.floris.farm.layout_x", "def Getxcoord(self):\n return self.x_coord", "def x(self):\n return self.coords[0]", "def x(self):\n return self._coords[0]", "def x(self):\r\n return self.position.x", "def get_top_left(self):\n return self._top_left", "def x(self):\n return _libsbml.Point_x(self)", "def get_window_x_y(windowid):\n return commands.getoutput(\"xwininfo -id \"+windowid+\" | grep 'Corners' | cut -d' ' -f5 | cut -d'+' -f2,3\").split(\"+\")", "def top_left_option():\n active = get_active_window()\n Width=get_corner_Width(active)\n Height=get_top_Height()\n PosX = get_left_PosX(active,Width)\n PosY=get_top_PosY()\n move_window(active,PosX,PosY,Width,Height)\n raise_window(active)", "def get_x(self):\n\t\treturn self._collision_rect.x + 14", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def getAbsoluteLeft(self):\n return self.canvas.getAbsoluteLeft()", "def get_pos(self):\n return self.rect.midtop", "def topLeftCorner(self):\n self._updateExtents()\n return (self._mMinX,self._mMinY)", "def get_xmin(self):\n return self.__xmin", "def getXCoordinate(self) -> float:\n return self.x_coord", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def getX(self):\n return self.position.getX()", "def getX(self):\n return self.position[0]", "def x(self):\n if self._x is None:\n self.compute_coordinates()\n return self._x", "def anchor_x(self):\n return self._anchor_x", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) 
/ (self.ec._win.height / 2.)\n return np.array([x, y])", "def topleft(self, x=0, y=0):\n topleft = self.rect.topleft\n if x or y:\n return (topleft[0]+x, topleft[1]+y)\n return topleft", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def xaxis ( self ) :\n return self.__xaxis", "def position_window(self):\n x, y = self.get_position()\n root_x = self.anchor_widget.winfo_rootx() + x\n root_y = self.anchor_widget.winfo_rooty() + y\n self.tipwindow.wm_geometry(\"+%d+%d\" % (root_x, root_y))", "def get_axis_x(self):\r\n return self.__x_axis", "def padding_box_x(self):\n return self.position_x + self.margin_left + self.border_left_width", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def _eef0_xpos(self):\n if self.env_configuration == \"bimanual\":\n return np.array(self.sim.data.site_xpos[self.robots[0].eef_site_id[\"left\"]])\n else:\n return np.array(self.sim.data.site_xpos[self.robots[0].eef_site_id])", "def topleft(self):\n return (self.left, self.top)", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def get_x(self) -> int:\n return self.__x", "def x(self):\n return self._turtle.xcor()", "def x(self):\n return self._turtle.xcor()", "def _eef1_xpos(self):\n if self.env_configuration == \"bimanual\":\n return np.array(self.sim.data.site_xpos[self.robots[0].eef_site_id[\"right\"]])\n else:\n return np.array(self.sim.data.site_xpos[self.robots[1].eef_site_id])", "def x_coord(self):\n\n return self.x0 + np.arange(self.nx) * self.dx", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def x(self):\n return self.axes[1]", "def pos_x(self, *args, **kwargs) -> Any:\n pass", "def xaxis(self):\n return self._xaxis", "def x(self) -> int:\n return self.data.x_centre >> 4", "def get_ship_x(self):\n return self.x", "def _rect_left(self):\n\treturn min(self.x, self.x + self.w)", "def getXLimit(self):\n return self.axes.get_xlim()", "def x_halo(self): \n return self.coords_halo[0]", "def get_last_click_x():\r\n return _cue.getMouseLocation().getX()", "def getXOffset(self):\n return _libsbml.Point_getXOffset(self)", "def top_left(self, obj):\n return self.phy2abs.top_left(obj)", "def TopHat_window(self, x):\n return 3./(x)**3*(np.sin(x)-x*np.cos(x))", "def offset_x(self, x: int):\n self.tk_ref.geometry(f'{self.width}x{self.height}+{x}+{self.offset_y}')", "def getMinX(self):\n return self.minx", "def get_alien_x(self):\n return self.x", "def get_attach_point_top(self):\n return self.mapToGlobal(self.rect().center()) # Default behavior", "def show_mouse_position_with_px(self):\n self.main_menu_greets_fonts = pygame.font.Font(os.path.join(PATH_TO_RESOURCE, 'font_forever.ttf'), 10)\n self.positiontext(f'Mouse position {pygame.mouse.get_pos()}', (770, 20))\n self.mouse = pygame.mouse.get_pos()\n return self.mouse", "def get_mouse_pos(self):\n return self.mouse_pos", "def __get_x__(self):\n return self.Direction['x']", "def xAt(self, col):\n\n return self.bottomBoard.x + self.bottomBoard.xAt(col)", "def __window_moveTo(self, x, y):\n pass", "def get_virtual_x_position(self):\n x_real = (\n (self.get_x_position() - self.get_origin_x_position()) *\n sin(self.get_origin_direction() * pi / 180)\n )\n 
y_real = (\n (self.get_y_position() - self.get_origin_y_position()) *\n cos(self.get_origin_direction() * pi / 180)\n )\n return abs(x_real + y_real)", "def getX(self):\n return self.x", "def GetSnapPosition(self):\r\n\r\n snap, hAlign, vAlign, monitor = self._is_docked\r\n \r\n display = wx.Display(monitor)\r\n area = display.GetClientArea()\r\n size = self.GetManagedWindow().GetSize()\r\n \r\n pos = wx.Point()\r\n if hAlign == wx.LEFT:\r\n pos.x = area.x\r\n elif hAlign == wx.CENTER:\r\n pos.x = area.x + (area.width - size.x)/2\r\n else:\r\n pos.x = area.x + area.width - size.x\r\n\r\n if vAlign == wx.TOP:\r\n pos.y = area.y\r\n elif vAlign == wx.CENTER:\r\n pos.y = area.y + (area.height - size.y)/2\r\n else:\r\n pos.y = area.y + area.height - size.y\r\n\r\n return pos", "def position_window(window):\n pos = QtGui.QCursor.pos()\n window.move(pos.x(), pos.y())", "def xmin(self):\n return self.bbox[0][0]", "def x_axis_location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"x_axis_location\")", "def x_axis_location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"x_axis_location\")", "def x_axis_location(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"x_axis_location\")", "def locations_x(self):\n return self._locations[0]", "def get_xpos(self, body_index):\n xpos_x_func = self.wrapper.get_xpos_x\n xpos_y_func = self.wrapper.get_xpos_y\n xpos_x_func.restype = ctypes.c_double\n xpos_y_func.restype = ctypes.c_double\n xpos_x = xpos_x_func(self.instance, body_index)\n xpos_y = xpos_y_func(self.instance, body_index)\n\n return xpos_x, xpos_y", "def _handle_0_xpos(self):\n return self.sim.data.site_xpos[self.handle_0_site_id]", "def minX(self):\n return min(self.getx())", "def getPosition(self):\n return self.x", "def get_machinekit_position():\n return settings.controller.axes_position()", "def get_position(self):\n return self._find_gnx_node(self.gnx)", "def reflect_x(self):\n\n return Point(self.x, - self.y)", "def getupperleft(self):\n return (self.rect.x, self.rect.y)", "def getX(self):\r\n\t\treturn self._x", "def get_attach_point_top(self):\n return self._center_widget.mapToGlobal(\n self._center_widget.rect().topLeft()\n ) + QPoint(self._center_widget.width() / 2, 0)" ]
[ "0.7928865", "0.7497541", "0.74468994", "0.7431216", "0.7420587", "0.7373978", "0.73659885", "0.7315604", "0.73032784", "0.7258727", "0.7226243", "0.72176665", "0.71952814", "0.71450615", "0.7142987", "0.70740926", "0.70560735", "0.705076", "0.7001769", "0.7001769", "0.6961339", "0.6955607", "0.6953267", "0.6950569", "0.6931859", "0.6921299", "0.6900523", "0.6882399", "0.68313134", "0.6820719", "0.6814145", "0.6811229", "0.6809684", "0.68027604", "0.674373", "0.6691854", "0.6691355", "0.668975", "0.6638566", "0.66377807", "0.6636005", "0.66272444", "0.6619061", "0.65826166", "0.65826166", "0.6577197", "0.6571482", "0.6547223", "0.65405715", "0.6522919", "0.6522919", "0.6522919", "0.64865345", "0.6452804", "0.64424413", "0.64294916", "0.64198315", "0.64198315", "0.64149827", "0.6407143", "0.63978094", "0.63935524", "0.63830394", "0.6380576", "0.63791996", "0.6379128", "0.6375044", "0.6365636", "0.6347216", "0.63283384", "0.63274735", "0.63086873", "0.6297791", "0.6297616", "0.6283192", "0.6281833", "0.62621963", "0.6260557", "0.6260046", "0.6252685", "0.6251885", "0.62499946", "0.6244317", "0.6238377", "0.62347686", "0.62323415", "0.62223303", "0.6221579", "0.6221579", "0.6221579", "0.622084", "0.62186897", "0.62066066", "0.6203024", "0.6194201", "0.6192554", "0.6192197", "0.6188219", "0.6181676", "0.61608565", "0.61514914" ]
0.0
-1
The y coordinate for top left corner of window
def y(self): return self._y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def y(self):\n return self.top", "def get_y_position(self): \n return self.rect.y", "def top_y(self):\r\n return self.position.y + self.size.y + self.bulk", "def get_y(self):\n return self.posY", "def _get_y(self):\n return self.position.y", "def y(self):\r\n return self.position.y", "def get_pos_y(self):\n return self.__pos_y", "def top(self):\n return self.points['topRight'].y", "def top(self):\n return self.points['topRight'].y", "def get_y_position(self):\n return self.actual_coordinates[1]", "def _rect_top(self):\n\treturn max(self.y, self.y + self.h)", "def getYpos(self):\n return self.y", "def border_box_y(self):\n return self.position_y + self.margin_top", "def content_box_y(self):\n return self.position_y + self.margin_top + self.padding_top + \\\n self.border_top_width", "def origin_y(self):\n return self._origin[1]", "def position(self):\n\n\t\treturn self._window.position", "def get_y(self):\n return self.coords[1]", "def y_origin(self):\n return self._y_origin", "def getAbsoluteTop(self):\n return self.canvas.getAbsoluteTop()", "def getY(self):\n return self.position.getY()", "def get_origin_y_position(self):\n return self.origin_coordinates[1]", "def y(self):\n return _libsbml.Point_y(self)", "def y(self):\n return self.coords[1]", "def calculate_window_position(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2", "def getY(self):\n return self.position[1]", "def get_pos(self):\n return self.rect.midtop", "def y(self):\n return self._coords[1]", "def offset_y(self) -> int:\n self.tk_ref.update()\n return self.tk_ref.winfo_y()", "def y_coord(self):\n\n return self.y0 + np.arange(self.ny) * self.dy", "def y(self) -> int:\n return self.data.y_centre >> 4", "def layout_y(self):\n return self.floris.farm.layout_y", "def getY(self):\n return self.y", "def getY(self):\n return self.y", "def get_y(self):\n\t\treturn self._collision_rect.y + 25", "def getY(self):\r\n\t\treturn self._y", "def get_ly(self):\r\n return self.dy * self.ny - self.oy", "def get_position(self):\n # The tip window must be completely outside the anchor widget;\n # otherwise when the mouse enters the tip window we get\n # a leave event and it disappears, and then we get an enter\n # event and it reappears, and so on forever :-(\n #\n # Note: This is a simplistic implementation; sub-classes will likely\n # want to override this.\n return 20, self.anchor_widget.winfo_height() + 1", "def get_xy(self):\r\n return self.board.get_xy()", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def getYCoordinate(self) -> float:\n return self.y_coord", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def getY(self):\n return self.__y", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def get_alien_y(self):\n return self.y", "def __get_y__(self):\n return self.Direction['y']", "def top_distance(self):\n return self.y", "def bottom_y(self):\r\n return self.position.y - self.size.y - self.bulk", "def anchor_y(self):\n return 
self._anchor_y", "def GetY(self):\r\n\r\n return self._y", "def get_window_x_y(windowid):\n return commands.getoutput(\"xwininfo -id \"+windowid+\" | grep 'Corners' | cut -d' ' -f5 | cut -d'+' -f2,3\").split(\"+\")", "def get_virtual_y_position(self):\n x_real = (\n - 1 * (self.get_x_position() - self.get_origin_x_position()) * cos(\n self.get_origin_direction() * pi / 180\n )\n )\n y_real = (\n (self.get_y_position() - self.get_origin_y_position()) *\n sin(self.get_origin_direction() * pi / 180)\n )\n return x_real + y_real", "def getMinY(self):\n return self.miny", "def padding_box_y(self):\n return self.position_y + self.margin_top + self.border_top_width", "def centery(self):\n return self.top + self.height / 2", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])", "def top_coords_absolute(self):\n pass", "def y(self):\n return self.axes[0]", "def height(self) -> int:\n return self.screen.getmaxyx()[0]", "def lastScreenPos(self):\n return Point(self._lastScreenPos)", "def lastScreenPos(self):\n return Point(self._lastScreenPos)", "def findY(self):\n return self.y", "def height(self):\n _, ymin, _, ymax = self.viewport\n return self.parent.window_size[1] * (ymax - ymin)", "def get_last_click_y():\r\n return _cue.getMouseLocation().getY()", "def bottom(self):\n return self.top + self.height", "def y(self):\n if self._y is None:\n self.compute_coordinates()\n return self._y", "def get_position(self):\n return self._border.get_position()", "def toTk(self,y):\r\n if y == maxValue: return 0\r\n tk_y = Size\r\n if y != minValue:\r\n tk_y -= y\r\n return tk_y", "def getY(self):\n return _libsbml.BoundingBox_getY(self)", "def event_to_x_y(self, event):\n\t\treturn (round(event.x / self.w_to_px), round((HEIGHT - event.y) / self.h_to_px))", "def tool_pos(self):\n return self.sim.data.get_body_xpos(self.end_effector)", "def get_attach_point_top(self):\n return self.mapToGlobal(self.rect().center()) # Default behavior", "def bottom(self):\n return self.points['bottomRight'].y", "def bottom(self):\n return self.points['bottomRight'].y", "def get_y(self):\n return self.__y", "def topRightCorner(self):\n self._updateExtents()\n return (self._mMaxX,self._mMinY)", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def getY(self):\n return self.proj.getY()", "def y(self):\n return self._translation[1, 0]", "def locations_y(self):\n return self._locations[1]", "def height(self):\n return self.maxy - self.miny", "def getYOffset(self):\n return _libsbml.Point_getYOffset(self)", "def get_machinekit_position():\n return settings.controller.axes_position()", "def height(self):\n return self.upper_right.y - self.lower_left.y", "def y_halo(self): \n return self.coords_halo[1]", "def getY(self):\n return self.components[1]", "def getY(self):\n return self.components[1]", "def _rect_bottom(self):\n\treturn min(self.y, self.y + self.h)", "def height(self):\n return(self.SCREEN_H)", "def ymax(self):\n return self.bbox[1][1]", "def 
_positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def position_window(self):\n x, y = self.get_position()\n root_x = self.anchor_widget.winfo_rootx() + x\n root_y = self.anchor_widget.winfo_rooty() + y\n self.tipwindow.wm_geometry(\"+%d+%d\" % (root_x, root_y))", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def y(self):\n return self._turtle.ycor()", "def y(self):\n return self._turtle.ycor()", "def get_below(self):\n current_index = ALL_WINDOWS.index(self)\n if current_index == 0:\n return BASE_SCREEN\n\n return ALL_WINDOWS[current_index - 1]", "def window_height(self):\n return self._window_size()[1]", "def y0(self):\n return self._y0" ]
[ "0.8223235", "0.7753512", "0.7607309", "0.75265026", "0.74642813", "0.74340755", "0.74284726", "0.7398167", "0.7398167", "0.73863155", "0.7359533", "0.73249245", "0.73192805", "0.7303369", "0.7265452", "0.71478647", "0.7138041", "0.71298176", "0.7127513", "0.71184486", "0.71144253", "0.7112831", "0.7089523", "0.7063504", "0.7031104", "0.70307827", "0.7019381", "0.6984234", "0.6950975", "0.69398874", "0.6872126", "0.6863474", "0.6863474", "0.6839254", "0.68200594", "0.6820009", "0.6815857", "0.68144315", "0.6779492", "0.6779492", "0.676467", "0.67571175", "0.67571175", "0.67571175", "0.674955", "0.67467964", "0.67295736", "0.6723394", "0.6691851", "0.66541433", "0.6639679", "0.6608272", "0.6607005", "0.6604636", "0.65942967", "0.65851027", "0.65810513", "0.6571354", "0.65677935", "0.65599525", "0.6551162", "0.6521164", "0.6521164", "0.6498584", "0.6496705", "0.6480199", "0.64710104", "0.64657015", "0.64646405", "0.6443772", "0.64312154", "0.6427481", "0.64273506", "0.6426771", "0.64180773", "0.64180773", "0.64079654", "0.64011544", "0.6375132", "0.6373699", "0.63532746", "0.6331074", "0.6325401", "0.6318437", "0.6309311", "0.63063985", "0.62926316", "0.62916756", "0.62708485", "0.62708485", "0.62504786", "0.6248378", "0.62458336", "0.6245269", "0.6241773", "0.6233762", "0.6232052", "0.6232052", "0.62305564", "0.6204554", "0.6200229" ]
0.0
-1
The width of the window in pixels
def width(self): return self._width
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_window_width(self):", "def width(self):\n return(self.SCREEN_W)", "def window_width(self):\n return self._window_size()[0]", "def width(self):\n xmin, _, xmax, _ = self.viewport\n return self.parent.window_size[0] * (xmax - xmin)", "def width(self):\n return self._vim.current.window.width", "def width (self):\n return self._w", "def w(self):\n return self.width", "def width(self) -> int:\n return self.screen.getmaxyx()[1]", "def winfo_screenwidth(self):\n return self.width", "def screen_width(self):\n # type: () -> int\n return self._screen_width", "def video_window_width(self):\n # type: () -> int\n return self._video_window_width", "def width(self):\n return self.config.get('resolution', {}).get('x',1920) #1280", "def width(self):\n return self.figure.scene.get_size()[0]", "def _get_main_width(self):\n return int(self.screen_rect.width * self.ratio)", "def width(self):\n return _libsbml.Dimensions_width(self)", "def width(self) :\n return self.m_width", "def get_width(self):\n return self.width", "def get_width(self):\r\n return self._width", "def get_width(self):\r\n return self._width", "def get_width(self):\r\n return self._width", "def size(self):\n\n\t\treturn self._window.size", "def get_width ( self ):\n return self.width", "def get_window_size(self):\n return self.__window_size", "def width(self):\n # type: () -> float\n return self._width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def get_width(self):\n return self._width", "def get_width(self):\n return self._width", "def get_width(self):\n return self._width", "def get_width(self):\n return self._width", "def get_grid_width(self):\r\n return self.width", "def getWidth(self):\n return constants.DEFAULT_WIDTH", "def getWidth(self):\n return self.width", "def getWidth(self):\n return self.width", "def get_width(self):\n return self.__width", "def width(self) -> float:\n return self._width", "def get_width(self) -> int:\n return int(self._surface.get_width())", "def width(self):\n return self._el._parent.execute_script(\"return arguments[0].width\", self._el)", "def width(self):\n return self._el._parent.execute_script(\"return arguments[0].width\", self._el)", "def width(self) -> int:\n return self._width", "def getWidth(self):\n return frameWidth", "def getWidth(self):\n return self._width", "def width(self) -> int:\n return self.__width", "def width(self) -> int:\n self.tk_ref.update()\n return self.tk_ref.winfo_width()", "def get_dimension_width(self):\n pass", "def width(self):\n self._updateExtents()\n return self._mWidth", "def frame_width(self) -> int:\n pass", "def getWidth(self):\n return DEFAULT_WIDTH", "def getwinsize(self):", "def get_grid_width(self):\n return self._width", "def _window_size(self):\n width = self.cv.winfo_width()\n if width <= 1: # the window isn't managed by a geometry manager\n width = self.cv['width']\n height = self.cv.winfo_height()\n if height <= 1: # the window isn't managed by a geometry manager\n height = self.cv['height']\n return width, height", "def get_width(self) -> int:\n return self.rsimulator.get_frame_width()", "def 
getWidth(self) -> int:\n ...", "def get_grid_width(self):\n # replace with your code\n return self._width", "def get_grid_width(self):\n # replace with your code\n return self._width", "def GetWidth(self):\r\n\r\n return self._width", "def GetWidth(self):\r\n\r\n return self._width", "def width(self):\n return self.maxx - self.minx", "def size_with_window(self):\n return self.container['size_with_window']", "def width(self) -> int:\n\n return self._width", "def width(self):\n return self['width']", "def get_grid_width(self):\r\n # replace with your code\r\n return self._width", "def get_grid_width(self):\n return self.grid_width", "def get_grid_width(self):\n return self.grid_width", "def width(cls):\n return cls._width", "def GetWindowSize(self):\r\n \r\n return self._windowsize", "def width(self) -> int:\r\n return self.rect_uv.w", "def get_window_size(self):\n raise NotImplementedError", "def get_grid_width(self):\r\n return self._grid_width", "def width(self):\n return (self.__width)", "def width(self):\n return (self.scene.shape[2] - self.size) // self.size + 1", "def width(self):\n\t\tpass", "def get_grid_width(self):\r\n\r\n return self._grid_width", "def width(self) -> int:", "def width(self) -> int:", "def widget_width(self) -> Tuple[int, int]:\n return self.widget_size[0]", "def width(self):\n return self.board.shape[1]", "def frameWidth(self):\n return self._frame_width" ]
[ "0.8736872", "0.84118", "0.8364625", "0.830401", "0.80245584", "0.79670316", "0.7860544", "0.779818", "0.7734384", "0.7687281", "0.75625384", "0.7501574", "0.74973494", "0.7429925", "0.7426164", "0.7416717", "0.7414268", "0.7401848", "0.7401848", "0.7401848", "0.73836476", "0.7370822", "0.7358113", "0.73563063", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.734123", "0.7323816", "0.7323816", "0.7323816", "0.7323816", "0.73197424", "0.7311692", "0.73074484", "0.73074484", "0.730334", "0.72957146", "0.72669035", "0.72498953", "0.72498953", "0.7235121", "0.7219604", "0.7219498", "0.72190064", "0.72166157", "0.71991265", "0.71985203", "0.71931106", "0.71665967", "0.71632427", "0.7162638", "0.71576095", "0.71360606", "0.7135815", "0.7131842", "0.7131842", "0.7129527", "0.7129527", "0.71227026", "0.71220195", "0.71095663", "0.71077085", "0.7105775", "0.7100481", "0.7100481", "0.7089964", "0.7088116", "0.7078072", "0.7070342", "0.7067175", "0.70601356", "0.7058879", "0.70365435", "0.70347947", "0.69969904", "0.69969904", "0.69902813", "0.6987537", "0.6982298" ]
0.7367283
28
The height of the window in pixels
def height(self): return self._height
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def window_height(self):\n return self._window_size()[1]", "def height(self):\n _, ymin, _, ymax = self.viewport\n return self.parent.window_size[1] * (ymax - ymin)", "def height(self):\n return(self.SCREEN_H)", "def height(self) -> int:\n return self.screen.getmaxyx()[0]", "def winfo_screenheight(self):\n return self.height", "def screen_height(self):\n # type: () -> int\n return self._screen_height", "def video_window_height(self):\n # type: () -> int\n return self._video_window_height", "def height(self):\n return self.config.get('resolution', {}).get('y',1080) #720", "def height (self):\n return self._h", "def getHeight(self):\n return self.height", "def getHeight(self):\n return self.height", "def height(self):\n return self.maxy - self.miny", "def getHeight(self):\n return self._height", "def get_height(self):\n return self.calc_height(self.root)", "def get_height(self) -> int:\n return self.rsimulator.get_frame_height()", "def getHeight(self):\n return frameHeight", "def frame_height(self) -> int:\n pass", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def height(self):\n\t\tpass", "def get_height(self):\n return self.__height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def height(self):\n return _libsbml.Dimensions_height(self)", "def get_grid_height(self):\r\n return self.height", "def get_grid_height(self):\r\n return self._height", "def get_height(self):\r\n return self.state['h']", "def height(self):\r\n return self._height", "def height(self) :\n return self.m_height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def get_height(self) -> int:\n return int(self._surface.get_height())", "def get_grid_height(self):\n return self._height", "def get_grid_height(self):\n return self._height", "def getHeight(self):\n return _libsbml.Dimensions_getHeight(self)", "def height(self):\n yy = self.yy\n return max(yy) - min(yy)", "def height(self) -> int:\n return self.__height", "def height(self) -> int:\n return self._height", "def height(self) -> int:\n return self._height", "def height(self):\n # type: () -> float\n return self._height", "def height(self):\n return self._el._parent.execute_script(\"return arguments[0].height\", self._el)", "def height(self):\n return self._el._parent.execute_script(\"return arguments[0].height\", self._el)", "def get_grid_height(self):\n return self.grid_height", "def get_grid_height(self):\n return self.grid_height", "def get_frame_height(self) -> int:\n return self.__sim.frame_size()[1]", "def widget_height(self) -> int:\n return self.widget_size[1]", "def get_dimension_height(self):\n pass", "def height(self):\n\n return self.__height", "def height(self):\n return self.y.max() - self.y.min()", "def height(self):\n return self.__size[1]", "def height(self):\n return self.upper_right.y - self.lower_left.y", "def h(self):\n return 
self.height", "def height(self):\n return self[\"height\"]", "def height(self):\n return self[\"height\"]", "def GetHeight(self):\r\n\r\n return self._height", "def get_grid_height(self):\r\n return self._grid_height", "def get_current_height(self) -> int:\n return self.current_height", "def get_grid_height(self):\r\n\r\n return self._grid_height", "def getHeight(self):\n return _tkCall(self.image.height)", "def height(self):\n return self.client.call('GET', self.name + 'height')", "def height(self) -> int:\r\n return self.rect_uv.h", "def get_grid_height(self):\n return self._grid_height", "def get_grid_height(self):\n return self._grid_height", "def get_grid_height(self):\n return self._grid_height", "def height(self):\n self._updateExtents()\n return self._mHeight", "def height(self) -> int:\n self.tk_ref.update()\n return self.tk_ref.winfo_height()", "def get_grid_height(self):\n # replace with your code\n return self._height", "def height(self):\n return self.get_delta_value(self.Y_INDEX)", "def calculate_height(self):\n return self.endY - self.startY", "def frameHeight(self):\n return self._frame_height", "def height(self) -> int:\n if self.props.max_height:\n max_height = UIMetric.parse(self.props.max_height).to_pixels(self.parent.height)\n return min(self.isize[1].to_pixels(self.parent.height), max_height)\n else:\n return self.isize[1].to_pixels(self.parent.height)", "def getHeight(self):\r\n height = 1\r\n if self.orientation == \"v\":\r\n height = self.size\r\n return height", "def height(self) -> int:\n return self.root.height if not self.empty() else 0", "def get_height(self):\n if self.root is None:\n return 0\n else:\n return self._get_height(self.root) # Start at the root", "def get_grid_height(self):\n\n return self._grid_height", "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:" ]
[ "0.8631184", "0.8542661", "0.84499466", "0.80354226", "0.79522634", "0.78763425", "0.777075", "0.77323276", "0.76520807", "0.76136863", "0.76136863", "0.7605742", "0.75113416", "0.748779", "0.7457209", "0.74447066", "0.74397165", "0.74333185", "0.74333185", "0.74333185", "0.7410618", "0.74003214", "0.73886126", "0.73886126", "0.73886126", "0.73886126", "0.7386378", "0.73698306", "0.7335979", "0.7316408", "0.7311917", "0.7307816", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.73008096", "0.729356", "0.7291726", "0.7291726", "0.7288797", "0.726298", "0.72526854", "0.7240657", "0.7240657", "0.72399795", "0.72229356", "0.72229356", "0.7219219", "0.7219219", "0.72145295", "0.72027385", "0.7199236", "0.71957034", "0.71954596", "0.7193556", "0.7180789", "0.7170521", "0.7161155", "0.7161155", "0.7159493", "0.71500266", "0.71465975", "0.7134463", "0.7133128", "0.7113188", "0.7106913", "0.71055627", "0.71055627", "0.71055627", "0.7098868", "0.7096165", "0.7087122", "0.7046123", "0.70431286", "0.70412767", "0.7026555", "0.7008995", "0.70047826", "0.7002457", "0.7001737", "0.69830656", "0.69830656", "0.69830656" ]
0.72912085
56
The title displayed at top of window bar
def title(self): return self._frame._title
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_window_title(self): # real signature unknown; restored from __doc__\n return \"\"", "def winTitle(self, title):\n winTitle = title\n window = self.window\n window.setWindowTitle(winTitle)", "def set_title( self , winTitle ):\r\n self.rootWin.wm_title( str( winTitle ) )", "def title(self):\n return win32gui.GetWindowText(self.hwnd)", "def set_title(self):\n if self.currentconfig is None:\n self.setWindowTitle(\"(No config)\")\n elif self.cfname is None:\n self.setWindowTitle(\"Working config\")\n else:\n filename = miscutils.removesuffix(os.path.basename(self.cfname)).upper()\n self.setWindowTitle(\"Processing - \" + filename)", "def title_p(self):\n self.run_command('title_p')", "def app_title():\n print(\"*\" * 27)\n print(\" Stock App\")\n print(\"*\" * 27)", "def show_top_status(self):\n self.header_win.clear()\n size = self.size()\n display = self.app.config[\"display\"]\n head_parts = []\n if display[\"show_app_name\"]:\n head_parts.append(\"Suplemon Editor v\"+self.app.version)\n if display[\"show_clock\"]:\n head_parts.append(curr_time())\n if display[\"show_file_list\"]:\n head_parts.append(self.file_list_str())\n\n # Add module statuses to the status bar\n for name in self.app.modules.modules.keys():\n module = self.app.modules.modules[name]\n if module.options[\"status\"] == \"top\":\n head_parts.append(module.get_status());\n\n head = \" - \".join(head_parts)\n head = head + ( \" \" * (self.screen.getmaxyx()[1]-len(head)-1) )\n if len(head) >= size[0]:\n head = head[:size[0]-1]\n self.header_win.addstr(0,0, head, curses.color_pair(0) | curses.A_REVERSE)\n self.header_win.refresh()", "def draw_title(self):\n title = text_helper.create_text(\"Indefinite Loop\", menu_fonts, 50, white)\n self.main_menu_surface.blit(title, (center_horizontally(title, self.screen_dimensions), 50))", "def get_window_title(self):\n\n return self.window_title", "def __draw_title(self):\n title = 'SNAAAAKE'\n x_offset = (curses.COLS - len(title)) // 2\n y_offset = max(1, (curses.LINES - self.config.arena_size[1] - 2) // 4)\n self.stdscr.addstr(y_offset, x_offset, title)", "def setWindowTitle(self, title):\n self.__windowTitle = title", "def draw_title_window(self, screen: curses.window, height: int, width: int, y: int, x: int) -> None:\n title_win = screen.subwin(height, width, y, x)\n title_win.border()\n\n title = \"XKCD Extractor\"\n centered_x = width // 2 - len(title) // 2\n title_win.addstr(1, centered_x, title)", "def title_n(self):\n self.run_command('title_n')", "def create_title(self):\n label_title = Label(self.frame, text=\"Game Over\", font=(\"Arial\", 40), bg='lightblue',\n fg='white')\n label_title.pack()", "def Show_Titles( self ):\r\n self.system.Change_Seq( \"Title\" )", "def __draw_title(self):\n if self.title is not None:\n self.fig.suptitle(\n self.title, y=self.settings.otherParams[\"figure.title.yposition\"])", "def set_title(self):\n if self.loaded_filename:\n self.setWindowTitle('Starbound Mapper | {}'.format(self.loaded_filename))\n else:\n self.setWindowTitle('Starbound Mapper')", "def title(self):\n return self.run_command('title')[0]", "def title(self, title: str):\n\n #self.master.title(title)\n self.ax.set_title(title)\n self.canvas.draw()", "def set_title(self):\n plt.title(label=self.title, fontsize=self.titlesize)", "def setTitle(self, title):\n self.__title = title\n self.drawBorder()", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def set_root_title(self, text=None):\n title = \"Faceswap.py\"\n title += \" - {}\".format(text) if 
text is not None and text else \"\"\n self.root.title(title)", "def title(self):\n with switch_window(self._browser, self.name):\n return self._browser.title", "def title(self, titlestring):\n if _Screen._root is not None:\n _Screen._root.title(titlestring)\n _Screen._title = titlestring", "def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)", "def CreateConsole(self):\n lc = launcher.TextFrame('title')\n return lc", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def print_title():\n print(\" ##################\")\n print(\" # #\")\n print(\" # ===== ===== #\")\n print(\" # | | #\")\n print(\" # | | #\")\n print(\" # #\")\n print(\" ##################\")\n print(\"\\n\")\n print(\"#\" * 10, end='')\n print(\"Welcome to timetable tool\", end='')\n print(\"#\" * 10)", "def title(self) -> str:\n return self.tk_ref.title()", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def windowTitle(self):\n return self.__windowTitle", "def title(self) -> str:\n pass", "def title(self, title):\n\t\tself.head += '<title>' + title + '</title>\\n'", "def _mySetWindowTitle(self, windowTitle):\n self.setWindowTitle(windowTitle)", "def get_title():", "def TitlePrint(title):\n titleLength = len(title)\n barLength = titleLength + 12\n fmtdTitle = '----- {0} -----'.format(title)\n bar = '-' * barLength\n print(bar, fmtdTitle, bar,\n sep='\\n', end='\\n\\n')", "def title(self):\n return self.header", "def set_title(self, title):\n\t\tpass", "def settitle(self, title):\n self.__title = title\n self.__nonzero = True", "def create_title(self):\n label_title = Label(self.frame, text=\"Brick Breaker\", font=(\"Arial\", 40), bg='light blue',\n fg='white')\n label_title.pack()", "def printTitle(self, data):\r\n\t#try:\r\n #\twx.CallLater(1800, lambda x: x.SetTitle(self.title), self)\r\n\t#except:\r\n\t#\treturn\r\n #self.SetTitle(data)\r\n pass", "def title(self):\n return self.container['title']", "def title(self, title):\n\n self.container['title'] = title", "def prep_title(self):\n self.title_image = self.font.render(self.title, True, self.text_color,\n self.ctl_settings.panel_bg_color)\n self.title_image_rect = self.title_image.get_rect()\n self.title_image_rect.centerx = self.rect.centerx\n self.title_image_rect.bottom = self.rect.top - 1", "def set_title(self, title):\n self.title = title\n self.opf.title = title\n self.ncx.title = title", "def get_title(self):\n return self.run_command('get_title')[0]", "def set_title(self, title):\r\n self.title = title", "def configured_title(self):\n return self.get('title', self.DEFAULT_SPACE_TITLE)", "def set_title(self, title = \"FORM\"):\n\n c = self.canvas.setTitle(title)", "def __init__(self, title):\n super(TXWindowHeader, self).__init__()\n self.setupUi(self)\n self.window_lbl.setText(title)\n pxm = QtGui.QPixmap(path.Path(__file__).dirname().dirname() / 'resource' / 'tx_x.png').scaled(30, 30, QtCore.Qt.AspectRatioMode.KeepAspectRatio, QtCore.Qt.SmoothTransformation)\n self.tx_logo_lbl.setPixmap(pxm)", "def display_gui_window(self, window_title):\r\n cv2.imshow(window_title, self.image)", "def change_title(window):\n for i in range(1, 100):\n time.sleep(3)\n window.title = f'New Title #{i}'\n print(window.title)", "def title(self):\n\t\treturn self.page_title", "def set_title(self, title):\n self.widget.SetTitle(title)", "def 
updateTabTitle (self, wdoc, title):\n tabId = self.tab.indexOf(wdoc)\n self.tab.setTabText( tabId, self.addTag( repoType=wdoc.repoDest, txt=title, \n addSlash=False, project=wdoc.project) )\n self.updateActions(wdocument=wdoc)\n windowTitle = wdoc.getPath( absolute=True, withAsterisk = True )\n windowTitleFinal = self.addTag( repoType=wdoc.repoDest, txt=windowTitle, \n project=wdoc.project )\n # emit signal\n self.UpdateWindowTitle.emit(windowTitleFinal)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def set_window_title(self, filename=None):\n\n if filename:\n self.setWindowTitle(f\"DataLab {self.version} - Loaded Project: {filename}\")\n else:\n self.setWindowTitle(f\"DataLab {self.version}\")", "def toolbar_title(title):\n return LazyToolbarItem(\"staff_toolbar.items.Title\", title)", "def getTitleAndPos(self, pos, windowname = \"\"):\n \n wnd = WindowFromPoint(pos)\n while True:\n if not GetParent(wnd): break\n if windowname:\n if windowname in GetWindowText(wnd):\n break\n wnd = GetParent(wnd)\n\n # if the user-specified window is a valid top-level window, use it\n # except that the click took place on the genius application window or\n # on a child window that has the user-specified name\n if GetWindowText(wnd) != \"Operation Genius\" and windowname:\n try:\n w = winutil.getWindowHandle(windowname)\n except WindowNotFound:\n pass\n else:\n if windowname not in GetWindowText(wnd):\n wnd = w\n \n title = GetWindowText(wnd)\n wPos = winutil.ScreenToWindow(wnd, pos)\n return (title, wPos)", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def SetTitle(self, title):\n if self._title != title:\n self._title = title\n def closure(pane):\n pane.Caption(title)\n self._PaneInfoOperation(closure)", "def set_title(self, setto):\n command = 'title ' + str(setto)\n self.run_command(command)", "def set_title (self, title):\n self.title = title", "def set_title(self, title):\n self.l1.setText(title)", "def _reassign_title(self):\n # Reassign title from main axes to top panel -- works when this is\n # called on the main axes *or* on the top panel itself. 
This is\n # critical for bounding box calcs; not always clear whether draw() and\n # get_tightbbox() are called on the main axes or panel first\n if self._panel_side == 'top' and self._panel_parent:\n ax, taxs = self._panel_parent, [self]\n else:\n ax, taxs = self, self._tpanels\n if not taxs or not ax._title_above_panel:\n tax = ax\n else:\n tax = taxs[0]\n tax._title_pad = ax._title_pad\n for loc, obj in ax._titles_dict.items():\n if not obj.get_text() or loc not in (\n 'left', 'center', 'right'):\n continue\n kw = {}\n loc, tobj, _ = tax._get_title_props(loc=loc)\n for key in ('text', 'color', 'fontproperties'): # add to this?\n kw[key] = getattr(obj, 'get_' + key)()\n tobj.update(kw)\n tax._titles_dict[loc] = tobj\n obj.set_text('')\n\n # Push title above tick marks -- this is known matplotlib problem,\n # but especially annoying with top panels!\n # TODO: Make sure this is robust. Seems 'default' is returned usually\n # when tick label sides is actually *both*. Also makes sure axis is\n # visible; if not, this is a filled cbar/legend, no padding needed\n pad = 0\n pos = tax.xaxis.get_ticks_position()\n labs = tax.xaxis.get_ticklabels()\n if pos == 'default' or (pos == 'top' and not len(labs)) or (\n pos == 'unknown' and tax._panel_side == 'top'\n and not len(labs) and tax.xaxis.get_visible()):\n pad = tax.xaxis.get_tick_padding()\n tax._set_title_offset_trans(self._title_pad + pad)", "def title(self) -> String:\n pass", "def title(self):\n return self.__title", "def title(self):\n return self.__title", "def title(self):\n return self.__title", "def set_title(self, title):\n self.axplot.set_title(title)", "def writeTitle( self ):\n \n if self.mTitle:\n e = SVGdraw.text( self.mPageWidth / 2,\n self.mTitleFontSize ,\n self.mTitle,\n self.mTitleFontSize,\n self.mTitleFont,\n stroke = \"rgb(%i,%i,%i)\" % BLACK,\n text_anchor = \"middle\" )\n\n self.addElement(e)", "def set_title(self, title):\n self.dlg.txt_layer.setText(title)", "def title(self):\n\n return self._title", "def GetXTitle(self):\n return self.GetXaxis().GetTitle()", "def SetHeader(self, window):\n window.SetName(\"header\")\n window.SetBackgroundColour(wx.GetApp().settings.header_bg_color)\n window.SetForegroundColour(wx.GetApp().settings.header_fg_color)\n window.SetFont(wx.GetApp().settings.header_text_font)", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def _helpmenu_about():\n self.helpindex = Toplevel(self.master)\n self.helpindex.title(\"About\")\n self.helpindex.geometry(\"500x300\")\n self.helpindex.label()", "def change_tmux_window_title(text):\n # The idea here is to show the time through the window title\n # And other messages when needed.\n command = \"tmux rename-window \" + text\n subprocess.call(command.split())", "def get_title_menu(self):\n return _(self.view_label).capitalize()", "def Title(self):\n return self.title", "def title(self):\n return self.browser.get_attribute(\"title\", self)", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def set_title(self, title, color='black'):\n 
self._myCanvas.set_title(title, color)\n\n return", "def __window_home(self):\n pass", "def settitle(self, title):\n self.__title = title", "def settitle(self, title):\n self.__title = title", "def __setDetails(self):\n self.MainWindow.setWindowTitle(\"{0} {1}\".format(\n const.APP_NAME, const.VERSION))\n return True", "def title(self) -> str:\r\n return self._title", "def title(self):\n return self._title", "def title(self):\n return self._title", "def title(self):\n return self._title", "def title(self):\n return self._title", "def title(self):\n return self._title", "def title(self):\n return self._title", "def title(self) -> str:\n raise NotImplementedError", "def getTitle(self):\n\t\treturn self.driver.title" ]
[ "0.7741059", "0.75310904", "0.7421483", "0.73852855", "0.7244872", "0.7223689", "0.7173795", "0.71673006", "0.7165312", "0.71467364", "0.71392095", "0.7074522", "0.70630264", "0.7054297", "0.7051213", "0.70490485", "0.70396465", "0.702222", "0.7009707", "0.70010066", "0.69687885", "0.69574314", "0.6933193", "0.6933193", "0.68968445", "0.68931156", "0.68757033", "0.6874334", "0.6859962", "0.68519443", "0.6754884", "0.6754023", "0.6739594", "0.67305815", "0.6690976", "0.66751814", "0.66578126", "0.66464746", "0.66145235", "0.6611896", "0.659596", "0.6560446", "0.6538908", "0.65327907", "0.65322715", "0.6523058", "0.65053415", "0.6502184", "0.6494175", "0.6464744", "0.64385813", "0.64203054", "0.6394732", "0.63919616", "0.63771313", "0.63768184", "0.637142", "0.6364417", "0.6345351", "0.634375", "0.6338807", "0.6326203", "0.63143533", "0.6294063", "0.6287048", "0.6279719", "0.62795836", "0.62698674", "0.62616575", "0.62548923", "0.6254435", "0.6254435", "0.6254435", "0.62529904", "0.6252073", "0.6241936", "0.62321794", "0.6224777", "0.6224453", "0.62223", "0.6214025", "0.6213468", "0.62058306", "0.6201119", "0.6199946", "0.6176798", "0.6170496", "0.6165181", "0.61605495", "0.61605495", "0.61595315", "0.6152341", "0.61455274", "0.61455274", "0.61455274", "0.61455274", "0.61455274", "0.61455274", "0.6141729", "0.61337143" ]
0.6889334
26
Whether or not the Window supports user resizing
def resizable(self): return self._frame._root.resizable() == '1 1'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsResizeable(self):\r\n \r\n return self.HasFlag(self.optionResizable)", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def isSelectionResizing(self):\n return self.resizing", "def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:", "def ev_windowresized(self, event: WindowResized) -> None:", "def AuiManager_HasLiveResize(manager):\r\n\r\n # With Core Graphics on Mac, it's not possible to show sash feedback,\r\n # so we'll always use live update instead.\r\n \r\n if wx.Platform == \"__WXMAC__\":\r\n return True\r\n else:\r\n return (manager.GetAGWFlags() & AUI_MGR_LIVE_RESIZE) == AUI_MGR_LIVE_RESIZE", "def check_resize(self):\n yx = self.screen.getmaxyx()\n if self.current_yx != yx:\n self.current_yx = yx\n self.resize(yx)", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def check_window_size():\n \n wight = 870\n height = 519\n \n window = win32gui.FindWindow(MINECRAFT_CLASS_NAME, MINECRAFT_TITLE + MINECRAFT_VERSION)\n x0, y0, x1, y1 = win32gui.GetWindowRect(window)\n # x0 and y0 are initial points, upper left corner and lower left corner\n # then we need the difference between upper left corner and upper right corner to get the wight and\n # the difference between lower left corner and lower right corner to get the height\n \n w = x1 - x0\n h = y1 - y0\n \n if w is not wight or h is not height:\n win32gui.MoveWindow(window, x0, y0, wight, height, True)", "def ev_windowmaximized(self, event: WindowEvent) -> None:", "def ev_windowresized(self, event: tcod.event.WindowResized) -> T | None:", "def ev_windowminimized(self, event: WindowEvent) -> None:", "def get_window_size(self):\n raise NotImplementedError", "def HasMaximizeButton(self):\r\n \r\n return self.HasFlag(self.buttonMaximize)", "def resize_x(self) -> bool:\n raise NotImplementedError", "def ev_windowmaximized(self, event: tcod.event.WindowEvent) -> T | None:", "def getwinsize(self):", "def ev_windowminimized(self, event: tcod.event.WindowEvent) -> T | None:", "def fullscreen(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-fullscreen'))", "def __window_resizeBy(self, xDelta, yDelta):\n pass", "def IsMaximized(self):\r\n \r\n return self.HasFlag(self.optionMaximized)", "def _get_window_width(self):", "def sizeHint( self ):\n return self.window_size", "def IsFixed(self):\r\n \r\n return not self.HasFlag(self.optionResizable)", "def handleResize(self):\n pass", "def scale(self, _: Application) -> bool:\n return False", "def resize_y(self) -> bool:\n raise NotImplementedError", "def CanUseModernDockArt(self):\r\n\r\n if not _winxptheme:\r\n return False\r\n\r\n # Get the size of a small close button (themed)\r\n hwnd = self._frame.GetHandle()\r\n hTheme = winxptheme.OpenThemeData(hwnd, \"Window\")\r\n\r\n if not hTheme:\r\n return False\r\n\r\n return True", "def Resizable(self, resizable=True):\r\n \r\n return self.SetFlag(self.optionResizable, resizable)", "def maximize_option():\n Width=MaxWidth\n Height=MaxHeight - WinTitle -WinBorder\n PosX=LeftPadding\n PosY=TopPadding\n move_active(PosX,PosY,Width,Height)\n raise_window(\":ACTIVE:\")", "def can_zoom(self):\n return False", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = 
config.get(self.height_key)\n self.window.resize(width, height)", "def is_wide(self) -> bool:\n return self.layout == \"planar\"", "def CheckMovableSizer(self, part):\r\n\r\n # a dock may not be resized if it has a single\r\n # pane which is not resizable\r\n if part.type == AuiDockUIPart.typeDockSizer and part.dock and \\\r\n len(part.dock.panes) == 1 and part.dock.panes[0].IsFixed():\r\n \r\n return False\r\n \r\n if part.pane:\r\n \r\n # panes that may not be resized should be ignored here\r\n minPix, maxPix = self.CalculatePaneSizerLimits(part.dock, part.pane)\r\n\r\n if minPix == maxPix:\r\n return False\r\n \r\n return True", "def get_window_size(self):\n return self.__window_size", "def size_with_window(self):\n return self.container['size_with_window']", "def __isSizingCursor(self):\n sizingCursors = [wx.CURSOR_SIZENESW,\n wx.CURSOR_SIZENS,\n wx.CURSOR_SIZENWSE,\n wx.CURSOR_SIZEWE,\n wx.CURSOR_SIZING,\n wx.CURSOR_CROSS]\n try:\n sizingCursors.index(self.__currentCursor)\n return 1\n except ValueError:\n return 0", "def resizeEvent(self, _event: Optional[QResizeEvent] = None) -> None:\n\n current_frame = self.scene().current_frame\n\n if current_frame is not None:\n # EXTREMELY IMPORTANT LINE!\n # The sceneRect grows but never shrinks automatically\n self.scene().setSceneRect(current_frame.boundingRect())\n self.fitInView(current_frame.boundingRect(), Qt.KeepAspectRatio)", "def is_window(game_object: GameObject) -> bool:\n from sims4communitylib.enums.tags_enum import CommonGameTag\n from sims4communitylib.utils.objects.common_object_tag_utils import CommonObjectTagUtils\n return CommonObjectTagUtils.has_game_tags(game_object, (CommonGameTag.BUILD_WINDOW, ))", "def HasMinimizeButton(self):\r\n \r\n return self.HasFlag(self.buttonMinimize)", "def Maximize(self):\r\n\r\n return self.SetFlag(self.optionMaximized, True)", "def size(self):\n\n\t\treturn self._window.size", "def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)", "def is_full_screen(self) -> bool:\n return self._full_screen_windows is not None", "def tryFullscreen(self):\n\t\tif self.get_size()[0] == self.get_screen().get_width() and self.get_size()[1] == self.get_screen().get_height():\n\t\t\tself.autoResize()\n\t\t\tself.display()\n\t\telse:\n\t\t\tgobject.timeout_add(1, self.tryFullscreen)", "def IsPaneMinimized(self):\r\n \r\n manager = self.GetAuiManager()\r\n if not manager:\r\n return False\r\n \r\n if manager.GetAGWFlags() & AUI_MGR_PREVIEW_MINIMIZED_PANES == 0:\r\n # No previews here\r\n return False\r\n\r\n self_name = manager.GetPane(self).name\r\n \r\n if not self_name.endswith(\"_min\"):\r\n # Wrong tool name\r\n return False\r\n\r\n return self_name[0:-4]", "def check_win(self):\n return self.win", "def have_window(self) -> bool:\r\n return (\r\n self._first_enc_at is not None\r\n and (time.time() > self._first_enc_at + self.window_size)\r\n )", "def maximise(self) -> None:\n if self.win is not None:\n try:\n if self.win.style_get_property(\"maximize_initially\"):\n self.win.unmaximise()\n else:\n self.win.maximise()\n except ValueError:\n print(\"error :(\")\n self.win.maximize()", "def check_size(height_max, width_max):\n def check_size_window(func):\n @wraps(func)\n def wrapped(self, *args, **kwargs):\n # Extract window size\n height, width = self.stdscr.getmaxyx()\n # Check size window\n if width >= width_max and height >= height_max:\n return func(self, *args, **kwargs)\n else:\n string_warning = \"jtop\"\n string_warning_msg = \"Change size window!\"\n 
size_window_width = \"Width: \" + str(width) + \" >= \" + str(width_max)\n size_window_height = \"Height: \" + str(height) + \" >= \" + str(height_max)\n try:\n height_c = int(height / 2)\n self.stdscr.addstr(height_c - 2, int((width - len(string_warning)) / 2), string_warning, curses.A_BOLD)\n self.stdscr.addstr(height_c - 1, int((width - len(string_warning_msg)) / 2), string_warning_msg, curses.A_BOLD)\n # Show size window\n if width < width_max:\n self.stdscr.addstr(height_c, int((width - len(size_window_width)) / 2), str(size_window_width), curses.color_pair(1))\n else:\n size_window_width = \"Width OK!\"\n self.stdscr.addstr(height_c, int((width - len(size_window_width)) / 2), size_window_width, curses.A_BOLD)\n if height < height_max:\n self.stdscr.addstr(height_c + 1, int((width - len(size_window_height)) / 2), str(size_window_height), curses.color_pair(1))\n else:\n size_window_height = \"Height OK!\"\n self.stdscr.addstr(height_c + 1, int((width - len(size_window_height)) / 2), str(size_window_height), curses.A_BOLD)\n # Set background for all menu line\n self.stdscr.addstr(height - 1, 0, (\"{0:<\" + str(width - 1) + \"}\").format(\" \"), curses.A_REVERSE)\n # Add close option menu\n self.stdscr.addstr(height - 1, 1, \"Q to close\", curses.A_REVERSE)\n except curses.error:\n pass\n return wrapped\n return check_size_window", "def resizeEvent(self, event):\n self.autosize()\n super().resizeEvent(event)", "def Fixed(self):\r\n \r\n return self.SetFlag(self.optionResizable, False)", "def check_channel_window_change_request(self, channel, width, height, pixelwidth, pixelheight):\n return False", "def resizeEvent(self, event):\n self.updateViewer()", "def on_resize(self, width, height):\n self.gamestatemanager.peek().on_resize(width, height)", "def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()", "def _window_size(self):\n width = self.cv.winfo_width()\n if width <= 1: # the window isn't managed by a geometry manager\n width = self.cv['width']\n height = self.cv.winfo_height()\n if height <= 1: # the window isn't managed by a geometry manager\n height = self.cv['height']\n return width, height", "def resize (self):\n return self._arrange_displays()", "def showMaximized(self):\n self.usualSize = self.size()\n self.setWindowState(Qt.WindowMaximized)\n self.move(0, 0)\n self.setFixedSize(QSize(self.screenSize.width(), self.screenSize.height()))\n self.maximized = True\n QWidget.showMaximized(self)", "def set_window_rect(self, value: bool):\n self._caps['setWindowRect'] = value", "def setwinsize(self, rows, cols):", "def is_window(h_wnd):\n _is_window = WINDLL.user32.IsWindow\n _is_window.argtypes = [HWND]\n _is_window.restype = bool\n return _is_window(h_wnd)", "def is_scale_enabled(self) -> bool:\r\n ...", "def isMaximized(self, timeout=20.0, commandId=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n return self.isActionAccepted(timeout=timeout, commandName=Command.MAXIMIZE_WINDOW, commandId=commandId)", "def get_curr_screen_size():\n root = tk.Tk()\n root.update_idletasks()\n root.attributes('-fullscreen', True)\n root.state('iconic')\n size = (root.winfo_width(), root.winfo_height(),)\n root.destroy()\n return size", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def OnResize(self, event):\n self._resizing = True\n self._resize_timer.Start(60, True)", "def OnSize(self, 
event):\r\n \r\n skipped = False\r\n if isinstance(self._frame, AuiFloatingFrame) and self._frame.IsShownOnScreen():\r\n skipped = True\r\n event.Skip()\r\n\r\n if self._frame:\r\n \r\n self.DoFrameLayout()\r\n if wx.Platform == \"__WXMAC__\":\r\n self._frame.Refresh()\r\n else:\r\n self.Repaint()\r\n \r\n if isinstance(self._frame, wx.MDIParentFrame) or isinstance(self._frame, tabmdi.AuiMDIClientWindow) \\\r\n or isinstance(self._frame, tabmdi.AuiMDIParentFrame):\r\n # for MDI parent frames, this event must not\r\n # be \"skipped\". In other words, the parent frame\r\n # must not be allowed to resize the client window\r\n # after we are finished processing sizing changes\r\n return\r\n\r\n if not skipped:\r\n event.Skip()\r\n\r\n # For the snap to screen...\r\n self.OnMove(None)", "def resizeEvent(self, *args, **kwargs):\n self.windowMoved.emit()", "def on_user_resize(self, event):\n self.resize_scaled(drag_rootx=event.x_root + self._mouse_drag_offset)", "def Minimize(self):\r\n \r\n return self.SetFlag(self.optionMinimized, True)", "def adjust_screen_size(self) -> None:\n if self.screen:\n max_row, max_cols = self.screen.getmaxyx()\n if max_row < MIN_SIZE + len(self.all_items):\n self.screen.resize(self.menu_height, max_cols)\n self.draw()", "def on_mode_changed(self):\n\n if self.mode.currentText() != self.ScaleCustom:\n self.width.setEnabled(False)\n self.height.setEnabled(False)\n self.resolution.hide()\n else:\n self.width.setEnabled(True)\n self.height.setEnabled(True)\n self.resolution.show()", "def expand(self) -> bool:\n return self._expand or self.width is not None", "def OnSize(self, event):\r\n\r\n if self._owner_mgr and self._send_size:\r\n self._owner_mgr.OnFloatingPaneResized(self._pane_window, event.GetSize())", "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def multipleWindows(self):\n\t\treturn False if (len(self.driver.window_handles) == 1) else True", "def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))", "def on_resize(self, width, height):\n\t\tglViewport(0, 0, width, height)\n\t\tglMatrixMode(GL_PROJECTION)\n\t\tglLoadIdentity()\n\t\tgluPerspective(70., width / float(height), .1, 1000.)\n\t\tglMatrixMode(GL_MODELVIEW)\n\t\treturn pyglet.event.EVENT_HANDLED", "def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. 
Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()", "def IsMinimized(self):\r\n\r\n return self.HasFlag(self.optionMinimized)", "def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)", "def maximize(self):\n lib.SDL_MaximizeWindow(self._ptr)", "def toggle_maximized(self):\n if self.isMaximized():\n self.showNormal()\n else:\n self.showMaximized()", "def has_ontime_pane(self):\n pass", "def _get_screen_size():\n import PySide.QtGui\n rect = PySide.QtGui.QDesktopWidget().screenGeometry(-1)\n return [rect.width(), rect.height()]", "def inWindow((square_x, square_y)):\n # Take care in the conditions. Since cells are tracked by theur upper-left corner,\n # there is a cell-wide gap on the right and bottom of the window\n if 0<=square_x<=WINDOW_WIDTH-CELL_WIDTH and 0<=square_y<=WINDOW_HEIGHT-CELL_HEIGHT:\n return True\n else:\n return False", "def resizeEvent(self, event):\n if ((event.oldSize().height() == 0 and event.size().height()) or\n (event.oldSize().width() == 0 and event.size().width())):\n self.updateContents()\n return super().resizeEvent(event)", "def is_screen_on(self):\n out = self.adb.get_window_policy_info()\n pattern = re.compile('mScreenOnFully=(true|false)')\n return pattern.search(str(out)).group(1)", "def update_minimization(self):\n\t\twhile gtk.events_pending():\n\t\t\tgtk.main_iteration()\n\t\tself.is_minimized = self.__window.is_minimized()\n\t\t# TODO: pass along updated state to parent pile for possible removal.\n\t\treturn", "def scale(self, app: Application) -> bool:\n pass", "def on_resize(event):\n gloo.set_viewport(0, 0, *event.physical_size)", "def resize_child_window(self):\n s = struct.pack('HHHH', 0, 0, 0, 0)\n x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)\n fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)", "def IsHorizontal(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_BOTTOM]", "def IsHorizontal(self):\r\n\r\n return self.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_BOTTOM]", "def IsDocked(self):\r\n \r\n return not self.HasFlag(self.optionFloating)", "def check_destroy(self) -> bool:\r\n if self._x >= game_values.SCREEN_W / 2:\r\n return True\r\n return False", "def __ev_resize(self, event):\n\n new_size = event.dict['size']\n surface_size = self.__screen.get_size()\n old_center = self.__screen.get_rect().center\n if new_size != surface_size:\n self.__screen = pygame.display.set_mode(new_size,\n self.__screen.get_flags(),\n self.__screen.get_bitsize())\n self.init(offset=vect_diff(self.__screen.get_rect().center,\n old_center))\n self.__screen_width, self.__screen_height = self.__screen.get_size()", "def check_destroy(self) -> bool:\r\n if self._x <= game_values.SCREEN_W / 2:\r\n return True\r\n return False" ]
[ "0.7450744", "0.67073464", "0.6607859", "0.65714055", "0.6563378", "0.6560024", "0.64793974", "0.6461006", "0.63823223", "0.63217413", "0.624592", "0.62382627", "0.6228985", "0.61842704", "0.6160696", "0.6158254", "0.61509866", "0.61256456", "0.610373", "0.60751265", "0.60684496", "0.6063032", "0.6051795", "0.6042474", "0.6041099", "0.6008556", "0.5990024", "0.5957024", "0.59343606", "0.5909357", "0.5892888", "0.5854313", "0.5849346", "0.58423984", "0.5819021", "0.5810336", "0.57847124", "0.5781128", "0.5764611", "0.5730979", "0.57222736", "0.57212126", "0.5713212", "0.5695497", "0.5694262", "0.5693812", "0.5685612", "0.56637853", "0.5655911", "0.56486994", "0.56460536", "0.56458175", "0.56416684", "0.5634683", "0.56330967", "0.56307775", "0.5627707", "0.56045246", "0.5600602", "0.5588574", "0.55819887", "0.5554814", "0.5553163", "0.5542765", "0.5530791", "0.5505294", "0.5489889", "0.54896563", "0.5475007", "0.5474875", "0.547083", "0.54696935", "0.54589313", "0.5458579", "0.54560864", "0.54339844", "0.5431664", "0.5421681", "0.541189", "0.5409047", "0.53987056", "0.5386237", "0.5378066", "0.53681386", "0.5357809", "0.5354063", "0.5349808", "0.5343913", "0.5339886", "0.5339511", "0.53360283", "0.533321", "0.5331995", "0.53218883", "0.531476", "0.531476", "0.52968746", "0.5295921", "0.5293735", "0.5292862" ]
0.7568574
0
How often to refresh the screen when drawing the turtle
def refresh(self): return self._refresh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update(self):\n screen = self.screen\n if screen._tracing == 0:\n return\n elif screen._tracing == 1:\n self._update_data()\n self._drawturtle()\n screen._update() # TurtleScreenBase\n screen._delay(screen._delayvalue) # TurtleScreenBase\n else:\n self._update_data()\n if screen._updatecounter == 0:\n for t in screen.turtles():\n t._drawturtle()\n screen._update()", "def handle_tick():\n reset()\n hideturtle()\n move_aquarium(aq)\n draw_aquarium(aq)\n update()\n ontimer(handle_tick, 10)", "def update(self):\n tracing = self._tracing\n self._tracing = True\n for t in self.turtles():\n t._update_data()\n t._drawturtle()\n self._tracing = tracing\n self._update()", "def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()", "def drawTimer(self,screen):\n if(self.frame - self.genStartFrame >= self.nextCheckpointCost):\n self.checkpoint +=1\n self.lastCheckpointCost = self.nextCheckpointCost\n self.nextCheckpointCost = self.maze.checkFuelCost(self.checkpoint)\n angle = 2*np.pi*(self.nextCheckpointCost - self.frame + self.genStartFrame) / (self.nextCheckpointCost - self.lastCheckpointCost)\n temppos = [50 - 20*np.sin(angle),50 - 20*np.cos(angle)]\n tempsize = int(angle * 3)\n pygame.draw.line(screen,(240,240,240),(50,50),temppos,2)\n pygame.draw.circle(screen,(240,240,240),(50,50),max(24-tempsize,1),1)", "def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()", "def timer():\r\n\r\n T = 0\r\n while True:\r\n print (term.white + term.move_xy(82,1) + 'TIMER : ', end='')\r\n print(T, end='\\r')\r\n time.sleep(1)\r\n T = T + 1", "def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def redraw(self) -> None:\n self.canvas.draw_idle()\n self.Refresh()", "def init_turtle():\n turtle.up()\n turtle.home()", "def repaint(self):\n self.screen.blit(self.source, (0, 0))\n self.lcd.draw(self.lcddraw)\n if self.drawmode & self.DRAW_CIRCLE:\n self.plot_circle()\n pygame.display.flip()", "def up():\n turtleTmp.penup()", "def redraw(self):\r\n self.c.update()", "def update(self):\n self.t = time()\n self.frame += 1\n self.loop(self)\n self.draw_bg()\n self.draw_C()\n if self.cursor:\n self.draw_rect(*self.pos, RED, 2)\n self.draw_grid()\n self.draw_T()\n self.show_info()\n for (surf, rect) in self.surf_list:\n 
self.screen.blit(surf, rect)\n pygame.display.update()\n self.clock.tick(self.fps)", "def draw_square():\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)", "def draw_objects():\n\n # Disable the turtle animation, and erase the scren.\n turtle.tracer(False)\n turtle.hideturtle()\n turtle.clear()\n\n # Draw all the parts of the scene.\n draw_ball()\n draw_target()\n draw_bounds()\n draw_pins()\n\n show_status()\n\n # Now show the screen, after everything has been drawn\n turtle.tracer(True)", "def flush(self):\n if self.fill:\n self._turtle.fill(False)\n self._turtle.fill(True)", "def draw(self, screen):", "def timer(alarm):\n # Start alarm clock again.\n glutTimerFunc(DELAY, timer, 0)\n if exiting:\n global brightness\n brightness -= 0.05\n if brightness < 0.01:\n # Enough dimming - terminate!\n glutLeaveMainLoop()\n glutPostRedisplay()\n \n if animate:\n # Advance to the next frame.\n advance()\n glutPostRedisplay()\n\n if animateTan:\n # Advance to the next frame\n advanceTan()\n glutPostRedisplay()\n\n if animateSilver:\n # Advance to the next frame\n advanceSilver()\n glutPostRedisplay()\n\n if animateDice:\n # Advance to the next frame\n advanceDice()\n glutPostRedisplay()", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def runFrame(self):\n self._drawFrame(self._advanceTime())", "def draw():", "def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up() #Raise pen for movement\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down() #lower pen for drawing", "def animation(self, t):\n self.program['u_clock'] = 2*t\n gloo.clear('black')\n self.program.draw('points')\n return _screenshot((0, 0, self.size[0], self.size[1]))[:,:,:3]", "def draw(self):\n\n State.screen.draw()", "def draw(self):\r\n if not self.stopped:\r\n super().draw()\r\n self.next_frame()", "def graphics_loop(self, font):\n self.screen.blit(self.background, (0, 0))\n if not self.scroll:\n self.all_sprites.draw(self.screen)\n else:\n self.draw_onscreen()\n #display which step we're on\n if pygame.font:\n text = font.render(str(self.stepid), 1, (255, 255, 255))\n textpos = text.get_rect(centerx = int(\n (self.screen.get_width() * 0.5)))\n self.screen.blit(text, textpos)\n pygame.display.flip()\n #cap at x fps\n self.clock.tick(self.max_fps)", "def end_fill():\n turtleTmp.end_fill()", "def draw():\n ant.move(aim)\n ant.x = wrap(ant.x)\n ant.y = wrap(ant.y)\n\n aim.move(random() - 0.5)\n aim.rotate(random() * 10 - 5)\n\n clear()\n goto(ant.x, ant.y)\n dot(4)\n\n ontimer(draw, 100)", "def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)", "def game_window(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n\r\n for total_num in range(self.circle):\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.goto(self.posn.x,self.posn.y)\r\n t.pendown()\r\n t.color(\"#40e0d0\")\r\n t.begin_fill()\r\n t.circle(30)\r\n t.end_fill()\r\n self.posn.x= self.posn.x+65\r\n if self.posn.x>=25:\r\n self.posn.y= self.posn.y-65\r\n self.posn.x=-300", "def update(self, dt):\n self.current_time = pg.time.get_ticks()\n if self._scene.quit:\n pg.mouse.set_visible(True)\n self.done = True\n elif self._scene.done:\n self.change_scene()\n self._scene.update(dt)\n self._scene.draw(self.screen)", "def tick(self):\n # 
detect pressed keys\n if not self.handle_events():\n return False\n # redraw\n if self.pause:\n return True\n self.phy.tick()\n color = 0\n self.screen.fill((0, 0, 0))\n new_image = pygame.Surface(self.screen.get_size()).convert()\n for p in self.phy.objects:\n self.put_object(new_image, p, COLORS[color])\n color = (color + 1) % len(COLORS)\n self.screen.blit(new_image, (0, 0))\n color = 0\n for p in self.phy.objects:\n text = \"%.2E\" % (int(p.mass))\n self.put_text(\n text,\n COLORS[color], (\n int(p.position[0] / SCALE_FACTOR) - len(text) * 5,\n int(p.position[1] / SCALE_FACTOR + int(p.radius) / SCALE_FACTOR * 1.2)\n )\n )\n color = (color + 1) % len(COLORS)\n self.show_status()\n pygame.display.set_icon(self.surficon)\n pygame.display.flip()\n return True", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n try:\n self._turtle.shape('pen.gif')\n except:\n self._turtle.shape('classic')\n self._turtle.color('red')\n self.speed = 0\n \n #pair = self._turtle.color()\n self._pencolor = self._turtle.color()[0]\n self._fillcolor = self._turtle.color()[0]", "def refresh_screen(self):", "def main():\n # Your code here\n draw_graph(turtle, -500, -200, 0)", "def on_draw():\n window.clear()\n world.draw()", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()", "def drawFrame(dt):\n\n global start_time,step,paused,ittr,globalTime\n\n if reachedGoals or ittr > maxIttr or QUIT: #Simulation Loop\n print(\"%s itterations ran ... quitting\"%ittr)\n win.destroy()\n else:\n elapsed_time = time.time() - start_time\n start_time = time.time()\n if not paused:\n updateSim(dt)\n ittr += 1\n globalTime += dt\n for agent in agents:\n if not agent.atGoal:\n trajectories.append([agent.id, agent.gid, agent.pos[0], agent.pos[1], agent.vel[0], agent.vel[1], agent.radius, globalTime])\n\n drawWorld()\n if step == True:\n step = False\n paused = True \n \n win.title('Multi-Agent Navigation')\n win.after(framedelay,lambda: drawFrame(dt))", "def draw_graphic(self):\r\n\r\n t = Turtle()\r\n text = Turtle()\r\n s = t.getscreen()\r\n s.bgcolor(\"orange\")\r\n count = 0\r\n while count < 1:\r\n text.penup()\r\n text.setposition(-100, -100)\r\n text.pencolor(\"purple\")\r\n text.write(\"{}, area: {:.2f}, perimeter: {:.2f}\".format(self.name, self.area(), self.perimeter()), align=\"left\",\r\n font=(\"Arial\", 20, \"bold\"))\r\n t.goto(0, 0)\r\n t.pen(pencolor=\"purple\", fillcolor=\"green\", pensize=6, speed=20)\r\n t.fillcolor(\"red\")\r\n t.begin_fill()\r\n t.pendown()\r\n t.circle(self.__radius)\r\n t.end_fill()\r\n delay(30)\r\n t.clear()\r\n t.reset()\r\n text.clear()\r\n text.reset()\r\n count += 1", "def refresh(self) -> None:\n self.screen.refresh()", "def refresh_view(self):\n if self._step_number % 2 == 0:\n self._view.draw_enemies(self._game.enemies)\n self._view.draw_towers(self._game.towers)\n self._view.draw_obstacles(self._game.obstacles)", "def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()", "def repaint(self):\n pass", "def tick (self):\n\t\n\t\tself.display.clear ()\n\t\tself.draw ()\n\t\tfor sprite in self.sprites:\n\t\t\tsprite.drawToDisplay (self.display)\n\t\tself.display.showFrame ()", "def tick(self):\n 
uh.rotation(270)\n while True:\n self.show_time()\n time.sleep(60)\n uh.off()", "def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)", "def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True", "def draw_you_guess_it():\n window = rg.TurtleWindow()\n\n tx = rg.SimpleTurtle('turtle')\n tx.pen = rg.Pen('blue', 20)\n tx.speed = 5 # Medium\n\n tx.left(60)\n tx.forward(200)\n\n tx.pen_up()\n tx.left(120)\n tx.forward(100)\n tx.left(120)\n\n tx.pen_down()\n tx.forward(200)\n\n window.close_on_mouse_click()", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def plot_refresh():\n figure.canvas.draw()", "def update(self, time_step):\n a = [0,0]\n F = self.force()\n for i in [0,1]: # We have to update x and y\n a[i] = self.force()[i] / self.mass\n self.velocity[i] = self.velocity[i] + a[i]*time_step\n self.position[i] = self.position[i] + self.velocity[i]*time_step # I'm lazy\n self.turtle.goto(self.position) # Comment out the goto if you need the simulation to run really fast; you won't get the animation", "def on_draw(self):\n\n # Start the render. This must happen before any drawing\n # commands. We do NOT need an stop render command.\n arcade.start_render()\n\n # Calculate minutes\n minutes = int(self.total_time) // 60\n\n # Calculate seconds by using a modulus (remainder)\n seconds = int(self.total_time) % 60\n\n # Figure out our output\n output = \"Time minutes:\" + format(minutes) + \" seconds:\" + format(seconds) + \" \"\n\n # See if the output is the same as last frame. 
If not, generate a new\n # text object.\n if not self.timer_text or self.timer_text.text != output:\n self.timer_text = arcade.create_text(output, arcade.color.BLACK, 30)\n\n # Output the timer text.\n arcade.render_text(self.timer_text, 300, 300)", "def draw_flower():\n turtle.right(45)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(135)\n turtle.forward(150)", "def main(self):\n update = self.update\n draw = self.draw\n screen = self.screen\n flip = pg.display.update\n clock = time.time\n frame_length = (1. / self.fps)\n time_since_draw = 0\n last_update = clock()\n fps_timer = 0\n frames = 0\n\n while not self.done:\n clock_tick = clock() - last_update\n last_update = clock()\n time_since_draw += clock_tick\n update(clock_tick)\n if time_since_draw >= frame_length:\n time_since_draw -= frame_length\n draw(screen)\n flip()\n frames += 1\n\n fps_timer, frames = self.handle_fps(clock_tick, fps_timer, frames)\n time.sleep(.01)", "def pause(time=1e-6):\n pl.draw()\n pl.gcf().canvas.start_event_loop(time)", "def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer", "def reset(self):\n TNavigator.reset(self)\n TPen._reset(self)\n self._clear()\n self._drawturtle()\n self._update()", "def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up()\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down()", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def run(self):\n global delta\n while not glfw.window_should_close(self.win):\n # clear draw buffer and depth buffer (<-TP2)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n win_size = glfw.get_window_size(self.win)\n view = self.trackball.view_matrix()\n 
projection = self.trackball.projection_matrix(win_size)\n\n # Met à jour le temps écoulé\n # Nécessaire pour calculer les déplacements\n # effectués grâce au clavier\n current_frame_time = glfw.get_time()\n delta = (current_frame_time - self.last_frame_time)\n self.last_frame_time = current_frame_time\n self.renderShadows(self.depth, self.light_dir)\n # draw our scene objects\n self.draw(projection, view, identity())\n\n # flush render commands, and swap draw buffers\n glfw.swap_buffers(self.win)\n\n # Poll for and process events\n glfw.poll_events()", "def game_draw(self):\n pass", "def redraw_window(win, board, playtime, strikes):\n win.fill((250, 250, 250))\n\n # Display time\n font = pygame.font.SysFont(\"georgia\", 30)\n txt = font.render(\"Time Elapsed: \" + str(format_time(playtime)), 1, (0, 0, 0))\n win.blit(txt, (540 - 300, 560))\n\n # Display strikes\n txt = font.render(\"X \" * strikes, 1, (255, 0, 0))\n win.blit(txt, (10, 560))\n\n # Draw grid lines and board\n board.draw(win)", "def redraw(self):\n self._create()", "def draw(self):\n if self.node:\n if self.async:\n if self.cancel_draw:\n self.after_cancel(self.cancel_draw)\n self.cancel_draw = self.after(3, self._draw)\n else: self._draw()", "def redraw(self, state: EngineeringState) -> None:\n pass", "def draw_monster(generikmon):\r\n turtle.clear()\r\n mirror = 1\r\n drawhalfmonster(mirror, generikmon)\r\n mirror = -1\r\n drawhalfmonster(mirror, generikmon) #draws second half of monster\r\n return()", "def redraw(self):\n self._view.delete(tk.ALL)\n self._view.draw_entities(self._world.get_all_things())\n # calculate the health and score in every step\n max_hp = self._player.get_max_health()\n current_hp = self._player.get_health()\n # if player is invincible, don't change health\n self._statue.set_health(current_hp / max_hp, self._player.get_invincible())\n self._statue.set_score(self._player.get_score())", "def updateGraphics():\n _root.update_idletasks()", "def refresh_screen(self):\n stdscr = self.stdscr\n stdscr.refresh()", "def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()", "def down():\n turtleTmp.pendown()", "def update(self):\n self._curses_window.clear()\n\n self._curses_window.addstr(0, 0,\n \"{:5s} {:5.1f}\".format(\n 'time:', round(time() - self.simulation_start, 1)\n )\n + \"\\t{:13s} {:4.1f}\".format(\n ' steps per s:', round(1 / global_vars.step_duration, 1)\n )\n + \"\\t{:4s} {:4d}\".format(' step:', global_vars.step)\n\t\t\t+ \"\\n{:4s} {:4d}{:1s}\".format('Death cause.. 
eaten:', global_vars.h_eaten, 'h')\n\t\t\t+ \"\\t{:4s} {:4d}{:1s}{:4d}{:1s}\".format(\n 'starved:', global_vars.h_starved, 'h/', global_vars.c_starved, 'c'\n )\n\t\t\t+ \"\\t{:4s} {:4d}{:1s}{:4d}{:1s}\".format(\n 'trampled:', global_vars.h_trampled, 'h/', global_vars.c_trampled, 'c'\n )\n + \"\\t{:4s} {:4d}{:1s}{:4d}{:1s}\".format(\n 'natural death:', global_vars.h_age, 'h/', global_vars.c_age, 'c'\n )\n )\n\n self._curses_window.noutrefresh()", "def move_turtle(self):\n self.forward(self.move_speed)", "def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()", "def run(self):\r\n ##boucle appellant render() 30fois par seconde\r\n r = 0\r\n while r == 0:\r\n r = self.update()\r\n self.render()\r\n time.sleep(1/30)\r\n return r", "def main():\n init_turtle() # initialize the turtle\n draw_long_shape() # draw the long shape\n print(\"Close window to quit.\")\n turtle.done() # pause until the user closes the window", "def done_paint(self):\r\n curses.panel.update_panels()\r\n curses.doupdate()", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def draw(self, force=False):\n self.display.draw(force)", "def draw_lines_fast(orbit_pos, factor):\n for i, (x, y) in enumerate(orbit_pos):\n draw_lines(orbit_pos, x, y, factor, i)\n turtle.update()", "def update_display(self):\r\n\r\n # The display.update() Updates the screen, making the new frame replace the old one. \r\n pg.display.update()\r\n \r\n # clock.tick sets a framerate for the game.\r\n # This is to make the game run at a stable fps \r\n self.clock.tick(cng.FRAMERATE)", "def draw(self, screen):\n # Snow is circular in this case... ;-)\n pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)),\n self.size)\n # now update things for the next loop\n # update the x position\n self.x += self.dx\n # update the y position\n self.y += self.dy\n # bounds checking\n if self.x < 0 or self.x > size_x:\n # floated off the edge of the screen so do a reset\n self.x = random.randrange(size_x)\n self.y = 0\n self.dy = random.randrange(1, 30) + random.random()\n if (self.y > size_y):\n # floated off the bottom of the screen so drift again from the top\n self.x = random.randrange(size_x)\n self.y = 0\n self.dy = random.randrange(1, 30) + random.random()", "def loop(self):\n self.screen.fill((0, 0, 0))\n self.clock.tick(FU_FRAME_RATE)\n self.level.update_loop(self.screen, self.clock)\n self.handle_events()", "def update(direction): \n if direction == 3:\n head[0] -=1\n elif direction == 2:\n head[0] += 1\n elif direction == 1:\n head[1] -= 1\n elif direction == 0:\n head[1] += 1\n \n screen.refresh()\n time.sleep(0.1)", "def run(self):\n while not self.done:\n dt = self.clock.tick(self.fps)\n self.event_loop()\n self.update(dt)\n self.draw()\n pygame.display.flip()\n # pygame.display.update() # can be used to update only part of the screen", "def refresh(self):\n self.goto(self.starting_position)", "def done(self):\n turtle.done()", "def loop(self):\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n return\r\n if not self.done:\r\n self.update_path()\r\n self.algorithm()\r\n\r\n self.screen.fill(self.LINE_COLOR)\r\n self.draw_board()\r\n pygame.display.flip()\r\n self.clock.tick(self.FPS)", "def main():\r\n intialize()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()\r\n shapes()\r\n shapes2()\r\n print (\"Close the window\")\r\n turtle.done()", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n 
self.draw_graph()", "def _update_screen(self):\n self.screen.fill(self.rain_settings.bg_color)\n self.rain.draw(self.screen)\n\n pygame.display.flip()", "def run():\n\n window = get_window()\n\n # Used in some unit test\n if os.environ.get('ARCADE_TEST'):\n window.on_update(window._update_rate)\n window.on_draw()\n elif window.headless:\n # We are entering headless more an will emulate an event loop\n import time\n\n # Ensure the initial delta time is not 0 to be\n # more in line with how a normal window works.\n delta_time = window._draw_rate\n last_time = time.perf_counter()\n\n # As long as we have a context --\n while window.context:\n # Select active view or window\n active = window.current_view or window\n\n active.on_update(delta_time)\n if window.context:\n active.on_draw()\n\n # windwow could be closed in on_draw\n if window.context:\n window.flip()\n\n now = time.perf_counter()\n delta_time, last_time = now - last_time, now\n else:\n import sys\n if sys.platform != 'win32':\n # For non windows platforms, just do pyglet run\n pyglet.app.run(window._draw_rate)\n else:\n # Ok, some Windows platforms have a timer resolution > 15 ms. That can\n # drop our FPS to 32 FPS or so. This reduces resolution so we can keep\n # FPS up.\n import contextlib\n import ctypes\n from ctypes import wintypes\n\n winmm = ctypes.WinDLL('winmm')\n\n class TIMECAPS(ctypes.Structure):\n _fields_ = (('wPeriodMin', wintypes.UINT),\n ('wPeriodMax', wintypes.UINT))\n\n def _check_time_err(err, func, args):\n if err:\n raise WindowsError('%s error %d' % (func.__name__, err))\n return args\n\n winmm.timeGetDevCaps.errcheck = _check_time_err\n winmm.timeBeginPeriod.errcheck = _check_time_err\n winmm.timeEndPeriod.errcheck = _check_time_err\n\n @contextlib.contextmanager\n def timer_resolution(msecs=0):\n caps = TIMECAPS()\n winmm.timeGetDevCaps(ctypes.byref(caps), ctypes.sizeof(caps))\n msecs = min(max(msecs, caps.wPeriodMin), caps.wPeriodMax)\n winmm.timeBeginPeriod(msecs)\n yield\n winmm.timeEndPeriod(msecs)\n\n with timer_resolution(msecs=10):\n pyglet.app.run(window._draw_rate)", "def render(self):\n step = 1\n while step < self.number_steps and self.update():\n step += 1", "def time(self):\n\n self.timing = True\n self.scramble()\n\n self.disp = False", "def update_screen(rk_settings, screen, rock, stars, bullets):\r\n\t# Redraw the screen during each pass through the loop.\r\n\tscreen.fill(rk_settings.bg_color)\r\n\tfor bullet in bullets.sprites():\r\n\t\tbullet.draw_bullet()\r\n\trock.blitme()\r\n\tstars.draw(screen)\r\n\t\t\r\n\t# Make the most recently drawn screen visible.\r\n\tpygame.display.flip()", "def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n 
# tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )", "def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)", "def drawRectangle_1():\n Lucia.color(\"green\",\"yellow\") # Sets the pen color to green and fill color to yellow\n Lucia.seth(90) # Set the initial orientation of the turtle to 0 degrees\n Lucia.begin_fill()\n Lucia.forward(50) # Move the turtle forward by 50 units in the direction that it was pointing\n Lucia.left(90) # Turn the turtle left by 90 degrees relative to the direction it was pointing\n Lucia.forward(100) # Move the turtle forward by 100 units\n Lucia.left(90)\n Lucia.forward(50)\n Lucia.left(90)\n Lucia.forward(100)\n Lucia.left(90) # Make sure the turtle is oriented back to its initial orientation\n Lucia.end_fill()", "def loops_back_to_screen(self):\r\n for segment in self.all_turtles:\r\n if segment.xcor() < -300 or segment.xcor() > 300:\r\n segment.goto(-segment.xcor(), segment.ycor())\r\n\r\n elif segment.ycor() < -300 or segment.ycor() > 300:\r\n segment.goto(segment.xcor(), -segment.ycor())" ]
[ "0.69018406", "0.65853024", "0.6480182", "0.6454136", "0.6448314", "0.62953043", "0.62129325", "0.61860746", "0.6181711", "0.6174027", "0.61470187", "0.61320156", "0.6114843", "0.61122423", "0.60956675", "0.6092913", "0.6088006", "0.60669345", "0.6064306", "0.604553", "0.6025935", "0.6016878", "0.6016402", "0.601633", "0.6010667", "0.6009889", "0.6007923", "0.6003247", "0.5984217", "0.597154", "0.596858", "0.59600204", "0.59585744", "0.5956506", "0.59347296", "0.5927584", "0.5922716", "0.591674", "0.59143203", "0.5914202", "0.59117687", "0.5909472", "0.5904038", "0.58809847", "0.5880755", "0.5865087", "0.5838634", "0.58373046", "0.583626", "0.58319944", "0.5815285", "0.5808917", "0.5803197", "0.5792124", "0.57841486", "0.5780918", "0.57618785", "0.5756114", "0.5752377", "0.5739201", "0.5739201", "0.5737614", "0.57374185", "0.5735695", "0.5735015", "0.5734847", "0.57321894", "0.5730847", "0.5730831", "0.5727241", "0.5724793", "0.57225525", "0.57200783", "0.57195926", "0.5712091", "0.5711534", "0.57098764", "0.57063025", "0.5702504", "0.5702053", "0.56944084", "0.56878704", "0.56841725", "0.5682521", "0.5682451", "0.5681154", "0.5678555", "0.56578964", "0.5648485", "0.5637377", "0.5634068", "0.5626643", "0.5623313", "0.56213117", "0.56208867", "0.56111974", "0.5605993", "0.5605406", "0.560354", "0.5599754", "0.55944526" ]
0.0
-1
The list of all turtles attached to this Window. This attribute may not be altered directly
def turtles(self): return self._turtles[:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turtles(self):\n return self._turtles", "def turbines(self):\n return self.turbine_map.turbines", "def getturtle(self):\n return self", "def thermostats(self):\n\n return self._thermostats", "def lights(self):\n return list(self.GetLights())", "def get_light_list(self):\n return self.light_array", "def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")", "def swing_list(self):\n return self._swing_list", "def graphicsItems(self):\n return self.ctrl.getGraphicsItems()", "def getAllTriStimulus(self):\n return self.tristimulus", "def terminals(self) -> AbstractSet[Terminal]:\n return self._terminals", "def wires(self):\n return [o.wires for o in self.obs]", "def getListOfAllInstantiatedElements(self):\n return _libsbml.Submodel_getListOfAllInstantiatedElements(self)", "def get_triples(self):\n return [\n triple\n for uid, cuds_object in self._registry.items()\n for triple in cuds_object.get_triples()\n ]", "def _all_subnodes(self):\n return self.__dict__.values()", "def listglobal(self):\n return list(self.attributes.keys())", "def items(self) -> List[RadioStation]:\n return self._items", "def terminals(self) -> List[Terminal]:\n return [terminal for prim in self.primitives for terminal in prim._terminals]", "def get_all_thermals(self):\n return self._thermal_list", "def trios(self):\n return self._trios", "def byteruns(self):\n return self._byteruns", "def drawables(self):\n\treturn self._Widget__w['drawables']", "def getRaceList(self):\n\t\tl = []\n\t\tfor r in self.races:\n\t\t\tl.append(r.name)\n\t\treturn l", "def get_symbols_list(self):\n return self.symbols_list", "def items(self):\n return self.root.items()", "def getAll(self):\n return self.__lst", "def getTouchdowns(self):\n return self.touchdowns", "def _get_all_spectra(self):\n pass", "def get_rings(self):\n return iter(self)", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def get_node_list(self):\n return []", "def makeTurtles(num):\n turtles = []\n for i in range(num):\n t = turtle.Turtle()\n #t.speed(0) # can set this for superfast disc movement\n t.up()\n t.shape('square')\n t.shapesize(stretch_len=(2 + i)) #bottom turtle is longest\n t.goto(0, num - i)\n turtles.append(t)\n return turtles", "def etls(self):\r\n return self._etls", "def reactors(self):\n return self._reactors", "def make_list(self):\n return list(self.widget_dict.values())", "def get_all_setups_nodes():\n ta_roots = get_all_setups_roots()\n ta_nodes = [TechAnim_Setup(x) for x in ta_roots]\n return ta_nodes", "def terminals(self):\n\n return self._terminals.getSlice(0)", "def terminals(self):\n\n return self._terminals.getSlice(0)", "def list(self):\n if self.handle == None: return []\n return self.handle.variables.keys()", "def get_orphans(self):\n return self.orphans", "def all(self):\n return list(self)", "def getSymbols(self):\n return self.alpha.getSymbols()", "def get_all_variables(self):\n return self._properties.copy()", "def get_all(self):\n # s = torch.FloatTensor(self._states).to(device)\n # a = torch.FloatTensor(self._actions).to(device)\n # r = torch.FloatTensor(self._rewards).to(device)\n return self._episodes", "def twin_axes (self):\n return self._twin_axes", "def getAllTheta(self):\n return self.theta_set", "def list(self):\n\n result = []\n for i in self.bots:\n result.append(i.name)\n return result", "def 
getListOfAdditionalGraphicalObjects(self):\n return _libsbml.Layout_getListOfAdditionalGraphicalObjects(self)", "def get_all_variables(self):\n return [self.item]", "def getstate(self):\n return [elem.getstate() for elem in self]", "def reset(self):\n for Myturtle in self._turtles:\n Myturtle._setmode(self._mode)\n Myturtle.reset()", "def get_all_nodes(self):\n # NOTE: return copy, so no one will screw\n # our list?\n return self.nodes", "def wires(self) -> Iterator[TopoDS_Wire]:\n return map(Wire, self._top_exp.wires())", "def __dir__(self) -> list[str]:\n d = list(super().__dir__())\n d.extend([w.name for w in self._list if not w.gui_only])\n return d", "def get_all_windows(self):\n success, result = self.manager.c.eval(\n textwrap.dedent(\n \"\"\"\n [win.wid for win in self.core.mapped_windows]\n \"\"\"\n )\n )\n assert success\n return eval(result)", "def symbols(self) -> List[SingleMapping]:\n return self._symbols", "def rvs(self):\n return self._root.rvs()", "def rays(self):\n try:\n return self._rays\n except:\n self._rays = [list(x) for x in self.ray_generator()]\n return self._rays", "def get_all_variables(self):\n return []", "def _get_current_session_tiling_list(self) -> List:\n return self._data[-1][History._TILINGS]", "def all_descendants(self):\r\n return Node.s_all_descendants(self)", "def nodes(self): \n return [n for n in self.iternodes()]", "def list(self):\n return self._get_list()", "def get_all_nodes(self):\n return self._get_all_nodes()", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def swing_list(self):\n return None", "def getArrettes(self) -> list:\n return self._arrettes", "def Targets(self):\n return self._targets", "def get_all_hubs(self):\n return self.all_hubs", "def getEntireASGList( self ):\r\n return self.__trackASG.keys()", "def get_nodes(self):\n return [node for node in self._nodes.itervalues()]", "def get_nodes(self):\n return [node for node in self._nodes.itervalues()]", "def get_children(self):\n return []", "def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a", "def tank_name_list(self):\n return list(self._node_reg.tank_names)", "def getTrajectories(self):\n\n\t\t\treturn self.__XPEnsembleBefore, self.__XPEnsembleAfter", "def getVehicles(self):\n return self.vehicles", "def legends(self):\n return [leg for leg in self._legends]", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def list(self):\n return self._observe_list", "def screenshots(self):\n return self._screenshots", "def get_drawn_objects(self):\n return self._drawnObjects", "def get_radios(self):\n return self.get_object(\"radio\")", "def all_hypernyms(self):\n hypernyms = []\n for path in self.hypernym_paths():\n for synset in path:\n if synset is not self:\n hypernyms.append(synset)\n return set(hypernyms)", "def wheel_attributes(self):\n wheel1 = self.wheel\n wheel2 = TranslatedShape(shape_in=wheel1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n wheel3 = MirroredShape(shape_in=wheel1,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n wheel4 = MirroredShape(shape_in=wheel2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car/2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [wheel1, wheel2, wheel3, wheel4]", "def knobs(self):\n return self.Knobs(self)", "def 
surfaces(self):\n return self._surfaces", "def directions(self):\n return []", "def get_all_latched(self):\n return self.__latched_states", "def GetSignals(cls):\n return []", "def list(self) -> list:\n return list(self)", "def __init__(self):\n self._tyrannosaurus = []\n self._triceratops = []", "def get_aux_windows(self):\n return self.aux_windows.keys()", "def swing_modes(self):\n return self._swing_list", "def get_all(self):\n return self.__items", "def get_visuals(self):\n return self.scene['visuals']", "def get_root_children(self):\n return self.browser.tags", "def observables(self):\r\n return self.__obs", "def GetChildren( self ):\n children = [\n cWrpr \n for cWrpr in GameNodePath.GetChildren( self ) \n if not cWrpr.data.getPythonTag( TAG_IGNORE )\n ]\n return children" ]
[ "0.8411543", "0.6130211", "0.5962633", "0.59047854", "0.5882312", "0.5679309", "0.56639826", "0.5651566", "0.5635582", "0.5587322", "0.5562676", "0.5513544", "0.5510087", "0.5497294", "0.54633754", "0.54598933", "0.5458061", "0.5453143", "0.5407893", "0.53814805", "0.53572404", "0.5338319", "0.53330684", "0.5332764", "0.5330036", "0.53272617", "0.5320306", "0.53073496", "0.52968353", "0.5290663", "0.5281225", "0.52697515", "0.52472186", "0.5209588", "0.52004737", "0.51952636", "0.5192751", "0.5190569", "0.5190569", "0.5180263", "0.5169613", "0.5169093", "0.51667076", "0.51597446", "0.5155991", "0.51537657", "0.5142152", "0.5134291", "0.51322645", "0.51316565", "0.51135516", "0.5109808", "0.5107661", "0.5105984", "0.5105073", "0.50942886", "0.50902313", "0.50854963", "0.50827295", "0.50796163", "0.5073391", "0.5072602", "0.5059538", "0.50561327", "0.5054482", "0.50437737", "0.5035762", "0.5034846", "0.5032455", "0.5018206", "0.50157326", "0.50088036", "0.50088036", "0.5008548", "0.5001892", "0.5000745", "0.4986056", "0.49829388", "0.49828917", "0.49694416", "0.49662733", "0.49624503", "0.4960534", "0.49593878", "0.49585962", "0.49553642", "0.4954316", "0.49542436", "0.49502167", "0.4944997", "0.49420595", "0.4941953", "0.4939035", "0.49295476", "0.49247038", "0.49245423", "0.49243566", "0.49209186", "0.491695", "0.49142146" ]
0.8240958
1
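The row above pairs the docstring "The list of all turtles attached to this Window" with a getter that simply returns `self._turtles[:]`. A minimal sketch of that defensive-copy pattern follows; the `Window` scaffolding, the `@property` decorator, and the `_add_turtle` helper are illustrative assumptions, not part of the dataset row.

```python
# Sketch of the copy-on-read property pattern shown in the row above.
# Returning a slice copy means callers can never mutate the window's
# internal turtle list through this attribute.
class Window:
    def __init__(self):
        self._turtles = []                 # internal, mutable registry

    @property
    def turtles(self):
        """The list of all turtles attached to this Window."""
        return self._turtles[:]            # slice copy, as in the dataset row

    def _add_turtle(self, turtle):
        self._turtles.append(turtle)       # hypothetical internal registration hook
```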
The list of all pens attached to this Window. This attribute may not be altered directly
def pens(self): return self._pencils[:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawables(self):\n\treturn self._Widget__w['drawables']", "def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer", "def swing_list(self):\n return self._swing_list", "def getPixelsBuffer(self):\n\t\treturn self.leds", "def get_list_powers(self):\r\n return self.ps", "def getMenuItemPixels(cls):\n return cls.menuItemPixels", "def graphicsItems(self):\n return self.ctrl.getGraphicsItems()", "def window_handles(self):\n pass", "def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]", "def make_list(self):\n return list(self.widget_dict.values())", "def prvs(self): \n return self._link_reg.prvs", "def swing_list(self):\n return None", "def gpio_properties(self):\n res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)\n if res < 0:\n raise errors.JLinkException(res)\n\n num_props = res\n buf = (structs.JLinkGPIODescriptor * num_props)()\n res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)\n if res < 0:\n raise errors.JLinkException(res)\n\n return list(buf)", "def colors(self):\r\n\t\treturn self._colors", "def listdimension(self):\n return list(self.dimensions.keys())", "def pids(self):\r\n return copy(self._pids)", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def port_list(self):\n return self._port_list", "def power_pumps(self):\n return self._link_reg.power_pumps", "def pumps(self): \n return self._link_reg.pumps", "def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()", "def colors(self):\n return self[\"colors\"]", "def colors(self):\n return self[\"colors\"]", "def colors(self):\n return self._colors", "def knobs(self):\n return self.Knobs(self)", "def get_Pbs(self):\r\n return self.Pbs", "def GetAttributes(self, pane):\r\n\r\n attrs = []\r\n attrs.extend([pane.window, pane.frame, pane.state, pane.dock_direction,\r\n pane.dock_layer, pane.dock_pos, pane.dock_row, pane.dock_proportion,\r\n pane.floating_pos, pane.floating_size, pane.best_size,\r\n pane.min_size, pane.max_size, pane.caption, pane.name,\r\n pane.buttons, pane.rect, pane.icon, pane.notebook_id,\r\n pane.transparent, pane.snapped, pane.minimize_mode])\r\n\r\n return attrs", "def ppix(self):\n return self._ppix", "def get_light_list(self):\n return self.light_array", "def lights(self):\n return list(self.GetLights())", "def channels(self): # type: (...) 
-> List[BlendingRangePair]\n return self._channels", "def bindings(self):\n return self.__bindings", "def listglobal(self):\n return list(self.attributes.keys())", "def imageList(self):\n return self.__imageList", "def change_marker_props(self, pens):\n for marker in self._lines:\n for line in marker:\n line.setPen(pens.get_display_pen())\n\n for marker in self._points:\n for point in marker:\n point.setPen(pens.get_display_pen())", "def props(self):\n return self._props", "def props(self):\n return self._props", "def GetImageList(self):\r\n\r\n return self._imageListNormal", "def ports(self): # type: () -> t.Dict[str, t.List[t.Dict[str, str]]]\n return self.network_settings['Ports']", "def getValues(self):\n return [self.scale_min, self.scale_max]", "def intensities(self):\n return self._intensities.copy()", "def monitoredProcs(self):\n return self._pidToProcess.itervalues()", "def getPixels(self):\n self._logger.debug(\"getPixels\")", "def getViewPorts(self):\n return self._viewPorts", "def pvalues(self):\n return self._pvalues", "def pvalues(self):\n return self._pvalues", "def wires(self):\n return [o.wires for o in self.obs]", "def get_list_powers(self):\r\n return self._api.get_list_powers()", "def iter_colors(self):\n return itervalues(self)", "def points(self):\n return self._points", "def points(self):\n return self._points", "def points(self):\n return self._points", "def points(self):\n return self._points", "def points(self):\n return self._points", "def get_plot_options(self):\n plot_options = []\n # Get pumping rate plot options\n op = self.pumprate.get_plot_options()\n if op['visible']:\n plot_options.append(op)\n # Get associated data options\n for i in range(self.well_count()):\n well_options = self.wells[i].get_plot_options()\n plot_options.extend(well_options)\n return(plot_options)", "def bins (self):\n return self._bins", "def bins (self):\n return self._bins", "def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})", "def pids(self):\n return self._pidToProcess.iterkeys()", "def get_prominent_figures():\n result = []\n handles = get_all_handles()\n\n for handle in handles:\n result.append(handle)\n\n return jsonify({\n 'handles': result\n })", "def getCoordinates(self):\n return list(self.gridVars.keys())", "def graphics_npoints(self):\n npoints_ = _pychidg.f90wrap_graphics_npoints(self=self._handle)\n return npoints_", "def getChildPIDs(self):\n\t\treturn self.pids", "def list_descriptors(self):\n raise NotImplementedError", "def bins(self):\n return self._bins", "def get_all_windows(self):\n success, result = self.manager.c.eval(\n textwrap.dedent(\n \"\"\"\n [win.wid for win in self.core.mapped_windows]\n \"\"\"\n )\n )\n assert success\n return eval(result)", "def apertures(self):\n return self._apertures", "def _prop(self):\n return [\"%s = %s\" % (str(k), repr(v)) for k, v in self.prop.items()]", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def FramesizeList(self):\n\t\treturn self._get_attribute('framesizeList')", "def pbvs(self): \n return self._link_reg.pbvs", "def psvs(self): \n return self._link_reg.psvs", "def getListOfPorts(self):\n return _libsbml.CompModelPlugin_getListOfPorts(self)", "def wheels_properties(self):\n height_wheels = 180.\n radius_wheels = 300.\n width_wheels = 80.\n return height_wheels, radius_wheels, width_wheels", "def __iter__(self) -> Iterator[Number]:\n return (getattr(self, p) for p in ['xmin', 'ymin', 'xmax', 'ymax'])", "def get_pow_all(self):\n return 
map(self.get_pow, self.circles.keys())", "def get_plots(self):\n return list(self.plots.values())", "def screens_channels_properties_to_remove(self) -> ConfigNodePropertyArray:\n return self._screens_channels_properties_to_remove", "def instruction_probabilities(self):\n return list(self.instruction_pool.values())", "def get_visual_properties(self) -> dict:\n return self._vis_properties", "def widgets(self) -> Munch:\n return self._widgets", "def getBindings(self):\n return self.getBindingManager().getBindings()", "def image_list(self):\n return self._image_list", "def image_properties(self):\n return self._image_properties", "def probs(self) -> List:\n return self._probs", "def get_drawables(self):\n w,h = self.image.get_size()\n return [DrawableSurface(self.image, \n pygame.Rect(self.pos_x, self.pos_y, w, h))]", "def get_controllable_properties(element):\n log.debug(\"utils\", \"element %r, %d\", element, isinstance(element, gst.Bin))\n res = []\n if isinstance(element, gst.Bin):\n for child in element.elements():\n res.extend(get_controllable_properties(child))\n else:\n for prop in gobject.list_properties(element):\n if prop.flags & gst.PARAM_CONTROLLABLE:\n log.debug(\"utils\", \"adding property %r\", prop)\n res.append((element, prop))\n return res", "def pin_nums(self):\n return self._pin_nums", "def rgb(self):\n return [self.__r, self.__g, self.__b]", "def get_drawables(self):\n to_draw = []\n for k,v in self._to_draw.items():\n if isinstance(v,Iterable):\n for i in v:\n to_draw.append(i)\n else:\n to_draw.append(v)\n return to_draw", "def getNativeDigitsList(self):\r\n return self.phone.sx('(send config-manager get-setting \"./yapas/display/native-digits\")',convertToString=True, doNotReport=True)", "def __iter__(self):\n return self._visible_setting_names_gen", "def points(self) -> PointList:\n return self._points", "def GetButtonsImageList(self):\r\n\r\n return self._imageListButtons", "def swing_modes(self):\n return self._swing_list", "def _size_pixels(self, renderer):\n return renderer.points_to_pixels(self.size)", "def get_color_list(self):\n lst = []\n\n _lib.caca_get_dither_color_list.argtypes = [_Dither]\n _lib.caca_get_dither_color_list.restype = ctypes.POINTER(ctypes.c_char_p)\n\n for item in _lib.caca_get_dither_color_list(self):\n if item is not None and item != \"\":\n lst.append(item)\n else:\n #memory occurs otherwise\n break\n\n return lst", "def properties(self):\n return self._props", "def GetProperties(self):\n return [FOLD]", "def GetProperties(self):\n return [FOLD]" ]
[ "0.6194239", "0.5969787", "0.59447014", "0.58146423", "0.58107215", "0.5731491", "0.5615673", "0.557614", "0.5560097", "0.55471444", "0.5541459", "0.5525621", "0.54055196", "0.53989977", "0.5346334", "0.53336155", "0.53113025", "0.5300297", "0.52949387", "0.52927554", "0.5269405", "0.52664447", "0.52664447", "0.5252926", "0.5240103", "0.52373326", "0.5233286", "0.51972896", "0.51836956", "0.5163817", "0.51609844", "0.51586545", "0.51538247", "0.51466775", "0.5145791", "0.5140194", "0.5140194", "0.5127417", "0.51264995", "0.5116611", "0.51161426", "0.51090556", "0.5105914", "0.51042247", "0.51011336", "0.51011336", "0.50954044", "0.50943667", "0.5093776", "0.5087762", "0.5087762", "0.5087762", "0.5087762", "0.5087762", "0.50865734", "0.5085242", "0.5085242", "0.5079913", "0.5079223", "0.50751686", "0.50560933", "0.5056071", "0.50500643", "0.5048232", "0.5039044", "0.5038116", "0.50358135", "0.5030054", "0.50288385", "0.50280553", "0.5025959", "0.50200886", "0.5013804", "0.50133175", "0.50003034", "0.4999043", "0.4994703", "0.4994619", "0.4994416", "0.49941754", "0.4991716", "0.49869883", "0.4986516", "0.49858353", "0.49827278", "0.49823618", "0.49823546", "0.49754423", "0.4963466", "0.49634418", "0.4962786", "0.49613822", "0.49611595", "0.4960742", "0.49596682", "0.49596605", "0.49589443", "0.4950894", "0.49466926", "0.49466926" ]
0.7543301
0
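The pens row above uses the same copy-on-read idiom (`self._pencils[:]`). A short, hedged usage sketch shows why such an attribute "may not be altered directly"; the tiny `Window` class and its initial contents are assumptions made only for illustration.

```python
# Hedged usage sketch: mutating the returned copy does not touch the
# window's own pen list.
class Window:
    def __init__(self):
        self._pencils = ["pen-1", "pen-2"]   # assumed internal contents

    @property
    def pens(self):
        return self._pencils[:]              # slice copy, as in the row above

win = Window()
grabbed = win.pens        # caller receives a copy
grabbed.clear()           # mutating the copy...
print(win.pens)           # ...leaves the window's list intact: ['pen-1', 'pen-2']
```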
The list containing all supported turtle shapes. This attribute may not be altered directly
def shapes(self): return self._frame.getshapes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shapes(self):\n return [load_node(item) for item in self.get_attribute('shapes')]", "def turtles(self):\n return self._turtles", "def shapes(self):\n return self.container['shapes']", "def turtles(self):\n return self._turtles[:]", "def shapes(self):\n assert self._shapes is not None, (\"BaseSlide.shapes referenced \"\n \"before assigned\")\n return self._shapes", "def get_tumor_stroma_shapes(self):\n print(\"Deprecated. Use get_path_class_shapes() instead.\")", "def register_shapes():\n turtle.Screen().register_shape(\"saphire.gif\")\n turtle.Screen().register_shape(\"player_right.gif\")\n turtle.Screen().register_shape(\"player_left.gif\")\n turtle.Screen().register_shape(\"walls.gif\")", "def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()", "def getshapes(self):\n return sorted(self._shapes.keys())", "def made_shapes(self):\n return self._made_shapes", "def type_shapes(self):\n return self._type_shapes", "def shapes():\n # -- Define a list of locations to search for, starting by\n # -- adding in our builtin shape locations\n paths = [\n os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n 'shapes',\n ),\n ]\n\n # -- If we have any paths defined by environment\n # -- variables we should add them here\n if constants.PLUGIN_ENVIRONMENT_VARIABLE in os.environ:\n paths.extend(\n os.environ[constants.PLUGIN_ENVIRONMENT_VARIABLE].split(';'),\n )\n\n shape_list = list()\n\n for path in paths:\n for root, _, files in os.walk(path):\n for filename in files:\n if filename.endswith('.json'):\n shape_list.append(\n os.path.join(\n root,\n filename,\n ),\n )\n\n return shape_list", "def shape_type(self):\n return \"circle\"", "def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()", "def get_path_class_shapes(self):\n df = self.df_roi\n self.tumor_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Tumor\"]['geometry']])\n self.stroma_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Stroma\"]['geometry']])\n self.dcis_shape = MultiPolygon([self._get_shape(i) for i in df[df.class_ == \"Other\"]['geometry']]) \n\n # path_class_qupath_names = [\"Tumor\", \"Stroma\", \"Other\"]\n # for path_class in path_class_qupath_names:\n # mpolygon = MultiPolygon([self._get_shape(i) for i in df[df.class_ == path_class]['geometry']])\n\n # # replace name\n # if path_class == \"Other\":\n # path_class = \"dcis\"\n\n # attr_name = path_class.lower() + \"_shape\"\n # setattr(self, path_class, mpolygon)", "def get_bullet_drawables(self):\n return [bullet.get_drawables() for bullet in self.bullets]", "def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables", "def createPickColor():\n color_list = []\n\n for i in range(50, 450, 100): #Create the 4 shapes to show colors\n point1 = g.Point(50, i)\n point2 = g.Point(100, i+50)\n shape = g.Rectangle(point1, point2)\n color_list.append(shape)\n\n #Set the right colors\n color_list[0].setFill(\"Blue\")\n color_list[1].setFill(\"Green\")\n color_list[2].setFill(\"Yellow\")\n color_list[3].setFill(\"Red\")\n\n return color_list", "def shapes(self, shapes):\n\n self.container['shapes'] = shapes", "def shapes(self, shape_list):\n for item in shape_list:\n item.store()\n shape_list_uuids = [item.uuid for item in shape_list]\n 
self.set_attribute('shapes', shape_list_uuids)", "def drawables(self):\n\treturn self._Widget__w['drawables']", "def shape(self):", "def shape(self):", "def get_tool_shape_ids(self):\n\n tool_shape_ids = [self.variables.zoom_rect_id,\n self.variables.select_rect_id]\n return tool_shape_ids", "def GetShapes(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetShapes(self, *args)", "def __init__(self):\n self.superelevations = []\n self.shapes = []", "def input_type_shapes(self):\n return self._input_type_shapes", "def makeTurtles(num):\n turtles = []\n for i in range(num):\n t = turtle.Turtle()\n #t.speed(0) # can set this for superfast disc movement\n t.up()\n t.shape('square')\n t.shapesize(stretch_len=(2 + i)) #bottom turtle is longest\n t.goto(0, num - i)\n turtles.append(t)\n return turtles", "def clear_selected_shapes(self):\n self.shapes_to_draw = []", "def shape(self) -> str:\n return \"circle\"", "def auto_shapes(self):\n return self.container['auto_shapes']", "def to_shapes(self): \n stroke_color = self.raw.get('color', None)\n fill_color = self.raw.get('fill', None)\n width = self.raw.get('width', 0.0)\n\n iso_shapes = []\n\n # convert to strokes\n if not stroke_color is None:\n iso_shapes.extend(self._to_strokes(width, stroke_color))\n\n # convert to rectangular fill\n if not fill_color is None:\n iso_shapes.extend(self._to_fills(fill_color))\n\n return iso_shapes", "def shape(self) -> Shape:", "def list():\n return [Cliff.CLIFF_L,\n Cliff.CLIFF_FL,\n Cliff.CLIFF_R,\n Cliff.CLIFF_FR,\n Cliff.VIRTUAL_WALL]", "def get_random_shape(self):\n\n shape_name = random.choice([\"circle\", \"square\", \"rectangle\"])\n return self.get_shape(shape_name)", "def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()", "def get_drawables(self):\n to_draw = []\n for k,v in self._to_draw.items():\n if isinstance(v,Iterable):\n for i in v:\n to_draw.append(i)\n else:\n to_draw.append(v)\n return to_draw", "def get(self):\n with self.lock:\n return list(self.jobShapes)", "def draw(hyp):\r\n print 'g.createShape(',hyp.getAttList(),')'\r\n print type(hyp.getAttList())\r\n g.createShape(hyp.getAttList())", "def getOutShapes(self):\n\t\treturn self.output_shape", "def graphicsItems(self):\n return self.ctrl.getGraphicsItems()", "def shapes_for(self, ent):\n return self._ent_to_shapes[ent]", "def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, 
self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True", "def draw(hyp):\n print 'g.createShape(',hyp.getAttList(),')'\n print type(hyp.getAttList())\n g.createShape(hyp.getAttList())", "def output_type_shapes(self):\n return self._output_type_shapes", "def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()", "def implement_shape(self, shape):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n for coord in shape:\n self.givebirth(coord)", "def data_shapes(self):", "def shape_in(self):\n return [c.size for c in self.coords]", "def get_light_list(self):\n return self.light_array", "def colorShape(objList, color):\r\n i = color\r\n # find the color index by names:\r\n if color == 'yellow': i = 17\r\n elif color == 'red': i = 13\r\n elif color == 'blue': i = 6\r\n elif color == 'cian': i = 18\r\n elif color == 'green': i = 7\r\n elif color == 'darkRed': i = 4\r\n elif color == 'darkBlue': i = 15\r\n elif color == 'white': i = 16\r\n elif color == 'black': i = 1\r\n elif color == 'gray': i = 3\r\n elif color == 'none': i = 0\r\n # find shapes and apply the color override:\r\n shapeTypeList = ['nurbsCurve', 'nurbsSurface', 'mesh', 'subdiv']\r\n if objList:\r\n for objName in objList:\r\n objType = cmds.objectType(objName)\r\n # verify if the object is the shape type:\r\n if objType in shapeTypeList:\r\n # set override as enable:\r\n cmds.setAttr(objName+\".overrideEnabled\", 1)\r\n # set color override:\r\n cmds.setAttr(objName+\".overrideColor\", i)\r\n # verify if the object is a transform type:\r\n elif objType == \"transform\":\r\n # find all shapes children of the transform object:\r\n shapeList = cmds.listRelatives(objName, shapes=True, children=True)\r\n if shapeList:\r\n for shape in shapeList:\r\n # set override as enable:\r\n cmds.setAttr(shape+\".overrideEnabled\", 1)\r\n # set color override:\r\n cmds.setAttr(shape+\".overrideColor\", i)", "def get_data_shapes():\n return [None, None, None], [None, None], [None, ]", "def get_background_drawables(self):\n return [background.get_drawables() for background in self.background]", "def shape_type(self):\n return \"rectangle\"", "def __init__(self):\n self.tape_tag = None\n self.independentVariableShapeList = []\n self.dependentVariableShapeList = []", "def get_radios(self):\n return self.get_object(\"radio\")", "def shape_halo(self):\n return [c.size for c in self.coords_halo]", "def GetFreeShapes(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_GetFreeShapes(self, *args)", "def tools(self):\n tool1 = TranslatedShape(shape_in=\n RotatedShape(shape_in=\n Cylinder(radius=\n self.wheels_properties[\n 1] + 10.,\n height=400.,\n position=self.position),\n rotation_point=self.position,\n vector=Vector(1, 0, 0),\n angle=radians(90)),\n displacement=\n Vector(self.wheels_properties[0],\n 299.,\n -self.positions[1][0]))\n tool2 = TranslatedShape(shape_in=tool1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n tool3 = MirroredShape(shape_in=tool1,\n reference_point=translate(self.position,\n \"y\",\n 
self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n tool4 = MirroredShape(shape_in=tool2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [tool1, tool2, tool3, tool4]", "def load_shape(self, shape):\n Points = []\n for surf in shape:\n function = self.function_dict[surf[\"type\"]]\n points = function(surf)\n Points.append(points)\n Points = np.concatenate(Points, 0)\n return Points", "def shape_list(x):\n shape = list(x.shape)\n\n return shape", "def shape(self):\n return self._shape", "def get_non_tool_shape_ids(self):\n\n all_shape_ids = self.variables.shape_ids\n tool_shape_ids = self.get_tool_shape_ids()\n return list(numpy.setdiff1d(all_shape_ids, tool_shape_ids))", "def shapes_list(inp):\n shapes_static = inp.get_shape().as_list()\n shapes_dynamic = tf.shape(inp)\n cleaned_shape = [shapes_dynamic[i] if s is None else s for i, s in enumerate(shapes_static)]\n return cleaned_shape", "def create_random_shapes(shapesList):\n for _ in range(5):\n shapesList.append( Circle(randint(1,5)) )\n\n for _ in range(5):\n shapesList.append( Rectangle(randint(1,5), randint(1,5)) )", "def wheel_attributes(self):\n wheel1 = self.wheel\n wheel2 = TranslatedShape(shape_in=wheel1,\n displacement=Vector(0.,\n 0.,\n self.positions[1][0]\n - self.positions[1][1]))\n wheel3 = MirroredShape(shape_in=wheel1,\n reference_point=translate(self.position,\n \"y\",\n self.width_car / 2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n wheel4 = MirroredShape(shape_in=wheel2,\n reference_point=translate(self.position,\n \"y\",\n self.width_car/2),\n vector1=Vector(1, 0, 0),\n vector2=Vector(0, 0, 1))\n return [wheel1, wheel2, wheel3, wheel4]", "def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w", "def shape(self):\r\n return self._shape", "def get_data_shapes():\r\n return ([None, None, None], [None, None], [None, None, None])", "def shape(self):\n return self._observe_list.shape", "def supported_color_modes(self) -> set[str] | None:\n color_modes = [COLOR_MODE_ONOFF]\n if self.dp_code_bright in self.tuya_device.status:\n color_modes.append(COLOR_MODE_BRIGHTNESS)\n\n if self.dp_code_temp in self.tuya_device.status:\n color_modes.append(COLOR_MODE_COLOR_TEMP)\n\n if (\n self.dp_code_colour in self.tuya_device.status\n and len(self.tuya_device.status[self.dp_code_colour]) > 0\n ):\n color_modes.append(COLOR_MODE_HS)\n return set(color_modes)", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}", "def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:", "def matches_shapes(self, shape_):\n m = []\n for i, spec in enumerate(self.shapes):\n if matches(spec, shape_):\n m.append(i)\n return m", "def shape(self) -> Shape:\n raise NotImplementedError()", "def get_orphans(self):\n return self.orphans", "def shape(self, name=None):\n if name is None:\n return self.Myturtle.shapeIndex\n if not name in self.screen.getshapes():\n raise TurtleGraphicsError(\"There is no shape named %s\" % name)\n self.Myturtle._setshape(name)\n self._update()", "def get_background_drawables(self):\n return self.background.get_drawables()", "def shapes_list(l, 
print_=False):\n shps = []\n for x in l:\n if print_:\n print(np.asarray(x).shape)\n shps.append(np.asarray(x).shape)\n return shps", "def shape(self):\n return self.active.shape", "def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer", "def supportedAlleles(self):\n raise NotImplementedError", "def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()", "def shape(self):\n return None", "def update_shape_sprite(self, entity: Entity):\n \n shape_sprite: ShapeSprite = entity.shape_sprite\n \n if entity.id not in self.entities_shapelist:\n entity_shapelist = arcade.ShapeElementList()\n \n # we need to convert from general colours to arcade specific colours\n entity_shapelist.append(arcade.create_rectangles_filled_with_colors(\n shape_sprite.point_list, [COLOUR_MAP[x] for x in shape_sprite.color_list])\n )\n else:\n entity_shapelist = self.entities_shapelist[entity.id]\n\n entity_shapelist.center_x = shape_sprite.position_x\n entity_shapelist.center_y = SCREEN_HEIGHT - shape_sprite.position_y\n entity_shapelist.draw()\n \n self.entities_shapelist[entity.id] = entity_shapelist", "def layers_in_shapes(self) -> dict:\n shapes = {}\n\n for lyr in self.layers:\n shapes[lyr.name] = lyr.input_shape\n\n return shapes", "def __str__(self):\n return \"shape[]\"", "def shape_to_list(shape):\n if isinstance(shape, (list, tuple)):\n return shape\n tmp = []\n if shape == \"\":\n return ()\n for i in shape:\n tmp.append(i.value)\n return tmp", "def shape(self):\n path = super(Arrow, self).shape()\n path.addPolygon(self.arrowHead)\n return path", "def get_shape(self, name):\n\n if name == \"circle\":\n return Circle(random.randint(1, 10))\n\n elif name == \"square\":\n return Square(random.randint(1, 10))\n\n elif name == \"rectangle\":\n return Rectangle(random.randint(1, 10), random.randint(1, 10))", "def select_shape(self, feature, value, **kwargs):\n self.shapes_to_draw.append(\n {'shapes': self.shapes[(self.shapes[feature] == value)]['path'].values,\n 'args': kwargs})", "def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a", "def get_types(self) -> List[str]:\n return sorted(list(self._radii.keys()))", "def speed_list(self) -> list:\n wink_supported_speeds = self.wink.fan_speeds()\n supported_speeds = []\n if SPEED_AUTO in wink_supported_speeds:\n supported_speeds.append(SPEED_AUTO)\n if SPEED_LOWEST in wink_supported_speeds:\n supported_speeds.append(SPEED_LOWEST)\n if SPEED_LOW in wink_supported_speeds:\n supported_speeds.append(SPEED_LOW)\n if SPEED_MEDIUM in wink_supported_speeds:\n supported_speeds.append(SPEED_MEDIUM)\n if SPEED_HIGH in wink_supported_speeds:\n supported_speeds.append(SPEED_HIGH)\n return supported_speeds", "def pshape(self):\n try:\n return plist([x.pshape() for x in self], root=self.__root__)\n except Exception:\n return plist([len(self)], root=self.__root__)", "def shape(self) -> List[str]:\r\n shape = {x: \"\" for x in self.all_qubits}\r\n trans_dict = {'b': 'i', 'd': 'o', 'c': 'io', 'i': 'i', 'o': 'o'}\r\n last_time_used = {x: -math.inf for x in self.all_qubits}\r\n for time_step in self.operations_by_time:\r\n for gate_name in self.operations_by_time[time_step]:\r\n gate = self.operations_by_name[gate_name]\r\n for i, qubit in enumerate(gate['qubits']):\r\n if last_time_used[qubit] >= time_step:\r\n raise ValueError(\r\n \"Invalid connection: Qubit {} 
used multiple times at timestep {}\".format(qubit, time_step))\r\n last_time_used[qubit] = time_step\r\n shape[qubit] += \"\".join([trans_dict[letter] for letter in gate['operation'].shape[i]])\r\n pattern = re.compile(\"^o?(io)*i?$\")\r\n for qubit in shape:\r\n if pattern.match(shape[qubit]):\r\n shape[qubit] = shape[qubit][: 2 - len(shape[qubit]) % 2]\r\n else:\r\n raise ValueError(\"Invalid connection on qubit {}\".format(qubit))\r\n return tuple(shape[qubit] for qubit in sorted(shape))", "def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance", "def get_list(self):\n return self._FF_TYPES", "def get_list(self):\n return self._FF_TYPES", "def redraw_all_shapes(self):\n\n for shape_id in self.variables.shape_ids:\n pixel_coords = self.get_vector_object(shape_id).image_coords\n if pixel_coords:\n new_canvas_coords = self.shape_image_coords_to_canvas_coords(shape_id)\n self.modify_existing_shape_using_canvas_coords(shape_id, new_canvas_coords, update_pixel_coords=False)" ]
[ "0.69762677", "0.6729221", "0.65511197", "0.65308475", "0.6335289", "0.6164756", "0.6140908", "0.6124906", "0.6000171", "0.5989234", "0.5968777", "0.58869016", "0.57155895", "0.5698968", "0.56792194", "0.5672313", "0.5628772", "0.56259376", "0.5622486", "0.5607531", "0.55537033", "0.55349636", "0.55349636", "0.5511032", "0.55057585", "0.54845077", "0.5473095", "0.5465905", "0.5441569", "0.5411334", "0.54037166", "0.53935516", "0.53655493", "0.53558946", "0.5350917", "0.5348902", "0.5315611", "0.53119844", "0.5310059", "0.5307203", "0.5285598", "0.52586347", "0.52457917", "0.5235234", "0.5235157", "0.52123505", "0.5204925", "0.5199339", "0.5177476", "0.5167804", "0.5139099", "0.51057065", "0.50869685", "0.5066315", "0.50647295", "0.50637835", "0.5059616", "0.5054757", "0.5046156", "0.5031055", "0.50309557", "0.50308543", "0.502619", "0.50221664", "0.5005315", "0.50034845", "0.49966484", "0.49918464", "0.49861056", "0.49845737", "0.49742144", "0.4973508", "0.49724954", "0.49688092", "0.4968371", "0.49545616", "0.49485284", "0.49318847", "0.49270394", "0.49251089", "0.4921735", "0.49215943", "0.49077666", "0.49041656", "0.49032265", "0.49012405", "0.4901091", "0.48909995", "0.48867857", "0.48867494", "0.4883504", "0.4877149", "0.48691836", "0.4867665", "0.4866912", "0.485794", "0.48546058", "0.48518535", "0.48518535", "0.48471755" ]
0.6370586
4
Destroys this window and its associated assets
def __del__(self):
    try:
        self._frame._destroy()
    except:
        pass
    self._turtles = []
    self._pencils = []
    del self._frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def destroy(self):\n for window in self.windows:\n try:\n destroy_window(window)\n except:\n pass", "def destroy_window(self) -> None:\n self.master.destroy()\n self.master.master.create_right_left_containers()", "def destroy(self):\n self.window.destroy_output_panel(self.name)", "def __onclosing(self):\n self.window.destroy()", "def onCloseWindow(self, event):\r\n\r\n self.Destroy()", "def delwin(self):\n\t\tfor c in self.components:\n\t\t\tc.delwin()\n\t\tself.win = None", "def destroy(self):\n\n sceneOpts = self.sceneOpts\n contentPanel = self.contentPanel\n\n sceneOpts .removeListener('showXCanvas', self.name)\n sceneOpts .removeListener('showYCanvas', self.name)\n sceneOpts .removeListener('showZCanvas', self.name)\n sceneOpts .removeListener('labelSize', self.name)\n sceneOpts .removeListener('fgColour', self.name)\n sceneOpts .removeListener('showLabels', self.name)\n self.displayCtx .removeListener('location', self.name)\n self.displayCtx .removeListener('bounds', self.name)\n self.displayCtx .removeListener('selectedOverlay', self.name)\n self.displayCtx .removeListener('displaySpace', self.name)\n self.displayCtx .removeListener('radioOrientation', self.name)\n self.overlayList.removeListener('overlays', self.name)\n\n self.__labelMgr.destroy()\n self.__xcanvas.destroy()\n self.__ycanvas.destroy()\n self.__zcanvas.destroy()\n self.__removeEditMenu()\n\n contentPanel.Unbind(wx.EVT_SIZE)\n\n self.__xcanvas = None\n self.__ycanvas = None\n self.__zcanvas = None\n self.__focusedCanvas = None\n self.__labelMgr = None\n\n canvaspanel.CanvasPanel.destroy(self)", "def close(self):\n self.window.destroy()\n self.buttons_window.destroy()", "def DeleteWindow(self):\r\n\r\n if self._wnd:\r\n self._wnd.Destroy()\r\n self._wnd = None", "def destroy(self):\n tk.Frame.destroy(self)", "def destroy_on_close(self):\n self.deleteLater()", "def destroy(self):\r\n self._tidy()\r\n self.stop()\r\n try:\r\n self.opengl.destroy(self)\r\n except:\r\n pass\r\n if self.external_mouse:\r\n try:\r\n self.external_mouse.stop()\r\n except:\r\n pass_\r\n try:\r\n self.mouse.stop()\r\n except:\r\n pass\r\n try:\r\n self.tkwin.destroy()\r\n except:\r\n pass\r\n Display.INSTANCE = None", "def _close_window(self):\n render_window = self._iren.GetRenderWindow()\n render_window.Finalize()\n self._iren.TerminateApp()\n\n del render_window, self._iren, self._ren, self._renWin", "def destroy(self):\r\n if self.cur_message is not None:\r\n self.cur_message.destroy()\r\n self.cur_message = None\r\n if self.board is not None:\r\n self.board.destroy()\r\n self.board = None\r\n hold_sub_displays = True\r\n if not hold_sub_displays and self.game_control is not None:\r\n self.game_control.destroy()\r\n self.game_control = None\r\n if not hold_sub_displays and self.player_control is not None:\r\n self.player_control.destroy()\r\n self.player_control = None\r\n if not hold_sub_displays and self.score_window is not None:\r\n self.score_window.destroy()\r\n self.score_window = None", "def destroy(self):\r\n self.__destroy()", "def destroy_view(self): \n\n self.canvas.destroy()\n self.scrollbar.destroy()\n self.header_frame.destroy()\n self.button_frame.destroy()\n self.twitter_canvas.destroy()\n self.twitter_scrollbar.destroy()", "def finalizeExit(self) -> None:\n base.graphicsEngine.removeAllWindows()\n if self.win is not None:\n print(\"Exiting KarelCraft app, bye!\")\n self.closeWindow(self.win)\n self.win = None\n self.destroy()\n sys.exit()", "def destroy(self):\n self.root.stop()", "def unload(self):\n 
main.msgQ.removeEvent(Constants.CMSG_CHANGE_AVATAR_TYPE)\n main.msgQ.removeEvent(Constants.CMSG_CHANGE_TEAM_PVP)\n main.msgQ.removeEvent(Constants.CMSG_START_TO_READY_GAME)\n main.msgQ.removeEvent(Constants.CMSG_CANCEL_TO_JOIN_GAME)\n main.msgQ.removeEvent(Constants.CMSG_START_SIXTY_SECONDS_COUNTER)\n self.mainFrame.destroy()", "def destroy(self):\n gameengine.GameEngine().game_objects.remove(self)", "def image_window_destroy(self, widget, data=None):\n self._quit()", "def destroy(self):\n self.unbindAllWidgets()\n self.__func = None\n self.__instance = None", "def cleanup(self, window):\n if self._components:\n for component in self._components:\n component.cleanup(window)", "def destructor(self):\n cv2.destroyAllWindows()", "def destroy(self):\n bullet_tools.tear_down_scene()", "def close(self):\n self._screen = None\n pygame.display.quit()", "def destroy(self, *args):\n logger.debug(\"WarningSc.destroy called\")\n if self.manageGTK:\n if self.quit:\n sys.exit(0)\n else:\n self.gui.get_object(self.window).destroy()\n while gtk.events_pending():\n gtk.main_iteration()", "def cleanup(self):\n pygame.quit()", "def destroy (self,event=None):\n \n self.top.withdraw() # Don't allow this window to be destroyed.", "def delete_window(self):\r\n self.mw.eval('::ttk::CancelRepeat')\r\n SlTrace.lg(\"Closing windows\")\r\n ''' \r\n ActiveCheck.clear_active() # Disable activities\r\n if self.score_win is not None:\r\n self.score_win.destroy()\r\n self.score_win = None\r\n if self.mw is not None and self.mw.winfo_exists():\r\n self.mw.quit()\r\n self.mw.destroy()\r\n self.mw = None\r\n '''\r\n if self.on_exit is not None:\r\n self.on_exit()\r\n \r\n sys.exit() # Else quit\r", "def quit(self):\n\n self.main_window.destroy()", "def close(self):\n\n cv2.destroyWindow(winname=self.title)", "def destroy(self):\n widget = self.widget\n if widget:\n # On Windows, it's not sufficient to simply destroy the\n # widget. It appears that this only schedules the widget \n # for destruction at a later time. 
So, we need to explicitly\n # unparent the widget as well.\n widget.setParent(None)\n if widget.isWidgetType():\n widget.destroy()\n self.widget = None", "def destroy(self):\n self.__overlayList.removeListener('overlays', self.__name)\n base.Action.destroy(self)", "def destroy(self):\r\n self.visible = False", "def DestroyHintWindow(self):\r\n\r\n if self._hint_window:\r\n\r\n self._hint_window.Destroy()\r\n self._hint_window = None", "def destroy(self):\n\n self.cmapTexture.destroy()\n\n for tex in (self.modulateTexture,\n self.clipTexture,\n self.colourTexture):\n tex.deregister(self.name)\n glresources.delete(tex.getTextureName())\n\n self.removeListeners()\n self.deregisterAuxImage('modulate')\n self.deregisterAuxImage('clip')\n self.deregisterAuxImage('colour')\n\n self.modulateTexture = None\n self.clipTexture = None\n self.colourTexture = None\n self.modulateImage = None\n self.clipImage = None\n self.colourImage = None\n self.modulateOpts = None\n self.clipOpts = None\n self.colourOpts = None\n\n glimageobject.GLImageObject.destroy(self)", "def destroy_all(self):\n\n for k in self.widgets:\n self.widgets[k].destroy()\n self.widgets = {}\n self.window.destroy()\n self.window = tk.Frame(self.root)\n self.window.pack(side=\"top\", fill=\"both\", expand=True)", "def _destroy(self):\n root = self._root\n turtle.Turtle._pen = None\n turtle.Turtle._screen = None\n self._root = None\n self._canvas = None\n turtle.TurtleScreen._RUNNING = True\n root.destroy()", "def OnExit(self, event):\n \n print 'Cleaning up...'\n self.Destroy()", "def leave_page(self):\n self.window.destroy()", "def __destroy_ui(self):\n # Remove the viewable area from Gedit's side panel\n self.__side_panel.remove_item(self.__view_port)\n\n # Empty class's properties\n self.__tree_view = None\n self.__side_panel = None\n\n self.__view_port.destroy()\n self.__view_port = None", "def close(self):\n self.destroy()", "def done(self):\n self.root.destroy()", "def callback_destroy( self ):\r\n self.winRunning = False\r\n self.rootWin.destroy()\r\n exit()", "def destroy(self):\r\n self._obj.destroy()\r\n self._obj = None", "def close_window(self):\n # Window - END\n self.root.destroy()", "def close(self):\n \n self.renderer.RemoveActor(self._crosshair.actor)\n self.renderer.RemoveActor(self._scalar_bar_actor)\n self.renderer.RemoveActor(self._orientation_annotation)\n self.renderer.RemoveActor(self._corner_annotation)\n \n for layer in self._layers :\n self.renderer.RemoveActor(layer.actor)\n \n for gui_annotation in self._gui_annotations.values() :\n self.renderer.RemoveActor(gui_annotation.shape_actor)\n self.renderer.RemoveActor(gui_annotation.text_actor)", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Convert to 3D'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def destroy(self):\n self.context.destroy()", "def unload(self):\n if self.material_background:\n self.parent.removeItem(self.material_background)\n self.material_background = None\n if self.mod_background:\n self.parent.removeItem(self.mod_background)\n self.mod_background = None\n if self.material_foreground:\n self.parent.removeItem(self.material_foreground)\n self.material_foreground = None\n if self.mod_foreground:\n self.parent.removeItem(self.mod_foreground)\n self.mod_foreground = None\n if self.liquid:\n self.parent.removeItem(self.liquid)\n self.liquid = None", "def quit(self):\n self.window.quit()\n self.window.destroy()", "def delete_win(self, *args):\n if 
cmds.window(self.win_name, ex=1):\n cmds.deleteUI(self.win_name)", "def emit_and_destroy(self):\n self.f1_frame.scene.stop_animation()\n self.drone_frame.scene.stop_animation()\n self.roomba_frame.scene.stop_animation()\n self.car_frame.scene.stop_animation()\n self.turtle_frame.scene.stop_animation()\n self.pepper_frame.scene.stop_animation()\n\n self.switch_window.emit()\n self.f1_frame.scene.view.deleteLater()\n self.drone_frame.scene.view.deleteLater()\n self.roomba_frame.scene.view.deleteLater()\n self.car_frame.scene.view.deleteLater()\n self.turtle_frame.scene.view.deleteLater()\n self.pepper_frame.scene.view.deleteLater()", "def clean_up(self):\n cv2.destroyAllWindows()\n # self.vs.release()", "def exitGame(self):\n self.myBoard.clearFrame()\n for tileRow in self.myBoard.tiles:\n for tile in tileRow:\n tile.destroy()\n del Tile.images[:]\n del self.myBoard.images[:]\n self.myBoard.destroy()\n self.destroy()\n exit(0)", "def destroy(self):\n\n self.renderTexture .destroy()\n self.cmapTexture .destroy()\n self.negCmapTexture.destroy()\n self.lutTexture .destroy()\n\n self.removeListeners()\n self.deregisterLut()\n\n globject.GLObject.destroy(self)\n\n if self.flatShader is not None: self.flatShader.destroy()\n if self.dataShader is not None: self.dataShader.destroy()\n\n self.dataShader = None\n self.flatShader = None\n self.activeShader = None\n\n self.lut = None\n self.renderTexture = None\n self.cmapTexture = None\n self.negCmapTexture = None\n self.lutTexture = None", "def destroy():\n if QFGUI.__instance is not None:\n QFGUI.__instance.__running = False\n try:\n QFGUI.__instance.__gui_app.terminate()\n except socket.error: # ignore\n pass\n QFGUI.__instance.__gui_app = None\n del QFGUI.__instance.__gui_server_thread # clean up resource\n QFGUI.__instance.__gui_server_thread = None\n QFGUI.__instance.__qf = None\n QFGUI.__instance = None\n #\n # DO NOT COMMENT: important to tell unittest that GUI is destroyed\n print \"*** Destroyed QF GUI ***\"", "def destroyWindow(windowType):\n if windowType == \"volume\":\n os.remove(MY_VOL_PATH)\n\n elif windowType == \"backlight\":\n os.remove(MY_BACKLIGHT_PATH)\n Gtk.main_quit()", "def __del__(self):\n self.destroy()", "def clean(self):\n for i in self.winfo_children():\n i.destroy()", "def reset(self, window):\n self.__close_preview(window)\n self.__clear_context()", "def close_app(self):\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n db_path = os.path.join(BASE_DIR, \"..\", \"DATA\", \"AIRCRAFT_COLLISION_FORECAST_SYSTEM.db\")\n clean_table(db_path, 'AIRPLANES')\n\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n img_path = os.path.join(BASE_DIR, \"..\", \"GUI\", \"IMAGE\")\n\n # img_path = 'GUI\\\\IMAGE\\\\'\n img_file_names = [file_name for file_name in listdir(img_path) if isfile(join(img_path, file_name))]\n for file_name in img_file_names:\n if file_name not in ('map_marker.png', 'airplane_marker.png', 'collision_marker.png'):\n os.remove(os.path.join(img_path, file_name))\n print('Closing app')\n self.app.root_window.close()", "def cleanup(self):\r\n\r\n # Remove strip from window.\r", "def close(self):\n\n\t\tself._window.close()", "def exit(self):\n if self.window:\n self.window.close()", "def destroy(self):\n pass # Nothing for now", "def endWindow(self):\n\t\tself.vidcap.release()\n\t\tcv2.destroyWindow(\"show\")", "def save_before_close(self):\n if self.db_window:\n self.db_window.destroy()\n self.destroy()", "def close(self):\n self._close_viewer_window()\n self.env.close()", "def on_cleanup(self):\n\n 
pygame.quit()", "def quit_click(self):\n\n self.parent.destroy()", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def destroy(self):\n pass", "def cancel(self):\n self.top.destroy()", "def deinit(self):\n self._font.close()", "def destructor(self):\n print(\" [SB Live] Terminating...\\n\",\n \"[SB Live] View most recent cache in \\'cache/replay.mov\\'\")\n self.killThread = True\n self.t.join()\n self.root.destroy()\n self.vs.release() # release web camera\n self.cache.release()\n self.replayStream.release()\n cv2.destroyAllWindows() # it is not mandatory in this application", "def destroy(self):\n\n pass", "def _destroy(self):\n # FIXME: Arrange for a more controlled shutdown through the credentials\n\n self.replay_window_persisted = True\n self.sequence_number_persisted = self.sender_sequence_number\n self._store()\n\n del self.sender_key\n del self.recipient_key\n\n os.unlink(self.lockfile.lock_file)\n self.lockfile.release()\n\n self.lockfile = None", "def statDestroy():\n root.destroy()\n statView()", "def close(self):\n if(screen == self):\n screen = None", "def destroy(self):\n if self._ptr is not None:\n # run and remove destructor on c data\n _global_destroy(self._display, self._ptr)\n ffi.gc(self._ptr, None)\n self._ptr = None\n self._display = None", "def exit(self):\n self.root.grab_release()\n self.root.destroy()", "def onClose(self, event): \n \n self.Destroy()\n return", "def exit(self):\n GRobot.exit_lock.acquire()\n if not self._deleted:\n if self.inspector:\n self.inspector.close()\n sip.delete(self.inspector)\n\n if self.display:\n self.webview.close()\n sip.delete(self.webview)\n\n if self.page and not sip.isdeleted(self.page):\n sip.delete(self.page)\n\n GRobot._liveRobot -= 1\n\n if GRobot._liveRobot == 0 and GRobot._loop is not None:\n GRobot._kill_loop=gevent.spawn_later(20,self.kill_loop)\n\n\n self._deleted = True\n\n GRobot.exit_lock.release()", "def removeScene(self):\n del self.scene, self.imgPixmapItem", "def remove_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n self.removeSubWindow(window)\n self._logger.debug(\"done\")", "def unload(self):\n self.iface.removePluginRasterMenu(self.menu, self.action)\n self.iface.removeRasterToolBarIcon(self.action)", "def unload(self):\n self.iface.pluginToolBar().removeAction(self.openDialogAction)", "def exit(self):\n \t\troot.destroy()\n \t\tpass", "def destroy_mat(self):\n\n if self._mat is None:\n return\n\n if self._shell:\n destroy_shell_context(self._mat)\n\n self._mat.destroy()\n self._mat = None", "def clear_screen(self):\r\n lst_grid = self.root.grid_slaves()\r\n for widget in lst_grid:\r\n widget.destroy()\r\n lst_pack = self.root.pack_slaves()\r\n for widget in lst_pack:\r\n widget.destroy()", "def bye(self):\n self._frame._destroy()\n self._turtles = []\n self._gpens = []\n del self._frame", "def __window_close(self):\n pass", "def unload(self):\n for action in self.actions:\n self.iface.removePluginRasterMenu(\n self.tr(u'&Hybriddekning'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&PacSafe'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&PolygonByPolarCoordinates'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", 
"def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&CHOUCAS'),\n action)\n self.iface.removeToolBarIcon(action)\n # remove the toolbar\n del self.toolbar", "def unload(self):\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&Create xyzrgb from Mosaic/DSM'),\n action)\n self.iface.removeToolBarIcon(action)" ]
[ "0.7758888", "0.7666368", "0.76103795", "0.7549023", "0.75396657", "0.749664", "0.74601287", "0.7457397", "0.7421807", "0.7339096", "0.73000395", "0.72744155", "0.7256992", "0.72431695", "0.7238555", "0.7213867", "0.7204563", "0.71423733", "0.7119873", "0.7090933", "0.7061171", "0.70511407", "0.70408297", "0.7016762", "0.6986384", "0.6978324", "0.69704646", "0.6966765", "0.6949641", "0.694166", "0.6935619", "0.69206995", "0.69110984", "0.69050163", "0.6903453", "0.69018126", "0.6892647", "0.68895936", "0.6874586", "0.68729335", "0.6861786", "0.6844735", "0.6841508", "0.679743", "0.6776919", "0.6774244", "0.6767136", "0.6766207", "0.67652464", "0.6723242", "0.671631", "0.671461", "0.67083395", "0.67077255", "0.67066544", "0.67059255", "0.6669194", "0.6658357", "0.6655379", "0.6632422", "0.66317", "0.661518", "0.66119826", "0.66105473", "0.66093993", "0.6585281", "0.6577487", "0.6575614", "0.65641606", "0.6557198", "0.6546722", "0.65424603", "0.65297335", "0.65297335", "0.65297335", "0.65297335", "0.6525651", "0.6517546", "0.6509867", "0.6495265", "0.64862686", "0.6468969", "0.6459824", "0.6459395", "0.64532495", "0.6450933", "0.64463353", "0.6445482", "0.64443797", "0.64426374", "0.64418244", "0.64390063", "0.64364475", "0.6431333", "0.64250183", "0.6423568", "0.64188594", "0.6416926", "0.64154136", "0.6414408", "0.64059997" ]
0.0
-1
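A minimal, self-contained sketch of the teardown pattern the record above describes; the class name, attribute layout, and use of the standard-library turtle module below are illustrative assumptions, not taken from the dataset record:

    import turtle

    class DemoWindow(object):
        # Illustrative stand-in for a window object that owns a frame plus
        # lists of turtles and pens (names assumed for the sketch).
        def __init__(self):
            self._frame = turtle.Screen()   # assumed underlying screen/frame
            self._turtles = []
            self._pencils = []

        def __del__(self):
            # Same shape as the record's document: destroy the frame defensively,
            # then drop references so the associated assets can be reclaimed.
            try:
                self._frame.bye()
            except Exception:
                pass
            self._turtles = []
            self._pencils = []
            del self._frame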
Resize this window to the current set dimensions
def _reshape(self):
    self._frame._setup(width=self._width, height=self._height,
                       startx=self._x, starty=self._y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - (geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()", "def resize(self, width, height):\n\n\t\tself._window.resize(width, height)", "def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def resize_to(self, width, height):\n\n self.driver.resize_window_to(self.handle, width, height)", "def resize(self):\r\n del self.win\r\n self.__create_win()", "def resize(self, win, width:int, height:int):\r\n\r\n\t\tglViewport(0, 0, width, height)", "def resize(self):\n\t\tself.win.erase()\n\t\tfor c in self.components:\n\t\t\tc.resize()\n\t\tself.draw(True)", "def resize(self, *args):\n if self.parent is None: # when deleted\n return\n if self.parent.render_window is None: # BasePlotter\n return\n\n if self._prior_window_size != self.parent.window_size:\n self._prior_window_size = self.parent.window_size\n\n actor = self._actors['background']\n image_data = actor.GetInput()\n origin = image_data.GetOrigin()\n extent = image_data.GetExtent()\n spacing = image_data.GetSpacing()\n xc = origin[0] + 0.5 * (extent[0] + extent[1]) * spacing[0]\n yc = origin[1] + 0.5 * (extent[2] + extent[3]) * spacing[1]\n yd = (extent[3] - extent[2] + 1) * spacing[1]\n dist = self.camera.distance\n\n # make the longest dimensions match the plotting window\n img_dim = np.array(image_data.dimensions[:2])\n self.camera.focus = np.array([xc, yc, 0.0])\n self.camera.position = np.array([xc, yc, dist])\n\n ratio = img_dim / np.array(self.parent.window_size)\n scale_value = 1\n if ratio.max() > 1:\n # images are not scaled if larger than the window\n scale_value = ratio.max()\n\n if self._scale is not None:\n scale_value /= self._scale\n\n self.camera.parallel_scale = 0.5 * yd / self._scale", "def setwinsize(self, rows, cols):", "def __window_resizeBy(self, xDelta, yDelta):\n pass", "def set_igv_window_size(self, width=800, height=600):\n self.set_igv_window_width(width)\n self.set_igv_window_height(height)", "def _resize_image(self, event):\n self.window_width = event.width\n self.window_height = event.height", "def resize_display(self, (w, h)):\n self.surface = pygame.display.set_mode((w, h), pygame.RESIZABLE)", "def SetWindowSize(self, size):\n self.WINDOW_SIZE = size", "def resize(self, x=0, y=0, w=0, h=0):\r\n if w <= 0:\r\n w = self.max_width\r\n if h <= 0:\r\n h = self.max_height\r\n self.width = w\r\n self.height = h\r\n\r\n self.left = x\r\n self.top = y\r\n self.right = x + w\r\n self.bottom = y + h\r\n self.opengl.resize(x, y, w, h)", "def resize(self, dims):\n width, height = dims[:2]\n self.logger.debug(\"renderer reconfigured to %dx%d\" % (\n width, height))\n\n # 
create cairo surface the size of the window\n #surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)\n depth = len(self.rgb_order)\n self.surface_arr = np.zeros((height, width, depth), dtype=np.uint8)\n\n stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32,\n width)\n surface = cairo.ImageSurface.create_for_data(self.surface_arr,\n cairo.FORMAT_ARGB32,\n width, height, stride)\n self.surface = surface\n\n # fill surface with background color;\n # this reduces unwanted garbage in the resizing window\n cr = cairo.Context(self.surface)\n\n # fill surface with background color\n cr.rectangle(0, 0, width, height)\n r, g, b = self.viewer.get_bg()\n cr.set_source_rgba(r, g, b)\n cr.fill()\n\n super(CanvasRenderer, self).resize(dims)", "def configure_window(self, width, height):\n self.configure_surface(width, height)", "def SizeWindows(self):\n self._SizeWindows()", "def setSize(self, width, height):\n frameWidth = width\n frameHeight = height\n repaint()", "def resize(self, width: int, height: int):\n pass", "def resize(self, yx=None):\n if yx == None:\n yx = self.screen.getmaxyx()\n self.screen.clear()\n curses.resizeterm(yx[0], yx[1])\n self.setup_windows(resize = True)\n self.screen.refresh()", "def resize(self, size):\n self.widget.resize(*size)", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def update_dimensions(self):\r\n # stores the old screen height for cleaning the screen\r\n old_w_height = self.w_height\r\n\r\n self.w_width, self.w_height = get_terminal_size()\r\n # see __init__\r\n self.w_width -= self.w_width % 2\r\n self.w_height -= self.w_height % 2\r\n\r\n # no need to clear screen if window size hasn't changed\r\n if old_w_height != self.w_height:\r\n self.clear_screen(old_w_height)", "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def set_resolution(self, width, height):\n self.driver.set_window_size(width, height, self.driver.window_handles[0])", "def resize(self, event=None):\n #self.render()\n self.__resize_background(event)\n #self.__delete_background()\n #self.__create_background(self._imfname)\n for sym in self.itersymbols():\n sym.sym.resize(event)", "def resize(self):\n pass", "def on_parent_resize(self, event):\n #self.resize()\n #self.resize_scaled(drag_rootx=self.resize_frame.winfo_rootx())\n self.resize_scaled(current=MathStat.lerp(0,\n self.prop_frame.winfo_width(), self.last_right_bias))", "def defaultWindowSize(self):\n self.resize(self.defaultWindowWidth, self.defaultWindowHeight)", "def size_with_window(self, size_with_window):\n\n self.container['size_with_window'] = size_with_window", "def adjust_screen_size(self) -> None:\n if self.screen:\n max_row, max_cols = self.screen.getmaxyx()\n if max_row < MIN_SIZE + len(self.all_items):\n self.screen.resize(self.menu_height, max_cols)\n self.draw()", "def resizeEvent(self, event):\n self.updateViewer()", "def resize(self, auto_layout = False, **kwds):\n\t\told_auto_layout = self.auto_layout\n\t\ttry:\n\t\t\tself.auto_layout = auto_layout\n\t\t\tself.set(**kwds)\n\t\tfinally:\n\t\t\tself.auto_layout = old_auto_layout", "def __ev_resize(self, event):\n\n new_size = 
event.dict['size']\n surface_size = self.__screen.get_size()\n old_center = self.__screen.get_rect().center\n if new_size != surface_size:\n self.__screen = pygame.display.set_mode(new_size,\n self.__screen.get_flags(),\n self.__screen.get_bitsize())\n self.init(offset=vect_diff(self.__screen.get_rect().center,\n old_center))\n self.__screen_width, self.__screen_height = self.__screen.get_size()", "def save_my_size(self):\n if not settings.get_bool('maximized', False):\n width, height = self.get_size()\n settings.set('width', width)\n settings.set('height', height)", "def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)", "def window_size(self, window_size):\n\n self._window_size = window_size", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def resize_child_window(self):\n s = struct.pack('HHHH', 0, 0, 0, 0)\n x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)\n fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)", "def setWindowSize(width,height):\n dislin.winsiz(width,height)", "def set_size(self, width, height):\n cairo.cairo_xcb_surface_set_size(self._pointer, width, height)\n self._check_status()", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns", "def resize (self):\n return self._arrange_displays()", "def resize(self, rows, cols, minecount, event=None):\n self.clearFrame()\n #reset relevant instance variables\n self.rows = rows\n self.cols = cols\n self.numMines = minecount\n self.numChecked = 0\n self.numFlags = 0\n self.minesArmed = False\n self.startTime = None\n\n #re-add all elements on the board\n self.setUpFrame()\n self.addTiles(rows,cols,minecount)\n\n #resize window to fit the new board size\n windowWidth = str(20*cols+40)\n windowHeight = str(20*rows+60)\n self.parent.minsize(windowWidth, windowHeight)\n self.parent.maxsize(windowWidth, windowHeight)\n self.parent.geometry(windowWidth+'x'+windowHeight)", "def updatesize(frame):\n winwid, winhgt = frame.winfo_width(), frame.winfo_height()\n scrwid, scrhgt = frame.winfo_screenwidth(), frame.winfo_screenheight()\n newx, newy = math.floor(scrwid * 0.99) - winwid, math.floor(scrhgt * 0.01)\n frame.master.geometry(\"{}x{}+{}+{}\".format(winwid, winhgt, newx, newy))", "def reshape(w, h):\n global win_width, win_height\n win_width = w\n win_height = h\n glutPostRedisplay() # May need to call a redraw...", "def maximize(self):\n lib.SDL_MaximizeWindow(self._ptr)", "def resizeRect(self, width, height):\n rect = self.rect()\n rect.setWidth(width)\n rect.setHeight(height)\n self.setRect(rect)", "def resizeGL(self, width, height):\n self.width, self.height = width, height\n gl.glViewport(0, 0, width, height)\n gl.glMatrixMode(gl.GL_PROJECTION)\n gl.glLoadIdentity()\n gl.glOrtho(0, 1, 0, 1, 0, 1)", "def resizeGL(self,Width,Height):\n return", "def setWindowSize(self, width, height, windowHandle='current'):\n cmdId = self.executeCommand(Command.SET_WINDOW_SIZE, {'width': int(width), 'height': int(height), \n \"windowHandle\": windowHandle})\n return cmdId", "def setSize(self, width, height):\n dw = (width - self.width()) / 2.0\n dh = (height - self.height()) / 2.0\n rect = self.sceneRect()\n rect.adjust(-dw, -dh, dw, dh)\n self.setSceneRect(rect)", "def resizeGL(self, width, height):\n self._sceneviewer.setViewportSize(width, height)\n # resizeGL end", "def resizeEvent(self, *args, **kwargs):\n self.windowMoved.emit()", "def on_canvas_resize(self, event) -> None:\r\n\r\n self.painter.adjust_to_canvas()\r\n 
self.painter.draw_board()", "def change_size(self, width, height):\n oldw = float(self.size().width())\n oldh = float(self.size().height())\n\n if self.indicator_type == 'session':\n neww = int(oldw + oldw * (width / 100.0))\n if neww > 0:\n self.setFixedSize(neww, oldh)\n elif self.indicator_type == 'unit':\n newh = int(oldh + oldh * (height / 100.0))\n if newh > 0:\n self.setFixedSize(oldw, newh)\n\n self.set_font_size()", "def set_size(self, width, height):\n # Combine the height and width to single string to be passed to root\n set_str = '{}x{}'.format(str(width), str(height))\n self.root.geometry(set_str)", "def resize(w, h):\n global width, height, scale\n\n r = radius\n glViewport(0, 0, w, h)\n width = w\n height = h\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if w > h:\n glOrtho(-w/h*r, w/h*r, -r, r, -r, r)\n scale = 2.0 * r / h \n else:\n glOrtho(-r, r, -h/w * r, h/w * r, -r, r)\n scale = 2.0 * r / w", "def do_relayout(self):\n # This method is called whenever a relayout is requested. By\n # default, this is when the layout children change. In that case\n # we just need to update the min and max sizes. We are a top\n # level window, so no one really cares about our size hint. \n self.update_minimum_size()\n self.update_maximum_size()", "def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:", "def DoSetSize(self, x, y, width, height, flags=wx.SIZE_AUTO):\r\n\r\n self._rect = wx.Rect(x, y, max(1, width), max(1, height))\r\n self.DoSizing()", "def resizePreview(self):\n ratio = float(self.qIma.width()) / float(self.qIma.height())\n if self.qIma.width() > self.qIma.height():\n width = 300\n height = int(float(width) / ratio)\n else:\n height = 170\n width = int(float(height) / ratio)\n if 'prodManager' in os.path.basename(self._ima):\n width = 300\n height = 170\n self.lPreview.setMinimumSize(width, height)\n self.lPreview.setMaximumSize(width, height)", "def OnSize(self, event):\r\n\r\n self.Layout()", "def _save_size(self):\n if self.width_key is not None:\n (width, height) = self.window.get_size()\n config.set(self.width_key, width)\n config.set(self.height_key, height)\n config.save()", "def size(self):\n\n\t\treturn self._window.size", "def setWindowGeometry(x,y,width,height):\n dislin.window(x,y,width,height)", "def relayout(self): \n\t\t#self.urmaswin.Layout()\n\t\t#wx.CallAfter(self.urmaswin.Layout)\n\t\t#wx.CallAfter(self.visualizer.OnSize)", "def resize(self,event):\n if event.widget==self.master:\n Y=event.height\n X=event.width\n self.seqframe.configure(width=X-self.canvas_border_x,\n height=Y-self.canvas_border_y)\n return", "def screensize(self, canvwidth=None, canvheight=None, bg=None):\n return self._resize(canvwidth, canvheight, bg)", "def on_resize(self, _: int = 0) -> None:\n assert CursesMenu.stdscr is not None\n screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()\n curses.resizeterm(screen_rows, screen_cols)\n self.draw()", "def canvasResize(self, width: int, height: int):\n # Ensure board is initialized. 
If not, make canvas fill window.\n if not hasattr(self, \"board\"):\n self.canvas.configure(width=self.top.winfo_width(\n ) - 4, height=self.top.winfo_height() - 4)\n return\n\n boardaspectratio: float = self.board.height / self.board.width # y/x\n canvasaspectratio: float = height / width # y/x\n if boardaspectratio <= canvasaspectratio:\n canvaswidth = int(width)\n canvasheight = int(boardaspectratio * width)\n else:\n canvaswidth = int(height / boardaspectratio)\n canvasheight = int(height)\n # TODO: For some reason this doesn't work without the -4, or else it slowly expands by 4px recursively.\n self.canvas.configure(width=canvaswidth - 4, height=canvasheight - 4)\n # Resize icons\n iconsize = (max(1, int(canvaswidth/self.board.width)),\n max(1, int(canvasheight/self.board.height)))\n bombscaled = bombimage.resize(iconsize)\n self.bombphoto = ImageTk.PhotoImage(bombscaled)\n flagscaled = flagimage.resize(iconsize)\n self.flagphoto = ImageTk.PhotoImage(flagscaled)\n # Resize font\n # If negative, font size is measured in pixels.\n self.fontscaled.configure(\n size=int(canvasheight/self.board.height * -3/4))\n # Draw\n self.render()\n # Resize victory message\n if self.victoryMessage is not None:\n self.canvas.moveto(self.victoryMessage, int(canvaswidth/2 - self.fontvictory.measure(\n \"VICTORY\")/2), int(canvasheight/2 + self.fontvictory.cget(\"size\")/2))", "def resizeEvent(self, event):\n self.autosize()\n super().resizeEvent(event)", "def set_geometry(self, width, height, fullscreen=False):\n self.root.tk.call(\"tk\", \"scaling\", self.scaling_factor)\n if fullscreen:\n initial_dimensions = (self.root.winfo_screenwidth(), self.root.winfo_screenheight())\n else:\n initial_dimensions = (round(width * self.scaling_factor),\n round(height * self.scaling_factor))\n\n if fullscreen and sys.platform == \"win32\":\n self.root.state('zoomed')\n elif fullscreen:\n self.root.attributes('-zoomed', True)\n else:\n self.root.geometry(\"{}x{}+80+80\".format(str(initial_dimensions[0]),\n str(initial_dimensions[1])))\n logger.debug(\"Geometry: %sx%s\", *initial_dimensions)", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def getwinsize(self):", "def ev_windowresized(self, event: WindowResized) -> None:", "def setWindowSize(self, value):\n return self._set(windowSize=value)", "def fit_to_window(self):\n if self.view:\n self.view.fit_view()", "def _SetSize(self, pixels = None):\n if not pixels:\n pixels = self.GetClientSize()\n self.canvas.SetSize(pixels)\n self.figure.set_size_inches(pixels[0]/self.figure.get_dpi(),\n pixels[1]/self.figure.get_dpi())", "def adjustSize( self ):\n cell = self.scene().cellWidth() * 2\n minheight = cell\n minwidth = 2 * cell\n \n # fit to the grid size\n metrics = QFontMetrics(QApplication.font())\n width = metrics.width(self.displayName()) + 20\n width = ((width/cell) * cell) + (cell % width)\n \n height = self.rect().height()\n \n # adjust for the icon\n icon = self.icon()\n if icon and not icon.isNull():\n width += self.iconSize().width() + 2\n height = max(height, self.iconSize().height() + 2)\n \n self.setMinimumWidth(max(width, minwidth))\n self.setMinimumHeight(max(height, minheight))\n \n self.rebuild()", "def 
get_window_size(self):\n raise NotImplementedError", "def on_resize(self, width, height):\n self.ctx.viewport = 0, 0, width, height\n self.program['projection'] = Mat4.perspective_projection(self.aspect_ratio, 0.1, 100, fov=60)", "def on_resize(event):\n gloo.set_viewport(0, 0, *event.physical_size)", "def get_window_size(self):\n return self.__window_size", "def OnResize(self, event):\n self._resizing = True\n self._resize_timer.Start(60, True)", "def update_resize(self, viewer, dims):\n self.recalc(viewer)", "def UpdateSizing(self):\n def closure(pane):\n pane.MinSize(self.GetBestSize())\n self._PaneInfoOperation(closure)", "def resizeEvent(self, event):\n\n self.settings.setValue(\"geometry\", self.saveGeometry())\n\n super().resizeEvent(event)", "def resize(self, width, height):\n if self.example:\n self.example.resize(width, height)", "def resizeEvent(self, _event: Optional[QResizeEvent] = None) -> None:\n\n current_frame = self.scene().current_frame\n\n if current_frame is not None:\n # EXTREMELY IMPORTANT LINE!\n # The sceneRect grows but never shrinks automatically\n self.scene().setSceneRect(current_frame.boundingRect())\n self.fitInView(current_frame.boundingRect(), Qt.KeepAspectRatio)", "def on_user_resize(self, event):\n self.resize_scaled(drag_rootx=event.x_root + self._mouse_drag_offset)", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def resizeEvent(self, event):\n self.refresh_images(resize=True)\n QMainWindow.resizeEvent(self, event)", "def on_resize(self, width, height):\n self.gamestatemanager.peek().on_resize(width, height)", "def set_canvas_size(self, width, height):\n self.canvas.config(width = int(width), height = int(height))", "def updateSize(self, *args):\n width = self.width.get()\n height = self.height.get()\n self.initialXScale.config(to=width)\n self.initialYScale.config(to=height)\n # error check that state is not outside bounds\n for ball, state in self.ballStates.items():\n if state[0] > width:\n state[0] = width\n if state[1] > height:\n state[1] = height", "def autoResize(self):\n\t\t#self.infoLabelBox.set_size_request(1,1)\n\t\timgSize = [self.currentPixbuf.get_width() * self.scaleFactor, self.currentPixbuf.get_height() * self.scaleFactor]\n\t\timgSize = map(lambda x: max(int(x), 1), imgSize)\n\t\tif not self.fullscreenToggle:\n\t\t\tself.resize(imgSize[0], imgSize[1])\n\t\t\tposition = ( int(0.5 * (self.get_screen().get_width() - imgSize[0])),\n\t\t\t\tint(0.5 * (self.get_screen().get_height() - imgSize[1])))\n\t\t\tself.move(position[0], position[1])\n\t\t\tself.fixed.move(self.imgDisplay, 0, 0)\n\t\t\tif not self.hideTransparent and self.imgTrans.bgOn:\n\t\t\t\tself.imgTrans.set_size_request(imgSize[0], imgSize[1])\n\t\t\t# make eventbox the same size as image\n\t\t\t# this will not be correct when infoLabelBox is visible\n\t\t\tself.eventBox.set_size_request(imgSize[0], imgSize[1])\n\t\telse:\n\t\t\tself.fixed.move(self.imgDisplay, max(0, int((self.get_size()[0] - imgSize[0]) / 2)),\n\t\t\t\tmax(0, int((self.get_size()[1] - imgSize[1]) / 2)))\n\t\t\tif not self.hideTransparent and self.imgTrans.bgOn:\n\t\t\t\tself.imgTrans.set_size_request(int(self.get_size()[0]), int(self.get_size()[1]))\n\t\t\t# make eventbox the same size as 
screen\n\t\t\tself.eventBox.set_size_request(self.get_size()[0],self.get_size()[1])", "def rescale(self, event: tkinter.Event) -> None:\n # the properties which are linked to the event of reconfiguration\n # contain all the new sizes of the panel :\n self.width, self.height = event.width - 4, event.height - 4\n # The subtraction of 4 pixels is here to compensate the width\n # of the 'highlight bordure' rolling the canvas)\n self.draw_board()" ]
[ "0.7875411", "0.77100426", "0.765141", "0.7498725", "0.74862427", "0.7299506", "0.7242203", "0.71922106", "0.718488", "0.71594423", "0.7113716", "0.6998481", "0.69652283", "0.6843507", "0.6833935", "0.6828651", "0.6818293", "0.6815363", "0.6738181", "0.669341", "0.6678333", "0.6669186", "0.663756", "0.6625979", "0.662144", "0.66061467", "0.6584958", "0.657917", "0.65613943", "0.65596825", "0.653228", "0.6507232", "0.6498475", "0.6493947", "0.64902914", "0.64700633", "0.64687055", "0.6466811", "0.6444145", "0.6425803", "0.6419594", "0.6405188", "0.6399939", "0.6381386", "0.6354456", "0.6349317", "0.63435984", "0.6340384", "0.63325024", "0.6313979", "0.6297324", "0.62881297", "0.62675846", "0.626757", "0.6266384", "0.6264608", "0.62327033", "0.62143505", "0.6211376", "0.6186114", "0.61679244", "0.6164619", "0.6164271", "0.6157704", "0.61276907", "0.6111455", "0.6110966", "0.60928434", "0.60890794", "0.6087874", "0.6084802", "0.608148", "0.60725224", "0.60716397", "0.6053946", "0.60468984", "0.60417056", "0.60281664", "0.6017751", "0.6010346", "0.6007198", "0.59975946", "0.59957826", "0.5993652", "0.59844285", "0.5981799", "0.5974708", "0.59701943", "0.5958751", "0.5956875", "0.59485006", "0.5934703", "0.593252", "0.59213245", "0.5920534", "0.59105027", "0.5910287", "0.5903362", "0.59013677", "0.5900807", "0.5886846" ]
0.0
-1
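For reference, the `_setup` call in the record above mirrors the keyword signature of the standard-library `turtle.setup()` function; a short hedged sketch of resizing a turtle window to stored dimensions (the wrapper name and default values are assumptions):

    import turtle

    def reshape_window(width=640, height=480, x=50, y=50):
        # turtle.setup accepts width/height plus startx/starty offsets,
        # matching the width/height/startx/starty keywords in the record.
        turtle.setup(width=width, height=height, startx=x, starty=y)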
Add a turtle to this window.
def _addTurtle(self, turt):
    assert (type(turt) == Turtle), "Parameter %s is not a valid Turtle object" % `turt`
    self._turtles.append(turt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_new(event):\n t = turtle.Turtle()\n screen_w, screen_h = t.screen._window_size()\n t.goto(event.x - screen_w//2, screen_h //2 - event.y)", "def add_body(self):\r\n new_turtle = generate_turtle()\r\n new_turtle.goto(self.all_turtles[-1].position())\r\n self.all_turtles.append(new_turtle)", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance", "def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n return w", "def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n w.setup(width=1800, height=600)\n return w", "def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()", "def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True", "def make_turtle(color, size):\n t = turtle.Turtle()\n t.color(color)\n t.pensize(size)\n return t", "def _spawn_turtle(self, trt_x, trt_y, name=None):\n\n\t\tif name is None or name == \"\":\n\t\t\tname = self._create_unique_turtle_name()\n\t\telif 
self._has_turtle(name):\n\t\t\treturn \"\"\n\n\t\tturtle = Turtle(name, Point(trt_x, trt_y))\n\t\tself._turtles[name] = turtle\n\n\t\trospy.loginfo(\"New turtle [%s] at x=[%d], y=[%d]\", name, trt_x, trt_y)\n\n\t\treturn name", "def make_turtle(colr, sz):\n t = turtle.Turtle()\n t.color(colr)\n t.pensize(sz)\n return t", "def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()", "def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer", "def make_turtle(color, size):\n t = turtle.Turtle()\n t.color(color)\n t.pensize(size)\n t.hideturtle() # do not show turtle\n t.speed(0) # 0 - 10 scale, 0 is fastest\n return t", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def add_donut(self):\n self.scenes[self.current_scene].add_object(Donut())\n self.redraw()", "def make_window(color, title):\n w = turtle.Screen()\n w.bgcolor(color)\n w.title(title)\n return w", "def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()", "def _prepare_turtle():\n turtle.setup(width=screen_width)\n turtle.shape(turtle_shape)\n 
turtle.title(title)", "def init():\n turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,\n WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n\n turtle.up()\n turtle.setheading(0)\n turtle.title('squares')\n pass", "def up():\n turtleTmp.penup()", "def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)", "def main():\n tortue_1 = turtle.Turtle()\n tortue_1.shape(\"turtle\")\n tortue_1.color(\"aquamarine4\")\n longueur = 200\n largeur = 200\n nbre_carres = 3\n angle_entre_carres = 15\n for i in range(nbre_carres):\n trace_rectangle(tortue_1, longueur, largeur)\n tortue_1.left(angle_entre_carres * (i + 1))\n\n turtle.exitonclick() # Empêche la fenêtre de se fermer automatiquement à la fin du tracé", "def draw_flower():\n turtle.right(45)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(135)\n turtle.forward(150)", "def drawRectangle_1():\n Lucia.color(\"green\",\"yellow\") # Sets the pen color to green and fill color to yellow\n Lucia.seth(90) # Set the initial orientation of the turtle to 0 degrees\n Lucia.begin_fill()\n Lucia.forward(50) # Move the turtle forward by 50 units in the direction that it was pointing\n Lucia.left(90) # Turn the turtle left by 90 degrees relative to the direction it was pointing\n Lucia.forward(100) # Move the turtle forward by 100 units\n Lucia.left(90)\n Lucia.forward(50)\n Lucia.left(90)\n Lucia.forward(100)\n Lucia.left(90) # Make sure the turtle is oriented back to its initial orientation\n Lucia.end_fill()", "def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()", "def __init__(self):\r\n pen.up()\r\n pen.setheading(0)\r\n pen.hideturtle()\r\n turtle.title(\"My name\")\r\n pen.speed(0)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)", "def main():\n # Your code here\n draw_graph(turtle, -500, -200, 0)", "def draw_triangle():\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)", "def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()", "def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up() #Raise pen for movement\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down() #lower pen for drawing", "def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up()\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down()", "def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n 
x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )", "def draw_square():\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)", "def main():\n init_turtle() # initialize the turtle\n draw_long_shape() # draw the long shape\n print(\"Close window to quit.\")\n turtle.done() # pause until the user closes the window", "def onclick(self, fun, btn=1, add=None):\n self.screen._onclick(self.Myturtle._item, fun, btn, add)\n self._update()", "def main():\r\n intialize()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()\r\n shapes()\r\n shapes2()\r\n print (\"Close the window\")\r\n turtle.done()", "def register_shapes():\n turtle.Screen().register_shape(\"saphire.gif\")\n turtle.Screen().register_shape(\"player_right.gif\")\n turtle.Screen().register_shape(\"player_left.gif\")\n turtle.Screen().register_shape(\"walls.gif\")", "def draw_flower():\n turtle.right(45)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(135)\n turtle.forward(150) # draws the stem", "def draw_graph(pencil: turtle.Turtle, posx, posy, data_list):\n # Your code here\n data_list = [100, 50, 150, 300, 200, 100, 50, 150, 300, 200.9, 200]\n turtle.setup(1500, 800)\n t.penup()\n t.goto(posx, posy)\n t.pendown()\n draw_bars(turtle, data_list)\n draw_legend(turtle, data_list)\n t.done()", "def draw_window_pane():\n houseturtle.begin_fill()\n for y in range(4):\n houseturtle.pendown()\n houseturtle.forward(35)\n houseturtle.left(90)\n houseturtle.penup()\n houseturtle.end_fill()", "def add_head(self):\n self.scenes[self.current_scene].add_object(Head())\n self.redraw()", "def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up", "def symbol_add(self):\n\t\t\n\t\tif self.source == None:\n\t\t\tself.log.info(\"You need to open a database first\")\n\t\t\treturn\n\t\t\t\n\t\tw = tksym.WindowAdd(self.root, self.source)\n\t\tself.refresh_all()", "def draw_you_guess_it():\n window = rg.TurtleWindow()\n\n tx = rg.SimpleTurtle('turtle')\n tx.pen = rg.Pen('blue', 20)\n tx.speed = 5 # Medium\n\n tx.left(60)\n tx.forward(200)\n\n tx.pen_up()\n tx.left(120)\n tx.forward(100)\n tx.left(120)\n\n tx.pen_down()\n tx.forward(200)\n\n window.close_on_mouse_click()", "def down():\n turtleTmp.pendown()", "def getturtle(self):\n return self", "def turtle_race():\n # Create the turtle screen and two turtles (leave this as the first line).\n screen, artist, writer = turtle_setup()\n\n # Rename the artist turtle and move her to the left, above the x-axis.\n flojo = artist # Flo-Jo, https://en.wikipedia.org/wiki/Florence_Griffith_Joyner\n flojo.shape( \"turtle\" )\n flojo.color( \"blue\" ) # USA!\n flojo.penup()\n flojo.setposition( -WIDTH // 2 + MARGIN, MARGIN * 2 )\n flojo.setheading( 
0 )\n flojo.pendown()\n\n # Create a new turtle, below the x-axis, to race against the turtle formerly known as artist.\n usain = turtle.Turtle() # Usain Bolt, https://en.wikipedia.org/wiki/Usain_Bolt\n usain.shape( \"turtle\" )\n usain.color( \"green\" ) # Jamaica\n usain.penup()\n usain.setposition( -WIDTH // 2 + MARGIN, -MARGIN * 2 )\n usain.setheading( 0 )\n usain.pendown()\n\n # TODO 7: Implement the turtle race as described in the lab document.\n writer.write( \"And they're off . . .\", align=\"center\", font=( \"Times\", FONT_SIZE, \"bold\" ) )\n while True:\n flojo.forward( random.randint( MARGIN // 4, MARGIN ) )\n usain.forward( random.randint( MARGIN // 4, MARGIN ) )\n\n # Wait for the user to click before closing the window (leave this as the last line).\n screen.exitonclick()", "def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()", "def rules_window(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n t.penup()\r\n t.goto(-200,230)\r\n t.color(\"#696969\")\r\n t.write(\"The Game of Nim\", font=((\"Arial\"), 40, \"normal\"))\r\n t.penup()\r\n t.goto(-330, 200)\r\n t.write(\"Welcome to the Game of Nim!\", font= ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, 130)\r\n t.write(\"The objective of the game is to beat the computer\" + \"\\n\" + \"by taking the last circle.\", font= ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, 60)\r\n t.write(\"The rules are simple you can take one to four circles\" +\"\\n\" \"at a time.\", font = ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, -10)\r\n t.write(\"To choose how many circles you want to remove\" +\"\\n\" +\"just click on the up and down buttons.\", font= ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, -50)\r\n t.write(\"To enter you choice press the circular button.\", font = ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, -95)\r\n t.write(\"Press q at anytime to exit the game.\", font = ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, -140)\r\n t.write(\"Have fun!\", font = ((\"Arial\"), 20, \"normal\"))\r\n\r\n t.goto(-330, -220)\r\n t.write(\"Press the Play Game button to continue on with\" +\"\\n\" + \"the game.\", font = ((\"Arial\"),20, \"normal\"))\r\n t.goto(200, -250)\r\n t.color(\"#20b2aa\")\r\n t.pendown()\r\n t.begin_fill()\r\n # Creates the play game button\r\n for y in range(2):\r\n t.forward(135)\r\n t.left(90)\r\n t.forward(30)\r\n t.left(90)\r\n t.end_fill()\r\n t.color(\"#696969\")\r\n t.penup()\r\n t.goto(200, -250)\r\n t.write(\"Play Game\", font = (\"Arial\", 20, \"normal\"))", "def draw_pink_square():\n window = rg.TurtleWindow()\n\n pink_turtle = rg.SimpleTurtle('turtle')\n pink_turtle.pen = rg.Pen('DeepPink', 5)\n pink_turtle.speed = 1 # Slowest\n\n pink_turtle.draw_square(80)\n\n window.close_on_mouse_click()", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.commands = commands\n self._pc = 0\n self._loop_stack = []\n self._variables = {'x':0, 'y':0}", "def add_textbox(self, left, top, width, height):\n id = self.__next_shape_id\n name = 'TextBox %d' % (id-1)\n sp = self.__sp(id, name, left, top, width, height, is_textbox=True)\n self.__spTree.append(sp)\n shape = Shape(sp)\n self.__shapes.append(shape)\n return shape", "def draw_graphic(self):\r\n\r\n t = Turtle()\r\n text = Turtle()\r\n s = t.getscreen()\r\n s.bgcolor(\"orange\")\r\n count = 0\r\n while count < 1:\r\n text.penup()\r\n text.setposition(-100, 
-100)\r\n text.pencolor(\"purple\")\r\n text.write(\"{}, area: {:.2f}, perimeter: {:.2f}\".format(self.name, self.area(), self.perimeter()), align=\"left\",\r\n font=(\"Arial\", 20, \"bold\"))\r\n t.goto(0, 0)\r\n t.pen(pencolor=\"purple\", fillcolor=\"green\", pensize=6, speed=20)\r\n t.fillcolor(\"red\")\r\n t.begin_fill()\r\n t.pendown()\r\n t.circle(self.__radius)\r\n t.end_fill()\r\n delay(30)\r\n t.clear()\r\n t.reset()\r\n text.clear()\r\n text.reset()\r\n count += 1", "def __init__(self, level, treasures, maze_size):\n turtle.Turtle.__init__(self)\n self.shape(\"player_right.gif\")\n self.color(\"blue\")\n self.penup()\n self.pensize(1)\n self.speed(0)\n self.score = 0\n self.level = level\n self.treasures = treasures\n self.maze_size = maze_size\n self.end_writer = writers.EndWriter(maze_size)\n\n turtle.Screen().onkey(self.go_left, \"Left\")\n turtle.Screen().onkey(self.go_right, \"Right\")\n turtle.Screen().onkey(self.go_up, \"Up\")\n turtle.Screen().onkey(self.go_down, \"Down\")\n turtle.Screen().onkey(self.find_path, \"f\")", "def move_turtle(self):\n self.forward(self.move_speed)", "def add(self, output_svg: Drawing) -> None:\n pass", "def draw_tree(self) -> None:\n import turtle\n\n def height(head):\n return 1 + max(height(head.left), height(head.right)) if head else -1\n\n def jump_to(x, y):\n t.penup()\n t.goto(x, y)\n t.pendown()\n\n def draw(node, x, y, dx):\n if node:\n t.goto(x, y)\n jump_to(x, y - 20)\n t.write(node.val, align=\"center\", font=(\"Arial\", 15, \"normal\"))\n t.circle(10)\n draw(node.left, x - dx, y - 50, dx / 2)\n jump_to(x, y - 20)\n draw(node.right, x + dx, y - 50, dx / 2)\n\n t = turtle.Turtle()\n t.speed(0)\n turtle.delay(0)\n h = height(self)\n jump_to(0, 100 + 10 * h)\n draw(self, 0, 100 + 10 * h, 20 * h)\n t.hideturtle()\n turtle.mainloop()", "def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)", "def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()", "def drawPetal(size, petalColor):\n turtle.color(petalColor)\n turtle.begin_fill()\n for x in range(6):\n turtle.forward(size)\n turtle.left(60)\n turtle.end_fill()", "def win_statement(self, text):\r\n wn = turtle.Screen()\r\n t = turtle.Turtle()\r\n t.penup()\r\n t.hideturtle()\r\n t.color(\"white\")\r\n t.goto(self.posn.x, self.posn.y)\r\n t.write(text, font= (\"Arial\", 60, \"normal\"))\r\n wn.exitonclick()", "def add(self):\n\n self.scene.projs.add(self)\n self.scene.all.add(self.scene.projs)\n self.rotate()", "def setColor(color):\n turtleTmp.color = color\n turtleTmp.penColor(color)", "def new_car(self):\r\n random_num = random.randint(1, 3)\r\n if random_num == 1:\r\n new_car = Turtle('square')\r\n new_car.shapesize(stretch_wid=1, stretch_len=2)\r\n new_car.penup()\r\n new_car.color(random.choice(COLOURS))\r\n random_y = random.randint(-240, 270)\r\n new_car.goto(280, random_y)\r\n self.all_cars.append(new_car)", "def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()", "def makeTurtles(num):\n turtles = []\n for i in 
range(num):\n t = turtle.Turtle()\n #t.speed(0) # can set this for superfast disc movement\n t.up()\n t.shape('square')\n t.shapesize(stretch_len=(2 + i)) #bottom turtle is longest\n t.goto(0, num - i)\n turtles.append(t)\n return turtles", "def addTerminal(self, name, **opts):\n opts.update(renamable=True, removable=True)\n name = self.nextTerminalName(name)\n term = NetTerminal(self, name, **opts)\n self.terminals[name] = term\n if term.isInput():\n self._inputs[name] = term\n elif term.isOutput():\n self._outputs[name] = term\n self.graphicsItem().updateTerminals()\n self.sigTerminalAdded.emit(self, term)\n return term", "def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()", "def Screen():\n if Myturtle._screen is None:\n Myturtle._screen = _Screen()\n return Myturtle._screen", "def clone(self):\n screen = self.screen\n self._newLine(self._drawing)\n\n Myturtle = self.Myturtle\n self.screen = None\n self.Myturtle = None # too make self deepcopy-able\n\n q = deepcopy(self)\n\n self.screen = screen\n self.Myturtle = Myturtle\n\n q.screen = screen\n q.Myturtle = _TurtleImage(screen, self.Myturtle.shapeIndex)\n\n screen._turtles.append(q)\n ttype = screen._shapes[self.Myturtle.shapeIndex]._type\n if ttype == \"polygon\":\n q.Myturtle._item = screen._createpoly()\n elif ttype == \"image\":\n q.Myturtle._item = screen._createimage(screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n q.Myturtle._item = [screen._createpoly() for item in\n screen._shapes[self.Myturtle.shapeIndex]._data]\n q.currentLineItem = screen._createline()\n q._update()\n return q", "def draw(self, scene):\n color = self.settings['target'][\"color\"]\n scene.add(svg.Circle(self.position + Point2(10, 10), self.radius, color=color))", "def create_options_for_test_runner(self):\n self.options = turtle.Turtle(\n verbosity=VERBOSITY_NORMAL,\n summary_mode=False,\n )", "def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n tess.hideturtle()", "def addToSimulation(self, tick):\n self.currentRouteBeginTick = tick\n try:\n # traci.vehicle.add(self.id, self.__createNewRoute(tick), tick, -4, -3) for SUMO v < 1.2.0\n traci.vehicle.add(self.id, self.__createNewRoute(tick)) # for SUMO v1.2.0\n traci.vehicle.subscribe(self.id, (tc.VAR_ROAD_ID,))\n # ! 
currently disabled for performance reasons\n # traci.vehicle.setAccel(self.id, self.acceleration)\n # traci.vehicle.setDecel(self.id, self.deceleration)\n # traci.vehicle.setImperfection(self.id, self.imperfection)\n if self.smartCar:\n None\n # set color to red\n # if self.currentRouterResult.isVictim:\n # traci.vehicle.setColor(self.id, (0, 255, 0, 0))\n # else:\n # traci.vehicle.setColor(self.id, (255, 0, 0, 0))\n else:\n # dump car is using SUMO default routing, so we reroute using the same target\n # putting the next line left == ALL SUMO ROUTING\n traci.vehicle.changeTarget(self.id, self.currentRouterResult.route[-1])\n except Exception as e:\n print(\"error adding\" + str(e))\n # try recursion, as this should normally work\n # self.addToSimulation(tick)", "def _removeTurtle(self,turt):\n if turt in self._turtles:\n self._turtles.remove(turt)", "def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))", "def game_window(self):\r\n t = turtle.Turtle()\r\n t.hideturtle()\r\n\r\n for total_num in range(self.circle):\r\n t.hideturtle()\r\n t.speed(20)\r\n t.penup()\r\n t.goto(self.posn.x,self.posn.y)\r\n t.pendown()\r\n t.color(\"#40e0d0\")\r\n t.begin_fill()\r\n t.circle(30)\r\n t.end_fill()\r\n self.posn.x= self.posn.x+65\r\n if self.posn.x>=25:\r\n self.posn.y= self.posn.y-65\r\n self.posn.x=-300", "def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def add_row(self, shape, attributes):\n if isinstance(shape, shapefile.shapefile._Shape):\n self.w._shapes.append(shape)\n else:\n if self.shapeType in (1, 8, 11, 21, 25, 31):\n self.w.point(*shape)\n elif self.shapeType in (3, 13, 23):\n self.w.line(shape)\n else:\n self.w.poly(shape)\n self.w.record(*attributes)", "def add_unit(self):\n self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()", "def gohome(turtle):\n turtle.penup()\n turtle.goto(0,0)\n turtle.pendown()", "def initialize_plotter(width, height, min_x, max_x, min_y, max_y):\n global x_begin, x_end, x_increment\n turtle.delay(0)\n x_begin, x_end = min_x, max_x\n turtle.setup(width=width, height=height)\n turtle.screensize(width, height)\n turtle.setworldcoordinates(min_x, min_y, max_x, max_y)\n x_increment = (max_x - min_x)/width\n turtle.hideturtle()\n turtle.pencolor('black')\n turtle.penup()\n turtle.setposition(min_x, 0)\n turtle.setheading(0)\n turtle.pendown()\n turtle.forward(max_x - min_x)\n turtle.penup()\n turtle.setposition(0, min_y)\n turtle.setheading(90)\n turtle.pendown()\n turtle.forward(max_y - min_y)", "def append(self, shape: Shape):\n self.shapes.append(shape)", "def display(self, color = (190,205,205), add = False): \r\n s += pgl.Shape(pgl.FaceSet( [[0,0,0],[1,0,0],[1,1,0],[0,1,0]], [[0,1,2,3]]) , pgl.Material((0,100,0)))", "def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)", "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(c):\n turtle.circle(c.radius)", "def add_window(self, window):\n if not self.valid_window(window):\n return False\n self.windows.append(window)\n window.tiler = self\n if window not in self.start_positions.keys():\n 
self.start_positions[window] = window.display_size\n\n rules = config.GET_RULES(window.classname)\n if rules is not None and re.search(rules[\"regex\"], window.title) is not None:\n if \"floating\" in rules:\n window.set_floating(rules[\"floating\"])\n if \"decorated\" in rules:\n if bool(rules[\"decorated\"]):\n window.enable_decoration()\n else:\n window.disable_decoration()\n if \"position\" in rules:\n window.move_to(tuple(rules[\"position\"]))\n\n print(\"Added window: {0}\".format(window))\n window.print_window_styles()\n return True", "def turtle_fonts():\r\n turtle.hideturtle()\r\n turtle.title(\"Front Options\")\r\n turtle.penup()\r\n turtle.setpos(-FONT_SIZE * 5, FONT_SIZE * 5)\r\n turtle.setup(FONT_SIZE * 20, FONT_SIZE * 20)\r\n turtle.right(90)\r\n turtle.write(\"Arial\", align=\"left\", font=(\"Arial\", FONT_SIZE, \"normal\"))\r\n turtle.forward(FONT_SIZE * 2)\r\n turtle.write(\"Comic Sans MS\", align=\"left\", font=(\"Comic Sans MS\", FONT_SIZE, \"normal\"))\r\n turtle.forward(FONT_SIZE * 2)\r\n turtle.write(\"Lucida Grande\", align=\"left\", font=(\"Lucida Grande\", FONT_SIZE, \"normal\"))\r\n turtle.forward(FONT_SIZE * 2)\r\n turtle.write(\"Tahoma\", align=\"left\", font=(\"Tahoma\", FONT_SIZE, \"normal\"))\r\n turtle.forward(FONT_SIZE * 2)\r\n turtle.write(\"Verdana\", align=\"left\", font=(\"Verdana\", FONT_SIZE, \"normal\"))\r\n turtle.forward(FONT_SIZE * 2)\r\n turtle.write(\"Helvetica\", align=\"left\", font=(\"Helvetica\", FONT_SIZE, \"normal\"))\r\n turtle.forward(FONT_SIZE * 2)\r\n turtle.write(\"Times New Roman\", align=\"left\", font=(\"Times New Roman\", FONT_SIZE, \"normal\"))\r\n turtle.done()", "def add_plant(self, desc, obj_list):\n self.plants.append((desc, obj_list))\n if len(self.plants) == 1:\n self.set_default_brush()", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def add_bowl(self, env, bowl_color, width, height):\n\n bowl_size = (0.12, 0.12, 0)\n bowl_urdf = \"bowl/bowl.urdf\"\n bowl_pose = self.get_random_pose(env, bowl_size)\n bowl_id = env.add_object(bowl_urdf, bowl_pose, \"fixed\")\n pb.changeVisualShape(\n bowl_id, -1, rgbaColor=utils.COLORS[bowl_color] + [1])\n bowl_pix = utils.xyz_to_pix(bowl_pose[0], self.bounds, self.pix_size)\n bowl_obj_info = {\n \"obj_id\": bowl_id,\n \"pose\": bowl_pose,\n \"size\": bowl_size,\n \"urdf\": bowl_urdf,\n \"color\": bowl_color,\n \"pix\": bowl_pix,\n \"unknown_color\": bowl_color in utils.EVAL_COLORS,\n \"region\": determine_region(bowl_pix[0], bowl_pix[1], width, height),\n }\n\n return bowl_obj_info", "def light_positions(turtle, color, pos, hide=0):\n if hide == 1:\n turtle.hideturtle()\n turtle.penup()\n turtle.forward(40)\n turtle.left(90)\n turtle.forward(pos)\n turtle.shape(\"circle\")\n turtle.shapesize(3)\n turtle.fillcolor(color)", "def draw_wheel():\r\n\touter_radius = 1\r\n\tthickness = .4\r\n\tif wireframe:\r\n\t\tglutWireTorus(thickness,outer_radius - thickness,8,8)\r\n\telse:\r\n\t\tglutSolidTorus(thickness,outer_radius - thickness,8,8)\r\n\t\tglPushAttrib(GL_CURRENT_BIT)\r\n\t\tglPushAttrib(GL_LIGHTING_BIT)\r\n\t\tglDisable(GL_LIGHTING)\r\n\t\tglColor3f(0,0,0)\r\n\t\tglutWireTorus(thickness+.01,outer_radius - thickness + 0.005,8,8)\t\r\n\t\tglPopAttrib()\r\n\t\tglPopAttrib()", "def create_board_window():\n wn = turtle.Screen()\n wn.setworldcoordinates(0, 0, WIDTH+1, HEIGHT+1)\n t = turtle.Turtle()\n t.pensize(1)\n t.speed(0)\n t.hideturtle()\n return (wn, t)", "def 
done(self):\n turtle.done()", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()" ]
[ "0.68280035", "0.6453635", "0.6225176", "0.6220096", "0.5967827", "0.59463257", "0.5936159", "0.59193295", "0.5886304", "0.58368546", "0.5714807", "0.5633327", "0.5586595", "0.55554694", "0.552829", "0.5502977", "0.5502977", "0.5468977", "0.5448978", "0.5388961", "0.5375666", "0.53516215", "0.5297699", "0.5274558", "0.52661216", "0.5258611", "0.5241604", "0.5207058", "0.5205266", "0.5188304", "0.5188018", "0.51663154", "0.51529616", "0.5137824", "0.51368946", "0.5126749", "0.5124462", "0.5097848", "0.50720984", "0.5053409", "0.5045994", "0.4997847", "0.49660236", "0.49565628", "0.49489665", "0.49331284", "0.49280933", "0.4917928", "0.4909942", "0.4906782", "0.4879851", "0.4852811", "0.48428717", "0.48417363", "0.48374486", "0.48280957", "0.4816493", "0.480432", "0.48034894", "0.48007601", "0.479873", "0.4787447", "0.4785", "0.47797188", "0.47792265", "0.4762531", "0.47610748", "0.47547567", "0.4743522", "0.4731988", "0.47294924", "0.47285932", "0.47214156", "0.47134796", "0.4709938", "0.47007754", "0.4690551", "0.4676391", "0.46711764", "0.46610248", "0.46365544", "0.46295053", "0.46288198", "0.46209043", "0.46165368", "0.46023205", "0.45931447", "0.45681396", "0.45653215", "0.45653215", "0.45413378", "0.45383096", "0.4530588", "0.4514748", "0.45112976", "0.4508315", "0.45079714", "0.45028186", "0.4496763", "0.4488696" ]
0.7149174
0
Add a pen to this window.
def _addPen(self,pen):
    assert (type(pen) == Pen), "Parameter %s is not a valid graphics pen" % repr(pen)
    self._pencils.append(pen)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pen(self, width=None, rgb=None, alpha=None):\n self.call('pen', width, rgb, alpha)", "def pen(self, pen=None, **pendict):\n _pd = {\"shown\" : self._shown,\n \"pendown\" : self._drawing,\n \"pencolor\" : self._pencolor,\n \"fillcolor\" : self._fillcolor,\n \"pensize\" : self._pensize,\n \"speed\" : self._speed,\n \"resizemode\" : self._resizemode,\n \"stretchfactor\" : self._stretchfactor,\n \"outline\" : self._outlinewidth,\n \"tilt\" : self._tilt\n }\n\n if not (pen or pendict):\n return _pd\n\n if isinstance(pen, dict):\n p = pen\n else:\n p = {}\n p.update(pendict)\n\n _p_buf = {}\n for key in p:\n _p_buf[key] = _pd[key]\n\n if self.undobuffer:\n self.undobuffer.push((\"pen\", _p_buf))\n\n newLine = False\n if \"pendown\" in p:\n if self._drawing != p[\"pendown\"]:\n newLine = True\n if \"pencolor\" in p:\n if isinstance(p[\"pencolor\"], tuple):\n p[\"pencolor\"] = self._colorstr((p[\"pencolor\"],))\n if self._pencolor != p[\"pencolor\"]:\n newLine = True\n if \"pensize\" in p:\n if self._pensize != p[\"pensize\"]:\n newLine = True\n if newLine:\n self._newLine()\n if \"pendown\" in p:\n self._drawing = p[\"pendown\"]\n if \"pencolor\" in p:\n self._pencolor = p[\"pencolor\"]\n if \"pensize\" in p:\n self._pensize = p[\"pensize\"]\n if \"fillcolor\" in p:\n if isinstance(p[\"fillcolor\"], tuple):\n p[\"fillcolor\"] = self._colorstr((p[\"fillcolor\"],))\n self._fillcolor = p[\"fillcolor\"]\n if \"speed\" in p:\n self._speed = p[\"speed\"]\n if \"resizemode\" in p:\n self._resizemode = p[\"resizemode\"]\n if \"stretchfactor\" in p:\n sf = p[\"stretchfactor\"]\n if isinstance(sf, (int, float)):\n sf = (sf, sf)\n self._stretchfactor = sf\n # if \"shearfactor\" in p:\n # self._shearfactor = p[\"shearfactor\"]\n if \"outline\" in p:\n self._outlinewidth = p[\"outline\"]\n if \"shown\" in p:\n self._shown = p[\"shown\"]\n if \"tilt\" in p:\n self._tilt = p[\"tilt\"]\n \n self._update()", "def SetConnectionPen(self, pen):\r\n\r\n self._dottedPen = pen\r\n self._dirty = True", "def SetPen(*args):\n return _gdi_.GraphicsContext_SetPen(*args)", "def SetPen(*args, **kwargs):\n return _gdi_.DC_SetPen(*args, **kwargs)", "def setPen(self, *args, **kwargs):\n if kwargs == {} and (args == () or args == ('default',)):\n self.opts['pen'] = fn.mkPen(getConfigOption('foreground'))\n else:\n self.opts['pen'] = fn.mkPen(*args, **kwargs)\n\n self.picture = None\n self.update()", "def SetPen(*args, **kwargs):\n return _gdi_.PseudoDC_SetPen(*args, **kwargs)", "def penup(self):\n if not self._drawing:\n return\n self.pen(pendown=False)", "def set_pen_color(self, color: tuple) -> Rectangle:\n self.pen.color = color\n return self", "def test_set_pen(self):\n painter = biotracker.QPainter()\n painter.setPen(100, 50, 30, 33)\n self.assertEqual(\"p(100,50,30,33)\", painter.to_msg())", "def SetBorderPen(self, pen):\r\n\r\n self._borderPen = pen\r\n self.RefreshSelected()", "def pensize(self, width):\n self._penwidth = width", "def __init__(self, *args, **kwargs):\n _gdi_.GraphicsPen_swiginit(self,_gdi_.new_GraphicsPen(*args, **kwargs))", "def setPenColor( self, color ):\n self._penColor = QColor(color)\n self.setDirty()", "def CreatePen(*args, **kwargs):\n return _gdi_.GraphicsRenderer_CreatePen(*args, **kwargs)", "def CreatePen(*args, **kwargs):\n return _gdi_.GraphicsContext_CreatePen(*args, **kwargs)", "def draw(self, renderer):\n renderer.drawRect(pyui.colors.black, self.windowRect)\n renderer.drawText( \"Strokes: %d\" % len(self.strokes), (650,50), pyui.colors.white)\n for start, end, color in 
self.strokes:\n renderer.drawLine(start[0], start[1], end[0], end[1], color)", "def add_draw(self, draw):\n self.draws.append(draw)", "def add_brush(self, item: 'Solid') -> None:\n self.brushes.append(item)", "def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)", "def test_set_pen_noalpha(self):\n painter = biotracker.QPainter()\n painter.setPen(100, 50, 30)\n self.assertEqual(\"p(100,50,30,255)\", painter.to_msg())", "def setSymbol(self, \n symbolStyle=None, \n brushColor=None, brushStyle=None, \n penColor=None, penWidth=None, penStyle=None, \n symbolHeight=None, symbolWidth=None):\n for item in self.__selectedCurves:\n oldSymbol = item.symbol()\n if symbolStyle is None:\n symbolStyle = oldSymbol.style()\n if brushColor is None:\n brushColor = oldSymbol.brush().color()\n if brushStyle is None:\n brushStyle = oldSymbol.brush().style()\n if penColor is None:\n penColor = oldSymbol.pen().color()\n if penWidth is None:\n penWidth = oldSymbol.pen().width()\n if penStyle is None:\n penStyle = oldSymbol.pen().style()\n if symbolHeight is None:\n symbolHeight = oldSymbol.size().height()\n if symbolWidth is None:\n symbolWidth = oldSymbol.size().width()\n pen = QtGui.QPen(penColor, penWidth, penStyle)\n symbol = Qwt.QwtSymbol(symbolStyle, oldSymbol.brush(), pen, QtCore.QSize(width, height)) \n item.setSymbol(symbol)\n self.replot()", "def draw_but(self, window):\n # draws the rectangular button\n p1 = graphics.Point(self.cen_point_x - self.width / 2, \n self.cen_point_y - self.height / 2)\n p2 = graphics.Point(self.cen_point_x + self.width / 2, \n self.cen_point_y + self.height / 2)\n self.button = graphics.Rectangle(p1, p2)\n self.button.setOutline(\"Orange\")\n self.button.draw(window)\n \n # draws the text on the button\n self.text.draw(window)", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)", "def __init__(self, *args, **kwargs):\n _gdi_.Pen_swiginit(self,_gdi_.new_Pen(*args, **kwargs))", "def paint(self):\r\n self.win.bkgd(\" \", COLOR_PAIR[\"con_text\"])", "def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)", "def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)", "def pencolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._pencolor:\n return\n self.pen(pencolor=color)\n else:\n return self._color(self._pencolor)", "def penColor( self ):\n return self._penColor", "def wdraw_line(self, wx0, wy0, wx1, wy1, color, arrow):\r\n dx0, dy0 = self.w_to_d(wx0, wy0)\r\n dx1, dy1 = self.w_to_d(wx1, wy1)\r\n self.canvas.create_line(dx0, dy0, dx1, dy1, fill=color, arrow=arrow)", "def OnPaint(self, event=None):\n dc = wx.PaintDC(self)\n dc.Clear()\n 
dc.SetPen(wx.Pen(self.pen_color, self.thickness))\n # Draw Horizontal Crosshair\n dc.DrawLine(self.screenwidth_half - self.width_half, self.screenheight_half,\n self.screenwidth_half + self.width_half, self.screenheight_half)\n # Draw Horizontal Crosshair\n dc.DrawLine(self.screenwidth_half, self.screenheight_half - self.height_half,\n self.screenwidth_half, self.screenheight_half + self.height_half)", "def draw(self):\n self.strip.show()", "def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )", "def drawWidget(self, qp):\n # Prepare brush.\n brush = QtGui.QBrush()\n brush.setStyle(Qt.SolidPattern)\n if self.is_selected():\n # Fill selected circle with dimmed color\n brush.setColor(self.color_dimmed)\n else:\n brush.setColor(self.parentWidget().BACKGROUND_COLOR)\n qp.setBrush(brush)\n\n # Prepare pen.\n pen = QtGui.QPen()\n pen.setColor(self.color)\n pen.setWidth(2);\n qp.setPen(pen)\n\n size = self.size()\n w = size.width()\n h = size.height()\n center = QPoint(w // 2, h // 2)\n radius = min(w, h) // 2 - 2\n \n qp.drawEllipse(center, radius, radius)", "def draw(self):\n self.menu_pointer.draw()", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw_point(self, point, symbol='\\u25CF'): \n \n point_box = Text(self.frame, bg=\"white\", fg=\"black\", wrap=WORD,\n height=8, font=('arial', 14), pady=5, padx=20)\n\n point_box.insert(1.0, symbol + ' ')\n point_box.insert('insert', point)\n point_box.configure(state='disabled')\n point_box.pack(side=TOP, fill=X)", "def _create(self):\n if self.h >= 2:\n # Draw standard shape\n for i in range(1, self.h - 1):\n self.window.addch(i, 0, curses.ACS_VLINE | self.colour) # '|'\n\n # Draw scrolling bar if necessary\n if self.size > 0:\n end = min(self.pos + self.size, self.h)\n for i in range(self.pos, end):\n self.window.addch(i, 0, chr(0x2588), self.colour) # '█'\n\n # Draw arrows if necessary\n if self.counter > 0:\n self.window.addch(0, 0, chr(0x25B2), self.colour) # '▲'\n if self.counter < self.content_size - self.h:\n self.window.addch(self.h - 1, 0, chr(0x25BC), self.colour) # '▼'\n\n # Finally refresh window\n self.window.refresh()", "def pensize(self, width=None):\n if width is None:\n return self._pensize\n self.pen(pensize=width)", "def draw(self, painter: QPainter):\n pass", "def paintEvent(self, evt):\n paint = QPainter()\n paint.begin(self)\n self.paint(paint)\n paint.end()", "def pencolor(self):\n return self._pencolor", "def draw(self, win, outline=None):\n # Call this method to draw the button on the screen\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.width + 4, self.height + 4), 0)\n\n pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height), 0)\n\n if self.text != '':\n font = pygame.font.SysFont('comicsans', 30)\n text = font.render(self.text, 1, black)\n win.blit(text, (self.x + (self.width // 2 - text.get_width() // 2), self.y + (self.height // 2 - text.get_height() // 2)))", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def draw_line(self, x):\n self.PDF.setStrokeColor(black01)\n self.PDF.setLineWidth(1)\n self.PDF.line(75, x, 550, x)\n self.PDF.setStrokeColor(\"black\")", "def paint(self):\n self.paint_snake()\n 
self.paint_apple()\n root.mainloop()", "def SetStipple(*args, **kwargs):\n return _gdi_.Brush_SetStipple(*args, **kwargs)", "def addch(self, stdscr, y, x, text):\n stdscr.addch(y, x, text, curses.color_pair(self.i))", "def paintEvent(self, event):\r\n qp = QtGui.QPainter()\r\n qp.begin(self)\r\n qp.setRenderHint(QtGui.QPainter.Antialiasing, True)\r\n qp.setPen(self.pen_color)\r\n qp.setBrush(self.fill_color)\r\n qp.drawRect(0, 0, self.parent.width(), self.parent.height())\r\n self.resize(self.parent.width(), self.parent.height())\r\n qp.end()", "def addPlot(self, X, Y, color = \"k\"):\n if color in self.colors: color = self.colors[color] \n \n path = QtGui.QPainterPath()\n path.moveTo(X[0],Y[0])\n for i in xrange(1, len(X)):\n path.lineTo(X[i],Y[i])\n self.pathItem_list.append(self.scene.addPath(path, QtGui.QPen(color)))", "def paintEvent(self, event):\n qp = QtGui.QPainter(self)\n br = QtGui.QBrush(QtGui.QColor(100, 10, 10, 40))\n qp.setBrush(br)\n qp.drawRect(QtCore.QRect(self.begin, self.end))", "def graphicsDraw(self, win, center):\n\t\tlastPoint = None\n\t\tfor p in self.points:\n\t\t\tthisPoint = Point(p[0] + center.x, p[1] + center.y)\n\t\t\tif lastPoint is not None:\n\t\t\t\tline = Line(lastPoint, thisPoint)\n\t\t\t\tline.draw(win)\n\t\t\tlastPoint = thisPoint", "def pendown(self):\n if self._drawing:\n return\n self.pen(pendown=True)", "def penblock(self, block):\n self.block = block", "def on_mouse_click(self, event: Event):\r\n self.control.add_gem(self.row, self.column)", "def underline(self, underline):\n\n self._underline = underline", "def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)", "def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, self.rect)\n\n # Blit the button's text onto it\n self.screen.blit(self.txt_surface, self.txt_surface_rect)", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x, self.center.y, PADDLE_WIDTH, PADDLE_HEIGHT, PADDLE_COLOR)\n pass", "def addBL(self):\n self.parent.copyCurrentWinState(self.pltw)\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def drawBorder(self):\n if self._focused:\n self._window.attron(curses.A_BOLD)\n else:\n self._window.attroff(curses.A_BOLD)\n self._window.border()\n self._window.addstr(0, 1, self.__title)\n self._window.attroff(curses.A_BOLD)", "def draw( self, **kw ):\n pass", "def do_expose_event(self, widget, event):\n\n self.set_up_pangocairo(widget, event)\n\n self.draw(*self.window.get_size())", "def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(50)", "def create_paddle(self, pos):\n\n self.shape(\"square\")\n self.penup()\n self.color(\"blue\")\n 
self.shapesize(stretch_wid=1, stretch_len=4)\n self.setpos(pos)", "def GetPen(*args, **kwargs):\n return _gdi_.StockGDI_GetPen(*args, **kwargs)", "def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)", "def add(self, output_svg: Drawing) -> None:\n pass", "def paintEvent(self, event):\n\n painter = QPainter(self)\n\n painter.setPen(QPen(Qt.cyan, 3, Qt.DotLine))\n\n x = round(self.geometry.width()/2 - 150)\n y = round(self.geometry.height()/6)\n height = round(4*self.geometry.height()/6)\n \n painter.drawRect(x, y, 175, height)\n painter.end()", "def paint(self, draw, x, y, w, h):\n\t\tpass", "def DrawBackground(self, dc, wnd, rect):\r\n \r\n # draw background\r\n dc.SetBrush(self._bkbrush)\r\n dc.SetPen(wx.TRANSPARENT_PEN)\r\n dc.DrawRectangle(-1, -1, rect.GetWidth()+2, rect.GetHeight()+2)\r\n\r\n # draw base line\r\n dc.SetPen(wx.GREY_PEN)\r\n dc.DrawLine(0, rect.GetHeight()-1, rect.GetWidth(), rect.GetHeight()-1)", "def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'", "def draw(self, scene):\n color = self.settings['target'][\"color\"]\n scene.add(svg.Circle(self.position + Point2(10, 10), self.radius, color=color))", "def BeginDrawing(*args, **kwargs):\n return _gdi_.PseudoDC_BeginDrawing(*args, **kwargs)", "def draw_self(self, x, y):\n noStroke()\n fill(1.0, 0.5, 0.6)\n ellipse(x, y, 100, 100)\n bottom_half = createShape()\n bottom_half.beginShape()\n bottom_half.vertex(x, y)\n bottom_half.vertex(x+100, y)\n bottom_half.vertex(x+100, y+50)\n bottom_half.vertex(x+50, y+25)\n bottom_half.vertex(x, y+50)\n bottom_half.endShape()\n shape(bottom_half, -50, 0)\n\n self.eyes.display(x, y - 15, self.looking)", "def draw(self):\n self.figure.show()\n self.figure.canvas.draw()", "def update_brush(self):\r\n \r\n brushset = self.brushset\r\n \r\n self.grfx[0].brushset = brushset\r\n pass", "def __create_win(self):\r\n self.__calc_size()\r\n try:\r\n self.win = curses.newwin(self.height, self.width, self.posy, self.posx)\r\n self.panel = curses.panel.new_panel(self.win)\r\n self.win.scrollok(True)\r\n self.win.keypad(1)\r\n self.do_paint()\r\n except Exception:\r\n self.win = None\r\n self.panel = None", "def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)", "def add_canvas(self, fig):\r\n self.canvas = FigureCanvas(fig)\r\n self.toolbar = NavigationToolbar(self.canvas,\r\n self, coordinates=True)\r\n self.canvas_vlayout.addWidget(self.toolbar)\r\n self.canvas_vlayout.addWidget(self.canvas)\r\n self.canvas.draw()", "def risePen(gcode):\r\n gcode.append(\"M300 S46\")\r\n #gcode.append(\"G0 Z0.1000\")\r", "def drawPoints(self, qp):\n\n# pen = self.pen\n\n\n size = self.size()\n self.yOffset = [size.height()*0.2 + size.height()*0.618/self.NUM_CHANNEL * y for y in xrange(self.NUM_CHANNEL) ]\n\n for ix in xrange(self.NUM_CHANNEL):\n self.pen.setStyle(Qt.SolidLine)\n self.pen.setWidth(2)\n self.pen.setBrush(self.PEN_COLOR[ix])\n self.pen.setCapStyle(Qt.RoundCap)\n self.pen.setJoinStyle(Qt.RoundJoin)\n qp.setPen(self.pen)\n\n qp.drawLine(self.x - 2, self.yOffset[ix] - \\\n self.data_1[ix] * self.DISPLAY_SCALING[ix],\\\n self.x , self.yOffset[ix] - \\\n self.data[ix] * self.DISPLAY_SCALING[ix])", "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # 
s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def on_draw(event):\n # First, we clear the window in white\n # (it is necessary to do that at every frame)\n gloo.set_clear_color((1.0, 1.0, 1.0, 1.0))\n gloo.clear()\n program.draw(\"line_strip\")", "def draw(self) -> None:\n assert self.screen is not None\n self.screen.border()\n self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)\n self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)\n\n for index, item in enumerate(self.all_items):\n self.draw_item(index, item)\n\n self.refresh_screen()\n if self._debug_screens: # pragma: no cover all\n with _SCREENDUMP_DIR.joinpath(f\"{self.title}-{time.time()}\").open(\n \"wb\",\n ) as f:\n self.screen.putwin(f)\n with _SCREENDUMP_DIR.joinpath(\n f\"stdscr-{self.title}-{time.time()}\",\n ).open(\"wb\") as f:\n self.screen.putwin(f)", "def __init__(self, size, val=-1, width=5, brush=None, pen=None,\n suffix='px', offset=None):\n pg.GraphicsObject.__init__(self)\n pg.GraphicsWidgetAnchor.__init__(self)\n self.setFlag(self.ItemHasNoContents)\n self.setAcceptedMouseButtons(QtCore.Qt.NoButton)\n\n if brush is None:\n brush = pg.getConfigOption('foreground')\n self.brush = pg.fn.mkBrush(brush)\n self.pen = pg.fn.mkPen(pen)\n self._width = width\n self.size = size\n if offset is None:\n offset = (0, 0)\n self.offset = offset\n\n self.bar = QtWidgets.QGraphicsRectItem()\n self.bar.setPen(self.pen)\n self.bar.setBrush(self.brush)\n self.bar.setParentItem(self)\n\n if val == -1:\n val = size\n\n self.text = pg.TextItem(text=pg.fn.siFormat(val, suffix=suffix),\n anchor=(0.5, 1))\n self.text.setParentItem(self)", "def display(self, color = (190,205,205), add = False): \r\n s += pgl.Shape(pgl.FaceSet( [[0,0,0],[1,0,0],[1,1,0],[0,1,0]], [[0,1,2,3]]) , pgl.Material((0,100,0)))", "def _draw_apple(self):\n if self._apple is not None:\n self._sensehat.set_pixel(self._apple.position.x, self._apple.position.y, self._apple.color)", "def _append_svg(self, svg, before_prompt=False):\n self._append_custom(self._insert_svg, svg, before_prompt)", "def draw_point(self, p):\n length = 3\n self.set_line_width(0.1)\n self.set_source_rgba(0, 0, 1, 1)\n self.move_to(p.x + length, p.y)\n self.line_to(p.x - length, p.y)\n self.stroke()\n self.move_to(p.x, p.y + length)\n self.line_to(p.x, p.y - length)\n self.stroke()", "def paintEvent(self, event):\n painter = QPainter(self)\n opt = QStyleOption()\n opt.initFrom(self)\n self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)", "def on_mouse_press(self, x, y, button, modifiers):\n self.add_wall()", "def give_space(self):\r\n pen.forward(20)", "def create(self):\n self.parent.copyCurrentWinState(self.pltw)\n # add a new vector\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def draw_pink_square():\n window = rg.TurtleWindow()\n\n pink_turtle = rg.SimpleTurtle('turtle')\n pink_turtle.pen = rg.Pen('DeepPink', 5)\n pink_turtle.speed = 1 # Slowest\n\n pink_turtle.draw_square(80)\n\n window.close_on_mouse_click()", "def draw(self, window):\n super().draw(window)\n self.health_bar(window)\n 
for bullet in self.bullets:\n bullet.draw(window)", "def paintEvent(self, paint_event):\n opt = QStyleOption()\n opt.initFrom(self)\n p = QPainter(self)\n self.style().drawPrimitive(QStyle.PE_Widget, opt, p, self)", "def insertSymbol(self, txt):\n if self.__lastFocusWidget == e5App().getObject(\"Shell\"):\n e5App().getObject(\"Shell\").insert(txt)\n else:\n aw = self.activeWindow()\n if aw is not None:\n curline, curindex = aw.getCursorPosition()\n aw.insert(txt)\n aw.setCursorPosition(curline, curindex + len(txt))" ]
[ "0.6879283", "0.6544037", "0.6461168", "0.63628376", "0.629559", "0.62903273", "0.6202111", "0.61660004", "0.61124593", "0.5987985", "0.5954313", "0.59414625", "0.58582664", "0.5821973", "0.57936865", "0.5761476", "0.5666378", "0.55963165", "0.55450845", "0.54638934", "0.5450481", "0.5449085", "0.5416598", "0.53866947", "0.53828204", "0.53809124", "0.53408736", "0.53077817", "0.52982634", "0.52855396", "0.52829593", "0.5277016", "0.5261675", "0.52374595", "0.5228861", "0.5178101", "0.51227814", "0.5119335", "0.5098837", "0.5074883", "0.50684685", "0.503976", "0.5020407", "0.5019702", "0.49923933", "0.49826175", "0.49716565", "0.4967948", "0.4962486", "0.49365535", "0.4935244", "0.49298024", "0.49266708", "0.49169236", "0.49161372", "0.49119693", "0.48999032", "0.4898798", "0.48829716", "0.487955", "0.48777428", "0.48694685", "0.48514742", "0.48434046", "0.48430964", "0.4840214", "0.4827763", "0.48252174", "0.48199078", "0.48152003", "0.4793062", "0.4787142", "0.47782376", "0.47646493", "0.47628507", "0.47572854", "0.4756574", "0.4747299", "0.47410062", "0.47380164", "0.47378206", "0.47305256", "0.47279066", "0.47278938", "0.47263038", "0.47246283", "0.47239307", "0.472173", "0.4717454", "0.4715668", "0.47050333", "0.47033232", "0.46994627", "0.4697455", "0.4695455", "0.46952996", "0.4695124", "0.46910623", "0.4688783", "0.468823" ]
0.7343928
0
Remove a turtle from this window.
def _removeTurtle(self,turt):
    if turt in self._turtles:
        self._turtles.remove(turt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __del__(self):\n self.clear()\n self._screen._removeTurtle(self)\n del self._turtle", "def __del__(self):\n self._screen._removePen(self)\n del self._turtle", "def clear(self):\n self._turtle.clear()", "def clear(self):\n self._turtle.clear()", "def _destroy(self):\n root = self._root\n turtle.Turtle._pen = None\n turtle.Turtle._screen = None\n self._root = None\n self._canvas = None\n turtle.TurtleScreen._RUNNING = True\n root.destroy()", "def remove(self):\n self.node.destroy()", "def __del__(self):\n try:\n self._frame._destroy()\n except:\n pass\n self._turtles = []\n self._pencils = []\n del self._frame", "def bye(self):\n self._frame._destroy()\n self._turtles = []\n self._gpens = []\n del self._frame", "def quit():\n #quits from python turtle graphics screen\n bye()", "def delwin(self):\n\t\tfor c in self.components:\n\t\t\tc.delwin()\n\t\tself.win = None", "def remove(self):\n self.ren.RemoveActor(self.actor)\n \n self.visible = 0", "def remove(self) -> None:\n self.map.remove_brush(self)", "def remove_circle(self, removing):\r\n t = turtle.Turtle()\r\n# For whatever number, either the user of the computer, is removing it will draw over the existing circles on the screen.\r\n for total_num in range(removing):\r\n t.speed(20)\r\n t.penup()\r\n t.goto(self.posn.x,self.posn.y)\r\n t.pendown()\r\n t.color(\"#696969\") # Changes the color to dark grey\r\n t.begin_fill()\r\n t.circle(30)\r\n t.end_fill()\r\n# Moves the turtle to the next row to start removing circle\r\n self.posn.x=self.posn.x+65\r\n if self.posn.x>=25:\r\n self.posn.y= self.posn.y-65\r\n self.posn.x=-300", "def removePick(self):\n self.pnt = None\n vtkRenWin.delMarker(self.renWin)", "def destroy(self):\n bullet_tools.tear_down_scene()", "def removeLatticeFrame(self):\n self.latticeFrame.remove()", "def clear(self):\n self._frame.clear()\n self._turtles = []\n self._gpens = []", "def RemoveShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_RemoveShape(self, *args)", "def delete_current_shape(self):\n print(\"deleting shape!\")\n self.shapes.remove(self.current_shape)\n self.current_shape = None\n self.changed()", "def removeTerminal(self, term):\n if isinstance(term, NetTerminal):\n name = term._name\n Node.removeTerminal(self, name)", "def OnRemoveAutomation(self, event, automation):\n\n self.app.RemoveAutomation(automation)\n for child in self.GetChildren():\n child.Destroy()\n\n self.Draw()", "def down():\n turtleTmp.pendown()", "def remove_button(self):\n self.scene.remove_child(self.toggle_button_el)", "def delete_ball(self):\r\n self.movement = \"\"\r\n self.canvas.delete(self.ball)", "def reset(self):\n TNavigator.reset(self)\n TPen._reset(self)\n self._clear()\n self._drawturtle()\n self._update()", "def destroy(self):\n self.root.stop()", "def delete(self):\n\t\tself.canvas.delete('node_'+self.identifier)\n\t\tself.canvas.tag_unbind('node_'+self.identifier,\"<Any>\")", "def removeScene(self):\n del self.scene, self.imgPixmapItem", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def remove(self):\n\n\t\t\t\tself.parent.thing.remove_sheet(self.thing)\n\t\t\t\tdel self.parent[self.label]", "def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = 
self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()", "def clear_press(self):\n\n for win in self.window.additional_windows:\n win.del_win()\n\n pos = self.window.physics_canvas.physics_objects\n self.window.physics_canvas.physics_objects = []\n\n for obj in pos:\n self.window.physics_canvas.canvas.delete(obj.canvas_id)\n\n for force in self.window.physics_canvas.interacting_forces:\n force.remove()\n\n for particle in self.window.physics_canvas.particles:\n self.window.physics_canvas.canvas.delete(particle.canvas_id)", "def forceRemove( self ):\n scene = self.scene()\n if ( scene ):\n scene.forceRemove(self)", "def close(self):\n \n self.renderer.RemoveActor(self._crosshair.actor)\n self.renderer.RemoveActor(self._scalar_bar_actor)\n self.renderer.RemoveActor(self._orientation_annotation)\n self.renderer.RemoveActor(self._corner_annotation)\n \n for layer in self._layers :\n self.renderer.RemoveActor(layer.actor)\n \n for gui_annotation in self._gui_annotations.values() :\n self.renderer.RemoveActor(gui_annotation.shape_actor)\n self.renderer.RemoveActor(gui_annotation.text_actor)", "def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()", "def delete(self):\n # exit contains our clean up code\n self.exit()\n GenericAnimatedProp.GenericAnimatedProp.delete(self)", "def remove(self):\n if self._parent:\n self._parent.removeChild(self)\n else:\n self.clear()", "def remove_object(self, name):\n if name in self._objects:\n del self._objects[name]\n else:\n raise ValueError('Object {} not in scene!'.format(name))\n self.close_renderer()", "def remove_object_from_canvas(self, tk_object):\n self.canvas.delete(tk_object)", "def up():\n turtleTmp.penup()", "def __remove_brick(self, g_object):\n if type(g_object) == GRect:\n self.__window.remove(g_object)\n self.__bricks_total -= 1\n self.__score += 1\n self.__set_record_board()", "def destroy(self):\n\t\tfor team in range(len(self.dots)): #will cycle through each team\n\t\t\tfor i in range(len(self.dots[team])): #will cycle through each member of the team\n\t\t\t\tdot = self.dots[team][i]\n\t\t\t\tdot.removeNode()\n\t\tself.mousePosition.removeNode()\n\t\tself.mapimage.removeNode()\n\t\tself.map.removeNode()", "def cleanup(self):\r\n\r\n # Remove strip from window.\r", "def deleteBall(self):\n self._ball = None", "def __del_robot(self):\n if len(self.__robots) == 0:\n # Alert the user and return\n self.scene.append_to_caption(\n '<script type=\"text/javascript\">alert'\n '(\"No robot to delete\");</script>')\n return\n\n # Clear the robot visuals\n self.__robots[self.__selected_robot].set_reference_visibility(False)\n self.__robots[self.__selected_robot].set_robot_visibility(False)\n\n # Remove from UI\n new_list = []\n for name in self.__ui_controls.get('menu_robots').choices:\n new_list.append(name)\n\n del new_list[self.__selected_robot]\n del self.__robots[self.__selected_robot]\n del self.__teachpanel[self.__selected_robot]\n\n self.__selected_robot = 0\n # Update UI\n self.__reload_caption(new_list)\n # Select the top item\n if len(self.__ui_controls.get('menu_robots').choices) > 0:\n self.__ui_controls.get('menu_robots').index = 0", "def removeFrame(self, frame):\n for widget in frame.winfo_children():\n widget.destroy()\n\n frame.pack_forget()", "def removeTooltip(self): \n if self.tooltipWindow:\n self.window.remove_child(self.tooltipWindow) \n self.tooltipWindow.destroy ()\n 
self.tooltipWindow = None", "def destroy(self):\n for node in self.find_references():\n node.destroy()\n self._bld.RemoveObject(self.get_sobj())", "def RemoveSelf(self):\n self.__context.builder.WaveletRemoveSelf(self.GetWaveId(), self.GetId())\n # TODO(davidbyttow): Locally remove the robot.", "def remove(self):\r\n game_ref.remove(self)", "def destroy(self):\r\n self._tidy()\r\n self.stop()\r\n try:\r\n self.opengl.destroy(self)\r\n except:\r\n pass\r\n if self.external_mouse:\r\n try:\r\n self.external_mouse.stop()\r\n except:\r\n pass_\r\n try:\r\n self.mouse.stop()\r\n except:\r\n pass\r\n try:\r\n self.tkwin.destroy()\r\n except:\r\n pass\r\n Display.INSTANCE = None", "def destroy(self):\n tk.Frame.destroy(self)", "def unwatch(self, tid):\n link = self._refs.pop(tid, None)\n current = greenlet.getcurrent()\n if hasattr(current, 'unlink'):\n # This is a Gevent enhanced Greenlet. Remove the SpawnedLink we\n # linked to it.\n current.unlink(link)", "def on_stop(self):\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n palette = ui.palettes.itemById(self.palette_id)\r\n\r\n for handler in self.html_handlers:\r\n palette.incomingFromHTML.remove(handler)\r\n\r\n if palette:\r\n palette.deleteMe()\r\n\r\n super().on_stop()", "def done(self):\n turtle.done()", "def remove(self):\n self._world.remove_mob(self)", "def DelDiv(self):\n if self.created:\n self.CloseImage()\n command = \"\"\"$('#{}').remove();\"\"\".format(self.wid)\n get_ipython().run_cell_magic('javascript', '', command)\n self.created = False\n self.wid = uuid.uuid4().hex", "def remove_object(self, n_id):\r\n\r\n # remove shapes\r\n for patch in self.shapes[n_id]:\r\n patch.remove()\r\n del self.shapes[n_id]\r\n\r\n # remove text\r\n if self.show_labels:\r\n for text in self.labels[n_id]:\r\n text.remove()\r\n del self.labels[n_id]", "def destroy(self):\r\n self.visible = False", "def clearwin(event=None):\r\n # for child in mframe.winfo_children():\r\n # child.destroy()\r\n global mframe\r\n mframe.destroy()\r\n mframe = tkinter.Frame(main, width=800, height=600, background='pink')\r\n mframe.pack(fill=\"both\", expand=True, padx=20, pady=20)", "def remove(self):\n self.hide()\n self.deleteLater()", "def cog_unload(self):\n self._get_sketch_prompt.cancel()", "def __del__(self):\n\n # Delete sprite (if it has been defined)\n try:\n self.canvas.delete(self.sprite)\n except AttributeError:\n pass\n except tk.TclError:\n pass", "def delete_win(self, *args):\n if cmds.window(self.win_name, ex=1):\n cmds.deleteUI(self.win_name)", "def remove_node(self, node):\n self.nodes.remove(node)\n node.close()", "def clear_scene(self, event):\n self.shapes = []\n self.redraw()", "def unregister(self):\r\n self.__screen.unregister_asteroid(self)", "def discard(self) -> None:\n\n self.plot.close()", "def del_lx(self):\r\n del self._lx", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n try:\n self._turtle.shape('pen.gif')\n except:\n self._turtle.shape('classic')\n self._turtle.color('red')\n self.speed = 0\n \n #pair = self._turtle.color()\n self._pencolor = self._turtle.color()[0]\n self._fillcolor = self._turtle.color()[0]", "def clear(self):\n self._delayvalue = _CFG[\"delay\"]\n self._colormode = _CFG[\"colormode\"]\n self._delete(\"all\")\n self._bgpic = self._createimage(\"\")\n self._bgpicname = \"nopic\"\n self._tracing = 1\n self._updatecounter = 0\n self._turtles = []\n self.bgcolor(\"white\")\n for btn in 1, 2, 3:\n self.onclick(None, btn)\n 
self.onkeypress(None)\n for key in self._keys[:]:\n self.onkey(None, key)\n self.onkeypress(None, key)\n Myturtle._pen = None", "def destroy(self):\n gameengine.GameEngine().game_objects.remove(self)", "def close(self):\n def destroy(comp):\n for child in comp.children:\n destroy(child)\n comp.destroy()\n \n destroy(get_base().node_manager.wrap(self))\n get_base().plugin_manager.on_scene_close()\n \n # Now remove the root node. If the root node was render, reset base\n # in order to remove and recreate the default node set.\n if self.rootNp is get_base().render:\n get_base().reset()\n\n self.rootNp.removeNode()", "def remove_animation(attr):\n pm.cutKey(attr, clear=True)", "def remove_from_hand(self):\n pass", "def clear(self):\r\n if self.groundPath:\r\n self.groundPath.clearProjectTexture(self.stage)\r\n self.groundPath = None\r\n\r\n if self.lightPath:\r\n self.lightPath.detachNode()\r\n self.lightPath = None\r\n\r\n if self.cameraPath:\r\n self.cameraPath.detachNode()\r\n self.cameraPath = None\r\n self.camera = None\r\n self.lens = None\r\n\r\n if self.buffer:\r\n base.graphicsEngine.removeWindow(self.buffer)\r\n self.tex = None\r\n self.buffer = None", "def __del__(self):\n #print 'del in'\n if hasattr(self,'root'):\n #print 'del root'\n if self.root:\n #print 'del circ'\n self.root.delete_circulars()\n del self.root", "def _onRemove(self, event):\n index = self.colorlist.GetSelection()\n del self.graphColors[index]\n self._tupleListToStrings()\n if len(self.graphColors) > 0:\n self.colorlist.SetSelection(0)\n self._updateButtons(None)", "def remove(self, widget):\n self.widgets.remove(widget)\n widget.destroy()", "def destroy (self,event=None):\n \n # This is enough to disable fillbox.\n self.top.withdraw()", "def remove_window(self, window: AbstractView) -> None:\n self._logger.debug(\"running\")\n self.removeSubWindow(window)\n self._logger.debug(\"done\")", "def remove(self):\n self.model_or_sim.remove_package(self)", "def destroy(self):\n widget = self.widget\n if widget:\n # On Windows, it's not sufficient to simply destroy the\n # widget. It appears that this only schedules the widget \n # for destruction at a later time. 
So, we need to explicitly\n # unparent the widget as well.\n widget.setParent(None)\n if widget.isWidgetType():\n widget.destroy()\n self.widget = None", "def _remove_buffer(self):\n if self._buffer is not None:\n self._engine.remove_window(self._buffer)\n self._buffer = None\n self._region = None", "def delete_window(self):\r\n self.mw.eval('::ttk::CancelRepeat')\r\n SlTrace.lg(\"Closing windows\")\r\n ''' \r\n ActiveCheck.clear_active() # Disable activities\r\n if self.score_win is not None:\r\n self.score_win.destroy()\r\n self.score_win = None\r\n if self.mw is not None and self.mw.winfo_exists():\r\n self.mw.quit()\r\n self.mw.destroy()\r\n self.mw = None\r\n '''\r\n if self.on_exit is not None:\r\n self.on_exit()\r\n \r\n sys.exit() # Else quit\r", "def remove_obj(self, obj_name):\n self.scene.remove_world_object(obj_name)", "def remove_brush(self, brush: 'Solid') -> None:\n try:\n self.brushes.remove(brush)\n except ValueError:\n pass # Already removed.", "def delete(self):\n del self.shx.atoms[self.index]", "def end_fill():\n turtleTmp.end_fill()", "def destroy(self):\n self.window.destroy_output_panel(self.name)", "def destroy (self,event=None):\n \n self.top.withdraw() # Don't allow this window to be destroyed.", "def remove(self) -> None:\n self.map.remove_ent(self)", "def __del__(self):\r\n Phidget.dispose(self)", "def remove_self(self):\n self.parent.remove(self.element)", "def destroy_window(self) -> None:\n self.master.destroy()\n self.master.master.create_right_left_containers()", "def remove(self):\n if self.removed:\n return\n self._remove()\n self.removed = True", "def DeleteWindow(self):\r\n\r\n if self._wnd:\r\n self._wnd.Destroy()\r\n self._wnd = None", "def close(self):\n if(screen == self):\n screen = None", "def removeFromParentAndDelete(self):\n return _libsbml.KineticLaw_removeFromParentAndDelete(self)", "def remove(self):\n if isinstance(self, Vertex):\n self.graph.remove_vertex(self)\n else:\n self.graph.remove_edge(self)" ]
[ "0.77124417", "0.750477", "0.66874486", "0.66874486", "0.6680987", "0.6195617", "0.6168465", "0.6085901", "0.58742696", "0.5845661", "0.5834183", "0.5833665", "0.58261734", "0.57959485", "0.5791254", "0.5777234", "0.56776667", "0.5611826", "0.5599132", "0.5564557", "0.5559916", "0.55426276", "0.5541145", "0.5524702", "0.5507968", "0.5495123", "0.54903394", "0.5483298", "0.5476851", "0.54644626", "0.5445585", "0.5423513", "0.54192877", "0.54090947", "0.53885436", "0.53799295", "0.5373187", "0.53545606", "0.5348984", "0.53384554", "0.5327083", "0.5317627", "0.5285014", "0.52750385", "0.5240642", "0.5228981", "0.5225643", "0.52186346", "0.52125216", "0.5211026", "0.52037835", "0.52002406", "0.5199642", "0.5197979", "0.51969695", "0.5162641", "0.5162031", "0.5159614", "0.51450247", "0.5136749", "0.513511", "0.5129977", "0.51177144", "0.51132447", "0.5113167", "0.5110586", "0.5107795", "0.5106833", "0.5106092", "0.5105349", "0.51023906", "0.5095861", "0.50723124", "0.50718534", "0.5067906", "0.50651425", "0.50609386", "0.50559664", "0.504328", "0.50417143", "0.50415325", "0.50410485", "0.50363106", "0.5024609", "0.5013426", "0.50133455", "0.5003046", "0.50012416", "0.499656", "0.4994013", "0.49893236", "0.49758026", "0.49706563", "0.49699697", "0.4960099", "0.49588442", "0.49580467", "0.49567005", "0.49566185", "0.49561185" ]
0.7395165
2