query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (sequence, 19-20 items) | metadata (dict) |
---|---|---|---|
Iterator yielding 1 HyperLogLog.hmap per sequence in given iterable list_of_sequences iterable of iterable | def compute_hmaps(list_of_sequences):
    for sequence in list_of_sequences:
        hll = HLL.HyperLogLog64(k)
        hll.extend(sequence)
        yield hll | [
"def init_compute_hmaps(k):\n def compute_hmaps(list_of_sequences):\n \"\"\"\n Iterator yielding 1 HyperLogLog.hmap per sequence in given iterable\n \n list_of_sequences - iterable of iterable\n \"\"\"\n for sequence in list_of_sequences:\n hll = HLL.HyperLogLog64(k)\n hll.extend(sequence)\n yield hll\n return compute_hmaps",
"def seq_map(seq, container, one_result=True):\n results = list(list(seq.run([val])) for val in container)\n if one_result and not all(map(lambda l: len(l) == 1, results)):\n raise lena.core.LenaValueError(\n \"some results are not of length one, {}\".format(results)\n )\n if one_result:\n return [l[0] for l in results]\n return results",
"def interSequence(seq_maps):\n seq_map = {}\n \n return seq_map",
"def collect(sequence, function):\n for seq in __builtin__.map(function, sequence):\n for x in seq:\n yield x",
"def gmap(generator, f):\n for item in generator:\n yield f(item)",
"def _map_input(self, input_stream):\n for key, value in self.reader(input_stream):\n mapper_result = self.mapper((key,value))\n if mapper_result:\n for k, v in mapper_result:\n yield k, v\n if self.final_mapper != NotImplemented:\n for k,v in self.final_mapper():\n yield k,v\n self._flush_batch_incr_counter()",
"def iter(self) -> Iterator[Sequence]:\n ...",
"def iteritems(self):\n for seq in self:\n yield seq.identifier, seq",
"def _all_splits(seq):\n for index in range(1, len(seq)):\n yield (seq[0:index], seq[index:])",
"def hsps(self) -> Generator[Union[HSP, LSP], None, None]:\n for readAlignments in self:\n for readAlignment in readAlignments:\n for hsp in readAlignment.hsps:\n yield hsp",
"def __iter__(self):\n\t\tfor k in self.__map:\n\t\t\tyield k",
"def map(self, seqs, ids):\n return PoolIterator(self, seqs, ids, self.nproc * 2 + 10)",
"def triple_map(func, iterable):\n # YOUR CODE GOES HERE #\n for i in iterable:\n yield func(func(func(i)))",
"def _get_iterator(self, data: List[str], batch_size: int, sequence_length: int) -> Iterator[Tuple[torch.LongTensor, torch.LongTensor]]:\n\n ids = self._to_ids(data, batch_size)\n iterator = []\n for i in range(0, ids.size(1) - sequence_length, sequence_length):\n inputs = ids[:, i:i+sequence_length]\n targets = ids[:, (i+1):(i+1)+sequence_length]\n iterator.append((inputs, targets))\n\n return iter(iterator)",
"def iter_zipped_logs(*log_streams, prefix=\"> \", show_intervals=None, show_timestamp=False):\n\n # A sorted queue of (timestamp, stream) tuples (lowest-timestamp first)\n streams = PriorityQueue()\n stream_names = []\n for i, stream in enumerate(log_streams):\n if not isinstance(stream, tuple):\n tstream = TimestampedStream(stream, prefix)\n else:\n tstream = TimestampedStream(*stream)\n\n n = tstream.get_next()\n if n:\n stream_names.append(tstream.name)\n streams.put(n)\n\n last_ts = None\n if show_intervals:\n from easypy.units import Duration\n\n def formatted(line, current_ts, last_ts):\n fmt = \"{:>7}{}\"\n if (current_ts and last_ts):\n return fmt.format(Duration(current_ts - last_ts).render(show_intervals), line)\n else:\n return fmt.format(\"\", line)\n else:\n def formatted(line, current_ts, last_ts):\n return line\n\n if show_timestamp:\n _formatted = formatted\n\n def formatted(line, current_ts, last_ts):\n line = _formatted(line, current_ts, last_ts)\n dt = datetime.fromtimestamp(current_ts)\n return \"{:%Y-%m-%d %H:%M:%S.%f} {}\".format(dt, line)\n\n while not streams.empty():\n current_ts, line, stream = streams.get()\n yield formatted(line, current_ts, last_ts)\n last_ts = current_ts\n while True:\n n = stream.get_next()\n if not n:\n break # stream ended\n ts, line, stream = n\n if ts and ts > current_ts:\n streams.put((ts, line, stream))\n break # timestamp advanced\n yield formatted(line, ts, last_ts)\n if ts:\n last_ts = ts",
"def input_generator():\n for i, line in enumerate(sorted_inputs):\n if i % batch_size == 0:\n batch_num = (i // batch_size) + 1\n yield _encode_and_add_eos(line, subtokenizer)",
"def process_samples(lims_process):\n for artifact in lims_process.all_inputs():\n for lims_sample in artifact.samples:\n yield {'sample': lims_sample, 'artifact': artifact}",
"def mapmany(self, function):\r\n return Iterable(itertools.chain.from_iterable(map(function, self.__iterable)))",
"def multi_stream_iter(client, log_group, streams, positions=None):\n positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}\n event_iters = [\n log_stream(client, log_group, s, positions[s].timestamp, positions[s].skip) for s in streams\n ]\n events = []\n for s in event_iters:\n if not s:\n events.append(None)\n continue\n try:\n events.append(next(s))\n except StopIteration:\n events.append(None)\n\n while some(events):\n i = argmin(events, lambda x: x[\"timestamp\"] if x else 9999999999)\n yield (i, events[i])\n try:\n events[i] = next(event_iters[i])\n except StopIteration:\n events[i] = None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns an estimate to the number of distinct elements in items items a sequence of elements k number of hash functions spark_context a spark context | def estimate_distinct_elements_parallel(lists_of_items, k, spark_context):
    hll = spark_context.parallelize(lists_of_items) \
        .mapPartitions(init_compute_hmaps(k)) \
        .reduce(lambda x, y: x + y)
    return hll.cardinality | [
"def estimate_distinct_elements(items, k):\n hll = HLL.HyperLogLog64(k)\n hll.extend(items)\n return hll.cardinality",
"def get_count_distinct_user():\n count_distinct_users = rdd_review_data\\\n .map(lambda x: x[\"user_id\"])\\\n .distinct()\\\n .count()\n\n results[\"C\"] = count_distinct_users",
"def frequent_itemset(transactions, minsup):\n pass",
"def n_keys_for_partition(self, partition):\n pass",
"def _calc_distinct_id_counts(db_config, month, year):\n with utils.create_db_connection(db_config) as conn, conn.cursor() as cursor, utils.CodeProfiler() as cp:\n results = defaultdict(lambda: dict(num_triplets=0, num_imeis=0, num_imsis=0, num_msisdns=0,\n num_imei_imsis=0, num_imei_msisdns=0, num_imsi_msisdns=0))\n cursor.execute(\n \"\"\"SELECT operator_id,\n data_date,\n (hll_cardinality(COALESCE(hll_union_agg(triplet_hll), hll_empty())))::BIGINT AS num_triplets,\n (hll_cardinality(COALESCE(hll_union_agg(imei_hll), hll_empty())))::BIGINT AS num_imeis,\n (hll_cardinality(COALESCE(hll_union_agg(imsi_hll), hll_empty())))::BIGINT AS num_imsis,\n (hll_cardinality(COALESCE(hll_union_agg(msisdn_hll), hll_empty())))::BIGINT AS num_msisdns,\n (hll_cardinality(COALESCE(hll_union_agg(imei_imsis_hll), hll_empty())))::BIGINT\n AS num_imei_imsis,\n (hll_cardinality(COALESCE(hll_union_agg(imei_msisdns_hll), hll_empty())))::BIGINT\n AS num_imei_msisdns,\n (hll_cardinality(COALESCE(hll_union_agg(imsi_msisdns_hll), hll_empty())))::BIGINT\n AS num_imsi_msisdns\n FROM daily_per_mno_hll_sketches\n WHERE date_part('month', data_date) = %(month)s\n AND date_part('year', data_date) = %(year)s\n GROUP BY CUBE (operator_id, data_date)\n \"\"\",\n {'month': month, 'year': year}\n )\n\n results = [res._asdict() for res in cursor]\n\n return results, cp.duration, [cp.duration]",
"def get_cluster_count_all(self, context, filters=None):",
"def getNumberOfDistinctParticipatingOptions(self):",
"def collect_reducer_count(values):\n return len(values)",
"def count(self, item):\n return len([1 for x in self.data.values() if x == item])",
"def train(self):\n # get the item user history dict\n item_to_users = dict()\n for index, rows in self.data.iterrows():\n uid = rows['user_id']\n item = rows['item_id']\n if item not in item_to_users:\n item_to_users[item] = dict()\n if uid not in item_to_users[item]:\n item_to_users[item][uid] = 1\n\n # caculate the item similarity\n item_similarity = dict()\n for item1 in item_to_users:\n item_similarity[item1] = dict()\n users1 = set(item_to_users[item1].keys())\n for item2 in item_to_users:\n if item2 != item1:\n users2 = set(item_to_users[item2].keys())\n sim = len(users1 & users2)/np.sqrt((len(users1)*len(users2)))\n if item2 not in item_similarity[item1]:\n item_similarity[item1][item2] = sim\n else:\n item_similarity[item1][item2] += sim\n return item_similarity",
"def _occurrences(item, items):\n n = 0\n for e in items:\n if e is item:\n n = n + 1\n\n return n",
"def _get_counts_for_unique(input_x, unique_x):\n dic = {}\n o_array = input_x.asnumpy()\n for idx in range(o_array.size):\n val = o_array[idx]\n if val not in dic:\n dic[val] = 1\n else:\n dic[val] += 1\n\n u_array = unique_x.asnumpy()\n counts_lst = [dic[val] for val in u_array]\n\n return Tensor(onp.array(counts_lst), input_x.dtype)",
"def CountUniqueElements(UniqueElements,ProcessedString):\n \n nUnique=len(UniqueElements)\n localCounter=[0 for k in range(nUnique)]\n UniqueDictionary=UniqueToDictionary(UniqueElements)\n \n for val in ProcessedString:\n try:\n localPosition=UniqueDictionary[val]\n localCounter[localPosition]=localCounter[localPosition]+1\n except KeyError:\n pass\n return localCounter",
"def getSizePlueOneItemSet(Klist):\n candidate = list()\n for e in Klist:\n for f in Klist:\n a = e.union(f)\n if len(a) == len(e)+1:\n candidate.append(a)\n #print(candidate)\n #print(len(candidate))\n newlist = []\n for i in candidate:\n if i not in newlist:\n newlist.append(i)\n candidate = newlist\n #print(candidate)\n \"\"\" here is the normal pruning process \"\"\"\n newlist = []\n for e in candidate:\n counter = 0\n for f in globOriginalList:\n if(f.issuperset(e)):\n counter = counter+ 1\n if((counter/float(globNumberOfTransactions)) >= globMinSup):\n newlist.append(e)\n #print(len(candidate))\n return newlist",
"def getCount(self, combs: list):\n counts_dict = defaultdict(int)\n for itemSet in combs:\n itemSet = tuple(sorted(itemSet))\n for group in self.data:\n if set(itemSet) <= group:\n counts_dict[itemSet] += 1\n\n return counts_dict",
"def find_clu_size_seq(self):\n if np.all([type(i)==int for i in self.clusters]):\n sorted_cluster = sorted(self.clusters)\n else:\n sorted_cluster = sorted(self.clusters, key=lambda v: str(v))\n return [len(self.clu2elm_dict[clu]) for clu in sorted_cluster]",
"def get_num_keys_total(self) -> int:\n leaves = self.get_leaf_nodes()\n return sum([leaf.get_key_size() for leaf in leaves])",
"def find_frequent_items_by_sequencset(transactions, frequent_sequences):\n items = {}\n\n for sequence in frequent_sequences:\n for item in sequence:\n if item not in items:\n items[item]=0\n\n for transaction in transactions:\n for item in transaction:\n if item in items:\n items[item] += 1\n\n\n items = {k: v for k, v in sorted(items.items(), key=lambda item: item[1], reverse = True)}\n return items",
"def compute_distribution(result_set, print_out=True):\n cnt_len = {1:0, 2:0, 3:0, 4:0, 5:0}\n cnt_duplicates = 0\n for r in result_set:\n r = r[0]\n tmp = []\n for l in r:\n tmp.extend(l)\n len_tmp = len(tmp)\n cnt_len[len_tmp] += 1\n if len(set(tmp)) < len_tmp:\n cnt_duplicates += 1\n\n if print_out:\n print(f\"Distribution of lengths: {cnt_len}\")\n print(f\"Sequences containing duplicates: {cnt_duplicates} / {len(result_set)}\")\n else:\n return cnt_len, cnt_duplicates"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The sellers geocode | def geocode(self):
    return self.__geocode | [
"def geocode(self, recode=False):\n if not self.lat or not self.long or recode:\n # get the geocoordinates for the adress\n # TODO log geocodings into the db\n g = geocoders.Google(settings.GOOGLE_API_KEY)\n adr = '%s, %s %s, %s' % (self.street, self.zipcode, self.city, self.country)\n (self.lat, self.long) = g.geocode(adr)[1]\n self.save()\n return (self.lat, self.long)",
"def geo_lookup(self, event):\n\n self.log('Geocode request:', event.data)\n\n lookup = self.geocode(event.data)\n response = {\n 'component': 'isomer.ors.ors',\n 'action': 'geo_lookup',\n 'data': lookup\n }\n\n self.fireEvent(send(event.client, response))",
"def geo_lookup(self, event):\n\n place = event.data\n\n self.log('Geocode request:', place)\n\n client = openrouteservice.Client(\n key=self.config.ors_api_key)\n address = openrouteservice.client.pelias_autocomplete(\n client,\n text=place\n )\n\n self.log(address, pretty=True, lvl=debug)\n\n response = {\n 'component': 'isomer.ors.ors',\n 'action': 'geo_lookup',\n 'data': address\n }\n\n self.fireEvent(send(event.client.uuid, response))",
"def user_geocode(geocode_dict):\n\tuser_data = geocode_dict['results'][0]['geometry']['location']\n\tuser_loc = (user_data['lat'], user_data['lng'])\n\n\treturn user_loc",
"def test_geocode_address(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"370 17th St, Denver, CO 80202\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.7434926) \n self.assertApxEqual(results[0].lng, -104.9886368)",
"def test_geocode(self):\n self._select_geocoder()\n loc = Location()\n latlng = loc._geocode(\"370 17th St Denver CO 80202\")\n self.assertApxEqual(latlng[0], 39.7438167)\n self.assertApxEqual(latlng[1], -104.9884953)",
"def geocode_trademarks(df, geo_code=['long','lat']):\n df_c = df.copy()\n # We have trailing spaces in the postcodes\n \n df_c[\"postcode\"] = [\n x.strip() if pd.isnull(x) is False else np.nan for x in df_c['postcode']]\n\n # Read nspl\n nspl = pd.read_csv(f\"{DATA_RAW}/nspl/Data/NSPL_FEB_2020_UK.csv\") \n\n # The trademark dataset only provides information for the first part of the postcode\n # We split the nspl postcodes to merge on a smaller dataset\n nspl[\"pcds_1st\"] = nspl[\"pcds\"].apply(lambda x: x.split(\" \")[0])\n\n nspl_short = nspl.drop_duplicates(\"pcds_1st\")[[\"pcds_1st\"]+geo_code]\n\n merged = pd.merge(df_c, nspl_short, left_on=\"postcode\", right_on=\"pcds_1st\")\n\n return merged",
"def location_search(cls, postcode):\n api_url = f\"http://v0.postcodeapi.com.au/suburbs/{postcode}.json\"\n response = requests.get(api_url)\n return json.loads(response.text) or None",
"def geocode_location(address):\n try:\n result = Geocoder.geocode(address)\n lat, lng = result[0].coordinates\n if result.city != \"San Francisco\": # Database only returns foodtrucks in San Francisco\n return None\n return lat, lng\n except:\n return None",
"def test_geocode_city_state(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"golden, co\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.756655, .001) \n self.assertApxEqual(results[0].lng, -105.224949, .001)",
"def test_geocode_city(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"Denver\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.737567, .01)\n self.assertApxEqual(results[0].lng, -104.9847179, .01)",
"def location(locations):\r\n ctx = ssl.create_default_context(cafile=certifi.where())\r\n geopy.geocoders.options.default_ssl_context = ctx\r\n\r\n geo = Nominatim(user_agent=\"map_main.py\", timeout=10)\r\n location1 = geo.geocode(locations)\r\n return location1.latitude, location1.longitude",
"def rlis_geocode(addr_str):\n\n base_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n url_template = '{0}?token={1}&input={2}&form=json'\n url = url_template.format(base_url, ops.rlis_token, addr_str)\n response = requests.get(url)\n\n if response.status_code != 200:\n print 'unable to establish connection with rlis api'\n print 'status code is: {0}'.format(response.status_code)\n return response.status_code\n\n json_rsp = response.json()\n if json_rsp['error']:\n print 'the following address could not be geocoded by the rlis api:'\n print \"'{0}'\".format(addr_str)\n print 'the following error message was returned:'\n print \"'{0}'\".format(json_rsp['error']), '\\n'\n return None\n else:\n return json_rsp['data'][0]",
"def getGeocodeLocation(inputString):\n\n city = inputString.replace(\" \", \"+\")\n\n # Want results back in a JSON. Adding API key and input string to query.\"\n url = f\"https://maps.googleapis.com/maps/api/geocode/json?address={city}&key={google_api_key}\"\n\n # Request url and make the response a json that Python can read.\n r = requests.get(url).json()\n \n latitude = r[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n longitude = r[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\n return (latitude, longitude)",
"def geocode_address():\n address = request.args.get('address', '380 New York St, Redlands, CA')\n geocode_dict = geocode_address_executor(address)\n\n # write address to db\n create_address(session, search_string=address,\n lat=geocode_dict['location']['y'],\n lon=geocode_dict['location']['x'])\n return json.dumps(geocode_dict)",
"def test_geocode_with_default_geocoder(self):\n self._select_geocoder()\n geocoder = get_geocoder()\n address = \"370 17th St, Denver\"\n results = list(geocoder.geocode(address, exactly_one=False))\n self.assertTrue(len(results) > 0)\n place, (lat, lng) = results[0]\n self.assertEqual(lat, 39.7434926) \n self.assertEqual(lng, -104.9886368)",
"def get_geo_location(address, max_result):\n if Geocoder.isPresent():\n print(\"GeoCoder is present...\")\n geo = Geocoder(PythonActivity.mActivity, Locale.getDefault())\n print(\"Looked up addresses\")\n java_list = geo.getFromLocationName(address, max_result)\n if java_list:\n print(\"List found...\")\n addresses = []\n for addr in java_list.toArray():\n addresses.append(_GeoAddress(\n city=str(addr.getLocality()),\n county=str(addr.getSubAdminArea()),\n country=str(addr.getAdminArea()),\n postcode=str(addr.getPostalCode()),\n second_address=str(addr.getThoroughfare()),\n house_number=str(addr.getSubThoroughfare()),\n latitude=addr.getLatitude(),\n longitude=addr.getLongitude()\n ))\n return addresses\n else:\n print(\"No list found...\")\n else:\n print(\"No GeCoder present\")\n return []",
"def get_city_name(zipcode):\n try:\n city = geocode(zipcode)\n city = find_between(city, '\"', '\"') # remove json formatting\n city = city.split(', ') # separate into parts\n city[1] = remove_numbers(city[1])\n return ', '.join(city).strip() # return final value\n except:\n print 'Your city was not found, resorting to default.'\n return 'Austin, TX, USA' # show sample on break",
"def geocode_city_state(city, state):\n # geolocator = Nominatim()\n # location = geolocator.geocode(city + \", \" + state)\n # query db for similar city\n city_entry = City.query.filter(City.state==state, City.city.like('%'+city+'%')).first()\n\n return jsonify({'lat': city_entry.lat, 'lng': city_entry.lng})",
"def geocode(self):\n\n # If found in cache, return coords\n if self._address in Location._geocode_cache:\n lat, lon = Location._geocode_cache[self._address]\n self.set_lat_lon(lat, lon)\n return\n\n # Real geocoding begins here\n try:\n conn = httplib.HTTPSConnection(\"maps.googleapis.com\")\n params = {'sensor' : 'false', 'address' : self._address}\n url = \"/maps/api/geocode/xml?\" + urllib.urlencode(params)\n conn.request(\"GET\", url)\n r = conn.getresponse()\n if r.status == 200:\n geo_xml = r.read()\n if geo_xml:\n # Find lat, lon in returned XML\n t = xml.etree.ElementTree.fromstring(geo_xml)\n lat = t.findall('result/geometry/location/lat')\n lon = t.findall('result/geometry/location/lng')\n if lat and lon:\n # Successful\n self.set_lat_lon(float(lat[0].text),\n float(lon[0].text))\n return\n else:\n err = \"couldn't resolve address to lat,lon. Try another.\"\n else:\n err = \"not responding. Try later\"\n else:\n err = \"or network failure. Try later\"\n except Exception:\n err = \"exception\"\n if err:\n raise Usage(\"Google geocoder \" + err)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Invoice number of the payment | def invoice_number(self):
    return self.__invoice_number | [
"def generate_invoice_no(self):\n part1 = self.compName\n print(\"CNAME:\", str(part1))\n cname = str(part1)\n cnamehalf = cname[:3]\n t = datetime.datetime.now()\n t = t.strftime('%m%d%Y%H%M%S')\n par = str(t)\n part2 = str(par)\n aaa = cnamehalf.upper() + part2\n self.billNumber = aaa",
"def bitpay_invoice_id(self):\n return self._bitpay_invoice_id",
"def get_current_invoiceID() -> str:\n return DATABASE.get('/Invoices/currentInvoiceID', None)",
"def bill_number(self):\n return self._bill_number",
"def increment_current_invoiceID() -> str:\n invoiceID = int(DATABASE.get('/Invoices/currentInvoiceID', None))\n invoiceID += 1\n DATABASE.put(\"/Invoices\", \"currentInvoiceID\", invoiceID)\n return str(invoiceID)",
"def invoice_details(request, invoice_id):\n header = get_api_header()\n resp = r.get(_url_invoices(_base_url(request)),\n headers=header, params={'id': invoice_id},verify=False)\n if resp.status_code != 200:\n return {}\n else:\n result = json.loads(resp.content)\n if result[u'count'] == 0:\n return {}\n else:\n return result[u'results'][0]",
"def get_bill_payment_id(self):\n return self.bill_payment_id",
"def invoice_by_number(self, context, params):\n\n accesstoken = util.get_xero_client(context[\"headers\"])\n headers = {\n \"Accept\" : \"application/json\",\n \"xero-tenant-id\" : params.get(\"organization_id\"),\n \"Authorization\" : f\"Bearer {accesstoken}\"\n }\n number = params.get('number')\n response = requests.request(\"GET\", f'{self.base_url}Invoices?InvoiceNumbers={number}', headers=headers).text\n response = json.loads(response)\n\n data = XeroInvoice(\n organization_id= params.get(\"organization_id\"),\n item_code= response[\"Invoices\"][0][\"LineItems\"][0][\"Item\"][\"Code\"],\n invoice_id= response[\"Invoices\"][0][\"InvoiceID\"],\n description= response[\"Invoices\"][0][\"LineItems\"][0][\"Description\"],\n name= response[\"Invoices\"][0][\"Contact\"][\"Name\"],\n status= response[\"Invoices\"][0][\"Status\"],\n currency= response[\"Invoices\"][0][\"CurrencyCode\"], \n creation_date= response[\"Invoices\"][0][\"DateString\"],\n due_date= response[\"Invoices\"][0][\"DueDateString\"],\n branding_theme= response[\"Invoices\"][0][\"BrandingThemeID\"],\n number= response[\"Invoices\"][0][\"InvoiceNumber\"],\n reference= response[\"Invoices\"][0][\"Reference\"],\n line_items_type= response[\"Invoices\"][0][\"LineAmountTypes\"],\n quantity= response[\"Invoices\"][0][\"LineItems\"][0][\"Quantity\"],\n unit_price= response[\"Invoices\"][0][\"LineItems\"][0][\"UnitAmount\"],\n discount= response[\"Invoices\"][0][\"LineItems\"][0][\"DiscountRate\"],\n account= response[\"Invoices\"][0][\"LineItems\"][0][\"AccountCode\"],\n tax_rate= response[\"Invoices\"][0][\"LineItems\"][0][\"TaxAmount\"]\n )\n return data.__dict__",
"def test_sendInvoice(recipient: str = CONTACT_CARD, amount: int = 10) -> 'reference_number':\r\n\r\n # Action\r\n status, result = u.sendInvoice(\"sendInvoice to contact card\", recipient, amount)\r\n\r\n # Assertion\r\n AssertResultIsRefNum(status, result)",
"def test_get_receipts_by_invoice(self):\n pass",
"def invoice_print(self):\n self.ensure_one()\n self.sent = True\n return self.env['report'].get_action(self, 'ioud_new_invoice.ioud_new_invoice')",
"def payment_id(self):\n return numbers.PaymentID(hexlify(self._decoded[65:-4]).decode())",
"def cancel_invoice(self, payment_account: PaymentAccount, # pylint:disable=unused-argument, no-self-use\n inv_number: str): # pylint: disable=unused-argument\n return None",
"def get_invoice(trip):\n invoice = ''\n if 'invoice' in trip.keys():\n invoice = trip['invoice']\n if invoice != '':\n return invoice\n if invoice == '' and 'jobs' in trip.keys():\n try:\n invoice = trip['jobs']\n except KeyError as e:\n invoice = ''\n else:\n invoice = ''\n if invoice == '':\n if 'ShipmentId' in trip.keys():\n invoice = trip['ShipmentId']\n if invoice == '':\n if 'lr_number' in trip.keys():\n invoice = trip['lr_number']\n return invoice",
"def get_payment_description(self, inv):\n description = '%s Invoice %d form %s Form Entry Id %d billed to %s %s' % (\n get_setting('site', 'global', 'sitedisplayname'),\n inv.id,\n self.form.title,\n inv.object_id,\n inv.bill_to_first_name,\n inv.bill_to_last_name,\n )\n\n return description",
"def generate_order_number(self, cart):\n self.no_order = ORDER_BASE + cart.id\n return str(self.no_order)",
"def get_id(self):\n if self.integration_number is None:\n return '1'\n else:\n return str(self.integration_number + 1)",
"def test_declineInvoice() -> 'reference_number':\r\n\r\n # Setup\r\n status = \"\"\r\n result = \"\"\r\n\r\n # Action\r\n _, invoices = u.getInvoices(getInvoicesParams)\r\n incoming_invoices = [invoice for invoice in invoices if invoice['direction'] == 'Incoming'\r\n and invoice['status'] == 'Awaiting']\r\n if len(incoming_invoices) > 0 and 'Error' not in incoming_invoices:\r\n status, result = u.declineInvoice(incoming_invoices[0]['invoiceid'])\r\n else:\r\n raise Exception(\"There is no incoming invoices or got Error on request\")\r\n\r\n # Assertion\r\n AssertResultIsRefNum(status, result)",
"def create_invoice(self):\n tax = .07 # tax for calculation\n total = 0 # var for total tax\n print(repr(self.customer))\n for item in self._items_with_price:\n print(item + '.....$' + str(self._items_with_price[item]))\n total = self._items_with_price[item] + total\n tax = round(total * tax, 2)\n total = tax + total\n\n print('Tax........' + \"${:,.2f}\".format(tax))\n print('Total...... ' + \"${:,.2f}\".format(total))",
"def update_invoice(self, # pylint:disable=too-many-arguments,no-self-use,unused-argument\n payment_account: PaymentAccount, # pylint: disable=unused-argument\n line_items: [PaymentLineItem], invoice_id: int, # pylint: disable=unused-argument\n paybc_inv_number: str, reference_count: int = 0, # pylint: disable=unused-argument\n **kwargs):\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a key for `COURSIER_CACHE` determined by the configured repositories. This helps us avoid a cache poisoning issue that we uncovered in 14577. | def _coursier_cache_prefix(self) -> str:
    sha = sha256()
    for repo in self.repos:
        sha.update(repo.encode("utf-8"))
    return sha.digest().hex() | [
"def _get_cache_key(self, **kwargs):\n key = 'cartodb_%s_' % _geohash.encode(\n kwargs.pop('lat'), kwargs.pop('lon'))[:8]\n key += '_'.join([\n '%s=%s' % (k, kwargs[k]) for k in sorted(kwargs.iterkeys())])\n return key",
"def get_cache_key(self):\n\n return self.cache_key",
"def cache_key(self):\n return \" \".join([\n str(self.query._Query__kind),\n str(self.query._Query__ancestor),\n str(self.query._Query__filters),\n str(self.query._Query__orders),\n str(self.query._Query__app),\n str(self.query._Query__namespace)\n ]).replace(\" \", \"_\")",
"def getCacheKey(self):\n\t\treturn self.cacheKey",
"def _get_cache_key(r: WSGIRequest, c: BaseCache) -> str:\n r = _chop_querystring(r)\n r = _chop_cookies(r)\n return get_cache_key(r, None, r.method, c)",
"def cache_key(self, url):\n\n return f\"IXF-CACHE-{url}\"",
"def _get_cache_key(self, **kwargs):\n m = md5()\n for significant_kwarg in self.significant_kwargs:\n key, to_str = significant_kwarg\n m.update(to_str(kwargs[key]))\n\n if hasattr(self, 'cache_prefix'):\n cache_prefix = self.cache_prefix\n else:\n cache_prefix = '%s.%s' % (self.__module__, self.__name__)\n return '%s:%s' % (cache_prefix, m.hexdigest())",
"def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))",
"def get_cache_key(self, request, view):\n ip_address = request.data.get('ip_address')\n return self.cache_format % {\n 'scope': self.scope,\n 'ident': ip_address or self.get_ident(request)\n }",
"def cache_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cache_name\")",
"def redis_cache_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"redis_cache_id\")",
"def get_cache_filename():\n filename = '{}.json'.format(get_project_name())\n return os.path.join(get_cache_folder(), filename)",
"def __cache_key__(*args, **kwargs):\n return args_to_key(base, args, kwargs, False)",
"def getCacheName(self, type):\n\t\t\n\t\treturn None",
"def caching_keys(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"caching_keys\")",
"def get_cache_key(self, language, country, key):\n if isinstance(key, str):\n _plural_int = None\n else:\n _plural_int = key[1]\n key = key[0]\n\n return '{}/{}/{}/{}'.format(language, country, key, _plural_int)",
"def get_cache_key():\n # Note: the boto3 default session is used,so get the AWS access key from there\n return boto3.DEFAULT_SESSION.get_credentials().access_key",
"def cache_key_domain(self):\n return self.build_cache_key(\n \"consumer_site__domain\",\n self.model.__name__,\n self.kwargs[\"uuid\"],\n )",
"def cache_id(self):\n raise NotImplementedError('implement in subclass')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the range of the doselevels of a guideline drugUsagesSlo | def printDoseLevelRange(outputDestination, doseLevel):
    ll = doseLevel.getOwnSlotValue(kb.getSlot("lower_limit"))
    ul = doseLevel.getOwnSlotValue(kb.getSlot("upper_limit"))
    av = doseLevel.getOwnSlotValue(kb.getSlot("abstract_value"))
    if (ll):
        llString = roundFloat(ll, 1)
    else:
        llString = "None"
    if (ul):
        ulString = roundFloat(ul, 1)
    else:
        ulString = "None"
    if (av):
        avString = av.getName()
    else:
        avString = "None"
    outputDestination.write(avString + "\t" + llString + "\t" + ulString) | [
"def test_get_drillstrings(self):\n drillstrings = self.corva.get_drillstrings(self.well_name)\n self.assertListEqual([drillstring.start_depth for drillstring in drillstrings], [15735, 9404, 8399, 6314, 1])",
"def levelOfDetailRange(self): # real signature unknown; restored from __doc__\n pass",
"def granularity():",
"def get_range(self):\n if self.battery_size == 24:\n range = 200\n elif self.battery_size == 34:\n range = 330\n\n print(f\"this car goes about {range} miles\")",
"def _describe_range(): # pragma: no cover\n\n\t\treturn ''",
"def get_range(self):\r\n if self.battery_size == 70:\r\n range = 240\r\n elif self.battery_size == 85:\r\n range = 270\r\n \r\n message = \"This car can go approximately \" + str(range)\r\n message += \" miles on a full charge.\"\r\n print(message)",
"def printOffsets(self):\n for station in self.ssStations:\n for num, row in self.subspaces[station].iterrows():\n print('%s, %s, min=%3f, max=%3f, range=%3f' %\n (row.Station, row.Name, row.Offsets[0], row.Offsets[2],\n row.Offsets[2] - row.Offsets[0]))",
"def total_hit_dice_prettified(self):\n return '{}d{}'.format(self.vocation.level, self.vocation.hit_die)",
"def printLevelsOfMarginals(das_module, queries, schema, qset_name):\n dpq_marginals = set()\n for qname in queries:\n dpq_marginals = dpq_marginals.union(qname.split(CC.SCHEMA_CROSS_JOIN_DELIM))\n das_module.log_and_print(f\"###\\nLevels of the marginals of {qset_name} DP queries to be measured:\")\n for qname in dpq_marginals:\n if qname != 'detailed':\n das_module.log_and_print(f\"{qname} levels:\\n------------------------\\n\" +\n \"\\n\".join(schema.getQueryLevel(qname)) +\n \"\\n---------------------------------\", cui=False)",
"def get_range(self): # Add range according to our battery capacity.\n if self.battery_size == 75:\n range = 260\n elif self.battery_size == 100:\n range = 315\n print(f\"This car can go abt. {range} miles on a full-charge.\")",
"def print_graduation_status():\n # Student information\n has_dean_permission = True\n has_advisor_permission = True\n is_approved_senior = True\n accumulated_credits = 2\n\n # Graduation status display; this is what the students will start with.\n if accumulated_credits >= 40 and has_advisor_permission:\n print(\"This student can graduate.\")\n elif accumulated_credits >= 64 and is_approved_senior:\n print(\"This student can graduate.\")\n elif has_dean_permission:\n print(\"This student can graduate.\")\n else:\n print(\"This student cannot graduate.\")",
"def display_range(range):\r\n return str(range[0]) + \"-\" + str(range[1])",
"def display_sampler_diags(fit):\n rhat_worst,n_eff_int_site=get_sampler_diags(fit)\n if (rhat_worst>1.1)|(rhat_worst<0.9):\n rhatlabel.button_style='danger'\n else:\n rhatlabel.button_style='success'\n if n_eff_int_site<1000:\n nefflabel.button_style='warning'\n else:\n nefflabel.button_style='success'\n\n rhatlabel.description='R_hat: %1.2f'%rhat_worst\n nefflabel.description='n_eff:'+str(n_eff_int_site)\n minB,maxB=np.percentile(fit['int_site'],(2.5,97.5),axis=0)\n banclabel.description='B_anc %3.1f'%minB+'- %3.1f'%maxB+' μT'\n cdiff=np.diff(np.percentile(fit['c'],(2.5,97.5),axis=0))/np.percentile(fit['int_site'],50)\n Bdiff=np.diff([minB,maxB])/np.percentile(fit['int_site'],50)\n\n if (cdiff>=1)&(Bdiff>=0.4):\n gradelabel.description='Category: D'\n if(fit['k'].shape[1])<5:\n gradelabel.button_style='warning'\n else:\n gradelabel.button_style='danger'\n elif(cdiff<1)&(Bdiff>=0.4):\n gradelabel.description='Category: C'\n gradelabel.button_style='warning'\n elif(cdiff>=1)&(Bdiff<0.4):\n gradelabel.description='Category: B'\n gradelabel.button_style='success'\n elif(cdiff<1)&(Bdiff<0.4):\n gradelabel.description='Category: A'\n gradelabel.button_style='success'",
"def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound, upper_bound = 1e-6, 0.1\n #######\n return lower_bound, upper_bound",
"def test_target_ranges(self):\r\n self.goto_mycare()\r\n self.goto_target_range_screen()\r\n highvalue = self.critical_high()\r\n self.set_critical_low()\r\n initialvalue = self.after_meal_overall_high(highvalue)\r\n self.before_meal_fasting_high(initialvalue)\r\n self.low()\r\n self.critical_low()\r\n self.summary_view()",
"def showDist(self):\n pris = {}\n for p in self.pickleP.subtree():\n pri = int(self.getat(p.v, 'priority'))\n if pri not in pris:\n pris[pri] = 1\n else:\n pris[pri] += 1\n pris = sorted([(k,v) for k,v in pris.iteritems()]) \n for pri in pris:\n if pri[0] in self.priorities:\n g.es('%s\\t%d\\t%s' % (self.priorities[pri[0]]['short'], pri[1],\n self.priorities[pri[0]]['long']))",
"def showLimits():\r\n limits = css.serviceInfo.limits\r\n print limits",
"def print_distance_utilities(self):\n for row in self.grid:\n for cell in row:\n if cell.distance_utility >= sys.maxsize:\n print(\" MAX \", end=\" \")\n else:\n print(\"{:05.2f}\".format(cell.distance_utility), end=\" \")\n print()\n print()",
"def info(argv):\n\n\tif len(argv) != 1:\n\t\thelp()\n\t\treturn 1\n\n\tdistrib = get_contour_distrib(argv[0])\n\tkeys = distrib.keys()\n\tkeys.sort()\n\tmax = keys[-1]\n\tmin = keys[0]\n\n\tprint \"Elevation range: %0.2f - %0.2fm:\\n\" % (min, max)\n\tprint \"\\tElevation | count \"\n\tprint \"\\t-----------------------\"\n\n\tfor k in keys:\n\t\tprint \"\\t%5.2fm | %4d\" % (k, distrib[k])\n\t\t \n\treturn 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Importable function that accepts a prompt and a time (in seconds) This function waits for an input and returns an empty string if a TimeoutExpired exception is raised. If an input is made before the set timer expires, the function returns the input | def timed_input(prompt='', timer=10):
    try:
        answer = __input_with_timeout(prompt, timer)
    except TimeoutExpired:
        return ''
    else:
        return answer | [
"def ask_time():\n # get time spent on task in minutes\n while True:\n user_time = input(\"Please enter the time spent on task in minutes >\")\n if checkers.return_int(user_time):\n output = checkers.return_int(user_time)\n break\n return output",
"def pause(msg=\"\",time_out='3h',error_on_timeout=True,default_input=''):\n\n BuiltIn().log(\"Pause and wait `%s` for user input\" % time_out)\n BuiltIn().log_to_console(msg)\n input = None\n wait = DateTime.convert_time(time_out)\n\n renat_batch = BuiltIn().get_variable_value('${RENAT_BATCH}')\n if renat_batch is None:\n i, o, e = select.select( [sys.stdin], [], [], wait)\n if i:\n input = sys.stdin.readline().strip()\n BuiltIn().log(\"User input detected. Input was `%s`\" % input)\n else:\n if not error_on_timeout:\n input = default_input\n BuiltIn().log(\"Pause finished with time out. Input was `%s`\" % input)\n else:\n raise Exception(\"ERROR: timeout while waiting for user input\")\n else:\n BuiltIn().log(\"Pausing is ignored in batch mode\")\n return input",
"def wait_for_prompt(self, timeout_s=None):\n with self._cond:\n # _LOG.debug(\"UserInput.wait_for_prompt\")\n if self._prompt:\n if timeout_s is None:\n self._cond.wait(3600 * 24 * 365)\n else:\n self._cond.wait(timeout_s)\n if self._response is None:\n self.mark_operator_attendance_end()\n raise PromptUnansweredError\n return self._response",
"def entry_time():\n while True:\n try:\n time = abs(int(input(\"How many minutes did the task take? \")))\n return time\n except ValueError:\n print(red_err(\"Please try again using an integer to represent minutes spent on task.\"))",
"def request_input():\n user_input = input(\"Do you wish to get the next closest time?\\n\"\n \"Type 'Y' to continue. \\n\").upper()\n return user_input",
"def requestUntilSuccess(\n string: str,\n invalid_msg: str = 'Invalid input',\n validInput: Callable[[Any], bool] = lambda x: x is not None,\n transformInput: Callable[[str], Any] = lambda x: x\n) -> str:\n user_input = None\n while not validInput(user_input):\n print('=====================================')\n user_input = input(string)\n if not validInput(user_input):\n print(invalid_msg)\n return transformInput(user_input)",
"def get_duration():\n message = \"\"\n while True:\n clear()\n duration = input(\n \"{}How long did it take to complete the task? \"\n \"(in minutes)\\n> \".format(message))\n try:\n duration = int(duration)\n if duration < 1:\n raise ValueError\n except ValueError:\n message = \"Duration must be a positive whole number.\\n\\n\"\n else:\n return duration",
"def get_timeout(cmd, input):\n start = time.time()\n try:\n subprocess.run(cmd + [input], stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout = 20)\n except subprocess.TimeoutExpired:\n return 25\n return min((int(time.time() - start) + 1) * 2, 25)",
"def inputStartTime(self):\n while True:\n try:\n hoursStart = int(input(\"Enter starting hour between 1-24: \"))\n minutesStart = int(input(\"Enter starting minutes between 1-59: \"))\n secondStart = int(input(\"Enter starting seconds between 1-59: \"))\n if hoursStart not in range(1, 25) or (minutesStart or secondStart) not in range(1, 60):\n print(\"Enter number should be in mention range\")\n continue\n break\n except Exception as e:\n print(e)\n\n return (hoursStart*60*60) + (minutesStart * 60) + secondStart",
"def get_employee():\n message = \"\"\n while True:\n clear()\n employee = input(\n \"{}Which employee completed the task?\\n> \".format(message))\n if len(employee) > 60:\n message = \"Name must be 60 or fewer characters.\\n\\n\"\n continue\n return employee",
"def input(prompt=None):\n if prompt:\n sys.stderr.write(str(prompt))\n return builtins.input()",
"def time_out():",
"def prompt (self, timeout=-1):\n\n if timeout == -1:\n timeout = self.timeout\n i = self.expect([self.PROMPT, TIMEOUT], timeout=timeout)\n if i==1:\n return False\n return True",
"def _timeout_(cmd, timeout):\n if timeout:\n return \"%s %d\"%(cmd,timeout)\n return cmd",
"def input(prompt=\"\"):\n _print_stderr(\" >> {}\".format(prompt), end=\"\")\n return builtins.input()",
"async def get_private_text_input(self, timeout: int) -> typing.Optional[str]:\n\n try:\n text = await self.bot.wait_for('message',\n check=self.bot.checks.wait_for_message_check(self.ctx),\n timeout=timeout)\n except asyncio.TimeoutError:\n await self.ctx.send(\":zzz: You took too long to reply.\")\n return None\n\n if not text.content:\n await self.ctx.send(\":x: You didn't reply with text.\")\n return None\n\n else:\n try:\n await text.delete()\n except Exception:\n pass\n\n return text.content",
"def input_text(thePrompt: str, theInputWidth: int, theDefaultInput: str = None, **kwds):\n box = Dialog(**kwds)\n d = box.margin\n\n def ok():\n box.dismiss(True)\n\n def cancel():\n box.dismiss(False)\n\n lb = Label(thePrompt)\n lb.topleft = (d, d)\n tf = TextField(theInputWidth)\n if theDefaultInput:\n tf.set_text(theDefaultInput)\n tf.enter_action = ok\n tf.escape_action = cancel\n tf.top = lb.top\n tf.left = lb.right + 5\n box.add(lb)\n box.add(tf)\n tf.focus()\n box.shrink_wrap()\n if box.present():\n return tf.get_text()\n else:\n return None",
"def _get_user_input(prompt):\n\n _inp = ''\n while not _inp:\n _inp = input(prompt)\n\n return _inp",
"def prompt_for_value(message_text):\n\n sys.stdout.write(f\"{message_text}: \")\n sys.stdout.flush()\n return sys.stdin.readline().rstrip()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle all nodes of the listed node classes. | def process(self):
    for node_class in self.setup["node_classes"]:
        for node in nuke.allNodes(recurseGroups=True):
            class_name = node.Class()
            if class_name != node_class:
                continue
            self.logger.info("%s '%s' because its node class (%s) is "
                             "included in %s", self.setup["mode"],
                             node.name(), class_name,
                             self.setup["node_classes"])
            self.handle_node(node) | [
"def node_classes(self):\n node_classes = []\n for node_type, node_attrs in data.iteritems():\n node_class = node_attrs.get('class', None)\n \n if node_class and node_class not in node_classes:\n node_classes.append(node_class)\n return sorted(node_classes)",
"def load_ontology_classes(self, base_class=None):\n sparql_query = '''\n SELECT DISTINCT ?ont_node ?label\n {\n ?ont_node rdfs:subClassOf* <%s> .\n ?ont_node rdfs:label ?label\n }\n '''\n\n count = 0\n qres = self.rdf_graph.query(sparql_query % base_class)\n\n for (ont_node, ont_label) in qres:\n uri = str(ont_node)\n label = str(ont_label)\n self.current_classes[uri] = label\n count +=1\n\n '''\n Add the children too\n '''\n self.get_children(uri=uri)\n\n logger.debug(\"parsed %i classes\"%count)",
"def updateNodes(nodes):\r\n global node_names\r\n node_names.extend(n.classname for n in nodes)",
"def tree_find_nodes(self, tree, node_name, data_class):\r\n\r\n aList = tree.all_nodes()\r\n\r\n result = []\r\n for el in aList:\r\n if node_name is None or el.tag == node_name:\r\n if isinstance(el.data, data_class):\r\n result.append(el)\r\n\r\n return result",
"def _get_node_types(self):\n for type in self.cfg.node_types:\n self._node_types[type.name] = type.label",
"def nodes(uids):\n for nobj in source.nodes(uids):\n node(nobj)\n fuel_data(nobj)",
"def allNodeTypes(includeAbstract=bool):\n pass",
"def process(self):\n\n self.process_classes(Steps.FLATTEN)\n self.filter_classes()\n self.process_classes(Steps.SANITIZE)\n self.process_classes(Steps.RESOLVE)\n self.process_classes(Steps.FINALIZE)\n self.designate_classes()",
"def classify(tree, data):\n return map(lambda rec: get_class(rec, tree), data)",
"def build_cases(node: ASTNodeType) -> None:\n # Don't bother processing classes unless they actually have\n # concrete subclasses, otherwise we would be producing dead code.\n if not node.concrete_subclasses:\n return\n\n to_pop = False\n\n if node == root_node:\n # As a special case, emit actions for the root node outside of\n # the top-level CASE block as we don't need to dispatch on\n # anything for them: they always must be applied.\n actions = actions_for_node(node, node_var)\n if actions:\n result.append(actions)\n\n else:\n # If there are actions for this node, add a matcher for them\n # and process the subclasses in a nested CASE block.\n actions = actions_for_node(node, Matcher.new_node_var(node))\n if actions:\n m = Matcher(node, actions)\n case_stack[-1].matchers.append(m)\n case_stack.append(m.inner_case)\n to_pop = True\n\n for subcls in node.subclasses:\n build_cases(subcls)\n\n if to_pop:\n case_stack.pop()",
"def Visit(self, node):\n mapping = self._mapping\n\n # Build a visitor that performs the old_class -> new_class mapping:\n class Visitor(visitors.Visitor):\n visits_all_node_types = True\n name_to_class = mapping\n for name, new_cls in mapping.iteritems():\n\n def Visit(self, node):\n # Python doesn't allow us to build this as a closure, so we have to\n # use the clunky way of retrieving the replacement class.\n cls = self.name_to_class.get(node.__class__.__name__)\n if cls is not None:\n return cls(*node)\n else:\n return node\n locals()[\"Visit\" + name] = Visit\n return node.Visit(Visitor())",
"def classify(self, dataTypes):\n dataClasses = {}\n for dataType in dataTypes:\n dataClasses[dataType.dataTypeName] = dataType.dataClasses[\"dataIndex\"]\n for node in self.chainGraph.nodes:\n if node.dataClasses[\"dataIndex\"] is None:\n matches = node.get_matching_classes(dataClasses)\n try:\n c = matches.pop()\n node.rollup_dataClass(c)\n node.dataClasses[\"dataIndex\"] = c\n except IndexError:\n pass\n for c in matches:\n copy = self.copy_node(node)\n copy.rollup_dataClass(c)\n copy.dataClasses[\"dataIndex\"] = c",
"def add_nodes(self, fusions: List[hmn_fusion.Fusion]) -> None:\n for fusion in fusions:\n self.add_node(fusion)",
"def visit_class(self, cls) -> None:\n if cls not in self._visited:\n self._visited.append(cls)\n\n if cls.dependencies:\n for dependency in cls.dependencies:\n if dependency in self.afferent_couplings:\n self.afferent_couplings[dependency] += 1\n else:\n self.afferent_couplings[dependency] = 1\n\n self.visit_children(cls)",
"def EnterClassType(self, node):\n nodes = [node]\n seen = set()\n while nodes:\n cur_node = nodes.pop(0)\n if cur_node in seen:\n continue\n seen.add(cur_node)\n for prefix, cls in self._Lookup(cur_node):\n if isinstance(cls, pytd.Alias) and isinstance(cls.type, pytd.ClassType):\n if cls.type.cls:\n cls = cls.type.cls\n else:\n nodes.append(cls.type)\n if isinstance(cls, pytd.Class):\n node.cls = cls\n return\n else:\n logging.warning(\"Couldn't resolve %s: Not a class: %s\",\n prefix + node.name, type(cls))",
"def handle(self, namespace):\n for label in namespace.labels:\n self.handle_label(label, namespace)\n else:\n self.handle_no_labels(namespace)",
"def CLASSDEF(self, node):\r\n for deco in node.decorator_list:\r\n self.handleNode(deco, node)\r\n for baseNode in node.bases:\r\n self.handleNode(baseNode, node)\r\n if not PY2:\r\n for keywordNode in node.keywords:\r\n self.handleNode(keywordNode, node)\r\n self.pushScope(ClassScope)\r\n if self.withDoctest:\r\n self.deferFunction(lambda: self.handleDoctests(node))\r\n for stmt in node.body:\r\n self.handleNode(stmt, node)\r\n self.popScope()\r\n self.addBinding(node, ClassDefinition(node.name, node))",
"def subclasses(nodes, c):\n con = concrete(nodes)\n return filter (lambda node: node.name != c and node.is_a (c), con)",
"def add_child_classes(node):\n for para in node.traverse(nodes.paragraph):\n para[\"classes\"] = ([] if \"classes\" in para else para[\"classes\"]) + [\"card-text\"]\n for title in node.traverse(nodes.title):\n title[\"classes\"] = ([] if \"classes\" in title else title[\"classes\"]) + [\n \"card-title\"\n ]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tuples of (sample, read group) names. | def unit_names(self):
    return [UnitName(sample.name, rg.name)
            for sample in self.samples.values()
            for rg in sample.read_groups.values()] | [
"def sample_names(self):\n return self._sample_names",
"def get_read_group(wildcards):\n return r\"-R '@RG\\tID:{run}\\tSM:{sample}-{condition}\\tPL:{platform}'\".format(\n sample=wildcards.sample,\n condition=wildcards.condition,\n run=units.loc[(wildcards.sample, wildcards.unit, wildcards.condition), \"fq1\"].split(\"/\")[-1].split(\".\")[0],\n platform=units.loc[(wildcards.sample, wildcards.unit, wildcards.condition), \"platform\"])",
"def get_read_group(wildcards):\n return r\"-R '@RG\\tID:{sample}\\tSM:{sample}\\tPL:{platform}'\".format(sample=wildcards.sample, platform=units.loc[(wildcards.sample, wildcards.unit), \"platform\"])",
"def _get_groupNames(self) -> \"std::vector< std::string,std::allocator< std::string > >\" :\n return _core.Attributes__get_groupNames(self)",
"def names(self) -> Tuple[str, ...]:\n return tuple(key for key, _ in self.segments)",
"def get_group_sample_ids(self, sample_group):\n return self._sample_group_lut[sample_group]['samples'].keys()",
"def getSampleNames(self, sampleIds):\n result = [None] * len(sampleIds)\n index = 0\n for sampleId in sampleIds:\n if sampleId in self.sampleId2Name:\n result[index] = self.sampleId2Name[sampleId]\n index += 1\n return result",
"def fixture_sample_tag_names(vcf_tag_name: str, sample_tag_name: str) -> List[str]:\n return [vcf_tag_name, sample_tag_name]",
"def get_sample_name( bamfile ):\n header = bamfile.header\n if 'RG' in header:\n if type(header['RG']) is list:\n return(header['RG'][0]['SM'])\n else:\n return(header['RG']['SM'])\n return( False )",
"def reader_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"reader_groups\")",
"def get_sample_ids_to_label(samples_file):\n sample_label_tuples = []\n lines = open(samples_file, 'U')\n for line in lines:\n if line.startswith('#'):\n continue\n line_pieces = [x.strip() for x in line.split('\\t')]\n if len(line_pieces) == 2:\n sample_label_tuples.append(tuple(line_pieces[0:2]))\n lines.close()\n\n return sample_label_tuples",
"def group_names_for_display(self):\n return self.demographic_group_name, \"non-\" + self.demographic_group_name",
"def get_group_sb(ms):\n result = re.findall(\"_SBgr(\\d+)-(\\d+)_\", ms)\n result_single = re.findall(\"_SB(\\d+)_\", ms)\n if result:\n group = int(result[0][0])\n sb_per_group = int(result[0][1])\n elif result_single:\n group = int(result_single[0])\n sb_per_group = 1\n else:\n group = None\n sb_per_group = None\n return group, sb_per_group",
"def sys_names(self):\n names = []\n for sample in self.samples:\n names.extend(sample.sys_names())\n return list(set(names))",
"def keys(self):\n ds = self._nc_handle\n group_keys = list(ds.groups.keys())\n var_keys = list(ds.variables.keys())\n return tuple(group_keys) + tuple(var_keys)",
"def get_stats(self):\n\n \"\"\"#See samtools stats\n # 1526795 + 0 in total (QC-passed reads + QC-failed reads)\n 13 + 0 secondary\n 0 + 0 supplementary\n 0 + 0 duplicates\n 3010 + 0 mapped (0.20% : N/A)\n 1526782 + 0 paired in sequencing\n 763391 + 0 read1\n 763391 + 0 read2\n 2700 + 0 properly paired (0.18% : N/A)\n 2976 + 0 with itself and mate mapped\n 21 + 0 singletons (0.00% : N/A)\n 0 + 0 with mate mapped to a different chr\n 0 + 0 with mate mapped to a different chr (mapQ>=5)\n \"\"\"\n d = {}\n\n samflags_count = self.get_samflags_count()\n\n # all reads - (supplementary alignmnt + secondary alignmnt)\n d[\"total_reads\"] = len(self) - (samflags_count[256] + samflags_count[2048])\n # all reads - (unmapped + supplementary alignmnt + secondary alignmnt)\n d[\"mapped_reads\"] = d[\"total_reads\"] - samflags_count[4]\n d[\"unmapped_reads\"] = samflags_count[4]\n d[\"mapped_proper_pair\"] = samflags_count[2]\n d[\"reads_duplicated\"] = samflags_count[1024]\n d[\"secondary_reads\"] = samflags_count[256]\n return d",
"def _read_groups(self, node):\n for child in node:\n assert child.tag == 'group'\n gname = child.attrib['name']\n assert gname not in self.groups\n group = []\n self.groups[gname] = group\n for child2 in child:\n group.append(enum_name(child2.attrib['name']))",
"def test_extract_fastq_sample_name(self):\n filenames = [\n \"NA12345 - 4KC_S7_L001_R1_001.fastq.gz\",\n \"NA12345 - 4KC_S7_L001_R2_001.fastq.gz\",\n \"NA12345 - 4KC_S7_L002_R1_001.fastq.gz\",\n \"NA12345 - 4KC_S7_L002_R2_001.fastq.gz\",\n \"L2000552_S1_R1_001.fastq.gz\",\n \"L2000552_S1_R2_001.fastq.gz\",\n \"L1000555_S3_R1_001.fastq.gz\",\n \"L1000555_S3_R2_001.fastq.gz\",\n \"L1000555_S3_R3_001.fastq.gz\",\n \"L3000666_S7_R1_001.fastq.gz\",\n \"L4000888_S99_R1_001.fastq.gz\",\n \"L4000888_S3K_S99_R2_001.fastq.gz\",\n \"L4000888_SK_S99_I1_001.fastq.gz\",\n \"L400S888_S99_I2_001.fastq.gz\",\n \"L400S888_S5-9_S99_I2_001.fastq.gz\",\n \"PTC_TsqN999999_L9900001_S101_I2_001.fastq.gz\",\n \"PRJ111119_L1900000_S102_I2_001.fastq.gz\",\n \"MDX199999_L1999999_topup_S201_I2_001.fastq.gz\",\n ]\n\n for name in filenames:\n sample_name = fastq.extract_fastq_sample_name(name)\n logger.info((sample_name, name))\n self.assertTrue(\"_R\" not in sample_name)\n\n self.assertIsNone(fastq.extract_fastq_sample_name(\"L1999999_topup_R1_001.fastq.gz\"))",
"def groups(self):\n groups = tuple()\n if self.is_group1():\n groups += (1,)\n if self.is_group2():\n groups += (2,)\n if self.is_group3():\n groups += (3,)\n return groups"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the given setting key if it has not yet been set. | def set_default_setting(self, key, value):
    if self.settings.get(key) is None:
        self.settings[key] = value | [
"def set_value(parameter, key, value):\n if not key in parameter:\n parameter[key] = value",
"def SetKeyword(key, value):",
"def set(self, workflow_id: str, key: str, value: Optional[str]) -> None:\n raise NotImplementedError",
"def direct_set(self, key: str, value):\n set_store_value(self.store, key, value)",
"def set_key_field(self, key_field):\n return self.set_param('key_field', key_field)",
"def __setattr__(self, key, value):\n try:\n if key in _GA(self, \"options_dict\"):\n _GA(self, \"set\")(key, value)\n except AttributeError:\n pass\n _SA(self, key, value)",
"def setCacheKey(self, key):\n\t\tself.cacheKey = key",
"def set_default(key, value):\n global options\n global valid_options\n\n if key not in options:\n options[key] = value\n\n # Track this is a valid option so we can validate that customers didn't\n # mistype any options\n valid_options.add(key)",
"def _check_key_setting_consistency(self, params_kkrimp, key, val):\n param_ok = True\n\n #TODO implement checks\n\n if not param_ok:\n raise ValueError('Trying to set key \"{}\" with value \"{}\" which is in conflict to previous settings!'.format(key, val))",
"def __setattr__(self, key, val):\n if key.startswith(\"_\"):\n object.__setattr__(self, key, val)\n else:\n self._kwargs[key] = val",
"def set_option(self, key, value):\n self.options.set(key, value)",
"def set_parm(self, key, val=None):\n\n self.__parm[key] = val",
"def set(self, key, config, value=1):\n value_dict = self.param_dict.get(key)\n if value_dict is None:\n self.param_dict[key] = value_dict = {}\n value_dict[config] = value\n self.keys.append(key)\n\n present_list = self.config_dict.get(config)\n if present_list is None:\n self.config_dict[config] = present_list = []\n present_list.append(key)",
"def setDefault(key, value, context=None):",
"def set(self, key, value):\n self.config[key] = value\n self.saveConfig()",
"def setitem(self, key, value):",
"def set(key, value, description=\"\"):\n p = Preference.select(Preference.q.pref_key == key)\n if p.count() == 0:\n Preference(pref_key=key, \n pref_value=value,\n pref_description=description)\n else:\n p[0].pref_value = value\n if description:\n p[0].pref_description = description",
"def _set_sourcing_key(self, sourcing_key):\r\n self.meta['sourcing_key'] = sourcing_key",
"def put_value(self, key, value):\n if self.settings.has_key(key):\n storage.put_setting(self.db, key, value)\n self.settings[key] = value\n else:\n logging.warning(\"Trying to update a settings key that does not exists! (%s)\", key)\n raise Exception(\"Trying to update a settings key that does not exists!\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the working directory of the run A default value can optionally be given if the ``settings.workdir`` key is not defined in the config. | def get_workdir(self, default=None):
    return getnattr(self._raw, ["settings", "workdir"], default) | [
"def _driver_workdir(self):\n return self.config.get('workdir', '/tmp')",
"def get_worker_working_directory():\n working_directory = \"tmp/netSLS\"\n if _CONFIG_PARSER.has_option(\"DEFAULT\", \"WorkerWorkingDirectory\"):\n working_directory = _CONFIG_PARSER.get(\"DEFAULT\", \"WorkerWorkingDirectory\")\n\n # MaxiNet executes commands with root permission. We don't want to mess up our workers.\n if abspath(working_directory) == \"/\":\n working_directory = \"/tmp/netSLS\"\n\n return working_directory",
"def get_workdir(prefix=\"work_dirs/run\"):\n if prefix.endswith(os.sep):\n prefix = prefix[:-1]\n sec = time.time()\n md5 = hashlib.md5(str(sec).encode()).hexdigest()\n timestr = time.strftime('%Y%m%d_%H%M%S', time.localtime(sec))\n workdir = \"{prefix}_{timestr}_{md5:.6}\".format(prefix=prefix, timestr=timestr, md5=md5)\n return workdir",
"def workingDir() -> str:\n d = os.getcwd()\n _log.verify( len(d) == 0 or not (d[-1] == '/' or d[-1] == '\\\\'), \"*** Internal error 13123212-2: %s\", d)\n return d + \"/\"",
"def get_working_dir(self, gerrit, project):\n return os.path.join(\n os.getcwd(), '%s-%s-tmp' % (gerrit['host'], project))",
"def getcwd():\r\n try:\r\n a = os.stat(os.environ['PWD'])\r\n b = os.stat(os.getcwd())\r\n if a.ino == b.ino and a.dev == b.dev:\r\n working_dir = os.environ['PWD']\r\n else:\r\n working_dir = os.getcwd()\r\n except:\r\n working_dir = os.getcwd()\r\n return working_dir",
"def _output_directory_default(self):\n return os.getcwd()",
"def testWorkingDir(self):\n\n os.environ.pop(\"TUNE_ORIG_WORKING_DIR\", None)\n working_dir = os.getcwd()\n\n def f(config):\n assert os.environ.get(\"TUNE_ORIG_WORKING_DIR\") == working_dir\n\n ray.init(num_cpus=1)\n tune.run(f)\n ray.shutdown()",
"def default_base_dir():\n cwd = Path('.').resolve()\n\n pwd = os.environ.get('PWD')\n if pwd is None:\n return cwd\n\n pwd = Path(pwd)\n if not pwd.is_absolute():\n return cwd\n\n if cwd != pwd.resolve():\n return cwd\n\n return pwd",
"def get_current_dir():\n return os.getcwd()",
"def set_workdir(self, workdir=None):\n self.Script.set_work_directory(workdir)\n return",
"def get_current_dir() -> str:\n return os.getcwd()",
"def cwd() -> str:\n return os.path.abspath(os.getcwd())",
"def get_pilot_work_dir(workdir):\n\n return os.path.join(workdir, \"PanDA_Pilot2_%d_%s\" % (os.getpid(), str(int(time.time()))))",
"def BuildCWD(self):\n return ROOT_DIR",
"def get_run_path(self):\r\n return self.__run_path",
"def get_current_dir():\n return os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))",
"def get_project_dir():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n current_dir = os.path.dirname(current_dir)\n current_dir = os.path.dirname(current_dir)\n # current_dir = os.path.dirname(current_dir)\n # current_dir = os.path.dirname(current_dir)\n return current_dir",
"def getBuildDir(self):\n default = 'build'\n pathstr = self.getCustom('Build', 'builddir', default)\n pathstr = self._getAbsPath(pathstr)\n\n return pathstr"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test VersionSelector().pick_winner() with "None" versions. | def test_basic_selector_none_versions(unsupported_cve_none_versions):
candidates = [
PackageNameCandidate('io.vertx:testtools', Decimal('10.0')),
]
selector = VersionSelector(unsupported_cve_none_versions, candidates, 'java')
winner = selector.pick_winner() # don't throw TypeError here
assert not winner | [
"def test_none():\n ver = _version.Version(\"\", True, 8)\n assert_equals(unicode(ver), u'0.0.0-dev-r8')\n\n ver = _version.Version(\"\", False, 9)\n assert_equals(unicode(ver), u'0.0.0')",
"def test_election_winners_0_votes() -> None:\n e = Election(date(2000, 2, 8))\n e.update_results('r1', 'ndp', 0)\n e.update_results('r2', 'np', 0)\n res1 = e.election_winners()\n assert res1.sort() == ['ndp', 'np'].sort()",
"def test_select_latest_version():\n lat_ver = select_latest_version(\"-1\", [\"3.4.5\", \"3.4.1\"], \"pkg\")\n assert lat_ver == \"3.4.5\"\n\n lat_ver = select_latest_version(\"-1\", [\"3.4.1\", \"3.4.5\"], \"pkg\")\n assert lat_ver == \"3.4.5\"\n\n lat_ver = select_latest_version(\"3.4.5\", [\"3.4.1\", \"3.4.0\"], \"pkg\")\n assert lat_ver == \"3.4.5\"\n\n lat_ver = select_latest_version(\"-1\", [\"-1\", \"-1\"], \"pkg\")\n assert lat_ver == ''\n\n lat_ver = select_latest_version([\"abc\"], [{\"a\": \"b\"}, [\"b\"]], \"pkg\")\n assert lat_ver == ''",
"def test_election_winners_empty() -> None:\n e = Election(date(2000, 2, 8))\n res1 = e.election_winners()\n assert res1.sort() == ['ndp', 'np'].sort()",
"def randomise_poll_winner():\n poll_id = int(input(\"Enter a poll id you want to select a winner from: \"))\n poll = Poll.get(poll_id)\n _print_poll_options(poll.options)\n\n option_id = int(input(\"Enter an option id you want to select a winner from: \"))\n votes = Option.get(option_id).votes\n winner = random.choice(votes)\n print(f\"The randomly selected winner is '{winner[0]}'\")",
"def test_election_winners_1_riding_2_party() -> None:\n e = Election(date(2000, 2, 8))\n e.update_results('r1', 'ndp', 19)\n e.update_results('r1', 'np', 69)\n res1 = e.election_winners()\n assert res1 == ['np']",
"def test_election_winners_2_riding_1_party() -> None:\n e = Election(date(2000, 2, 8))\n e.update_results('r1', 'ndp', 19)\n e.update_results('r2', 'ndp', 69)\n res1 = e.election_winners()\n assert res1 == ['ndp']",
"def test_riding_winners_1_party_zero_vote() -> None:\n e = Election(date(2000, 2, 8))\n e.update_results('r1', 'ndp', 0)\n res1 = e.riding_winners('r1')\n assert res1 == ['ndp']",
"def winner(self):\n if len(self.whos_alive()) == 1:\n return self.whos_alive()[0]\n else:\n return None",
"def test_no_potential_reviewers(self):\n self.handler = HighfiveHandlerMock(\n Payload({}), repo_config=self.fakes['config']['empty']\n ).handler\n chosen_reviewers, mentions = self.choose_reviewers(\n self.fakes['diff']['normal'], 'alexcrichton',\n self.fakes['global_']['base']\n )\n assert set([None]) == chosen_reviewers\n assert set() == mentions",
"def looking_for_winner(self, player):\n if len(player.cards) == 0:\n self.winner = player\n else:\n self.winner = None\n \n return self.winner",
"def pick_winner(self) :\n winning_combo = []\n count = 0\n while count <= 3 :\n random = self.lotto_elements[randint(0, 14)]\n winning_combo.append(random)\n count += 1\n return winning_combo",
"def test_simple_election_riding_winners_tie() -> None:\n e = Election(date(2000, 2, 8))\n e.update_results('r1', 'ndp', 1)\n e.update_results('r1', 'lib', 1)\n e.update_results('r1', 'pc', 1)\n assert e.riding_winners('r1') == ['ndp', 'lib', 'pc']",
"def _check_winner(self):\n for combo in self._winning_combos:\n winner = reduce(lambda x, y: x if x == y else None, [self.board[x] for x in combo])\n if winner:\n return winner\n\n return None if None in self.board else self.draw",
"def test_get_previous_version_no_previous_version(self):\n study = factories.StudyFactory.create()\n source_study_version_1 = factories.SourceStudyVersionFactory.create(study=study, i_version=1)\n source_study_version_2 = factories.SourceStudyVersionFactory.create(study=study, i_version=2)\n self.assertIsNone(source_study_version_1.get_previous_version())",
"def test_selection_none():\n assert (selection(EMPLOYEES, filter_employees_none)) is None",
"def test_intellij_get_preferred_version(self,\n mock_all_versions,\n mock_deprecated_version,\n mock_ask_preference,\n mock_preference,\n mock_write_cfg):\n mock_write_cfg.return_value = None\n ide_intellij = ide_util.IdeIntelliJ()\n\n # No IntelliJ version is installed.\n mock_all_versions.return_value = ['/a/b', '/a/c']\n mock_deprecated_version.return_value = True\n version = ide_intellij._get_preferred_version()\n self.assertEqual(version, None)\n\n # Load default preferred version.\n mock_all_versions.return_value = ['/a/b', '/a/c']\n mock_deprecated_version.return_value = False\n ide_intellij._config_reset = False\n expected_result = '/a/b'\n mock_preference.return_value = '/a/b'\n version = ide_intellij._get_preferred_version()\n self.assertEqual(version, expected_result)\n\n # Asking user the preferred version.\n mock_preference.reset()\n mock_all_versions.return_value = ['/a/b', '/a/c']\n mock_deprecated_version.return_value = False\n ide_intellij._config_reset = True\n mock_ask_preference.return_value = '/a/b'\n version = ide_intellij._get_preferred_version()\n expected_result = '/a/b'\n self.assertEqual(version, expected_result)\n\n mock_all_versions.return_value = ['/a/b', '/a/c']\n mock_ask_preference.return_value = None\n expected_result = '/a/b'\n mock_preference.return_value = '/a/b'\n version = ide_intellij._get_preferred_version()\n self.assertEqual(version, expected_result)\n\n # The all_versions list has only one version.\n mock_all_versions.return_value = ['/a/b']\n mock_deprecated_version.return_value = False\n version = ide_intellij._get_preferred_version()\n self.assertEqual(version, '/a/b')",
"def pick_winner(customers):\n\n chosen_customer = choice(customers)\n\n print(\"Tell {name} at {email} that they've won\".format(\n name=chosen_customer.name,\n email=chosen_customer.email\n ))",
"def test_package_versions_excludes_partial_releases(self):\n # Full release of both components\n rid1 = self._create_release(platforms=['platformOne'])\n pid1 = self._create_package(rid1, name='packageOne', version='1.0')\n pid2 = self._create_package(rid1, name='packageTwo', version='1.0')\n self._start_package(pid1)\n self._stop_package(pid1)\n self._start_package(pid2)\n self._stop_package(pid2)\n sleep(0.1) # To ensure some time separation\n\n # Partial release\n rid2 = self._create_release(platforms=['platformOne'])\n pid1 = self._create_package(rid2, name='packageOne', version='2.0')\n pid2 = self._create_package(rid2, name='packageTwo', version='2.0')\n self._start_package(pid1)\n self._stop_package(pid1)\n self._start_package(pid2)\n # note - have not stopped packageTwo\n\n result = orlo.queries.package_versions(\n by_release=True, platform='platformOne').all()\n self.assertEqual(len(result), 2)\n for _, ver in result:\n # Should both be version 1.0, despite packageOne 2.0 being\n # successful\n self.assertEqual(ver, '1.0')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Open a connection with Postgres. | def open(self):
conn_string = f"host={self.host} user={self.user} password={self.password} dbname={self.dbname} port={self.port}"
try:
self.conn = psycopg2.connect(conn_string)
print("POSTGRES::Connection established")
except Exception as e:
print(str(e)) | [
"def setup_postgres():\n conn = psycopg2.connect(\"postgresql://python:{}@{}:5432/kin\".format(PYTHON_PASSWORD, POSTGRES_HOST))\n logging.info('Successfully connected to the database')\n return conn",
"def init_postgresql_connection():\n connection = connect(user='test',\n password='test',\n host='localhost',\n port='5432',\n database='infrastructure')\n cursor = connection.cursor()\n return connection, cursor",
"def __connect() -> psycopg2.extensions.connection:\n db_connection = psycopg2.connect(\n database=os.environ[\"DATABASE\"],\n user=os.environ[\"USER\"],\n password=os.environ[\"PASSWORD\"],\n host=os.environ[\"HOST\"],\n port=\"5432\",\n )\n\n db_connection.autocommit = True\n return db_connection",
"def connect(un, passw):\n\n conn = None\n try:\n # read connection parameters\n # params = config()\n \n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database using dynamic secret...')\n conn = psycopg2.connect(host=pg_hostname,database=db_name, user=un, password=passw)\n \n # create a cursor\n cur = conn.cursor()\n \n # execute a statement\n # print('PostgreSQL database version:')\n # create_table_query = '''CREATE TABLE mobile\n # (ID INT PRIMARY KEY NOT NULL,\n # MODEL TEXT NOT NULL,\n # PRICE REAL); '''\n\n print(\"Retrieving table data that was created by the root user in the postgres db...\")\n read_table = 'select * from testtable;'\n # cur.execute('SELECT version()')\n # cur.execute(create_table_query)\n cur.execute(read_table)\n \n # display the PostgreSQL database server version\n db_version = cur.fetchone()\n print(db_version)\n \n # close the communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')",
"def test_connection():\n\n conn = None\n try:\n #read conection parameters\n params = dbconfig(filename,section)\n \n #connect to server\n print('Conecting to the PostgreSQL database...')\n conn = psycopg2.connect(**params)\n #create a cursor\n cur = conn.cursor()\n\n #execute a statement\n print('Postgres database version: ')\n cur.execute('SELECT version()')\n\n #display the postgress db server version\n db_version = cur.fetchone()\n print(db_version)\n\n #close comm with pgSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')",
"def connect_to_jira():\n conn = None\n try:\n # read connection parameters\n # params = config()\n \n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database...')\n # conn = psycopg2.connect(**params)\n conn = psycopg2.connect(os.environ.get('DB_CONN', None))\n # conn.set_client_encoding('UTF-16')\n print('Connected :)')\n # create a cursor\n cur = conn.cursor()\n return conn, cur\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)",
"def __connect_db(self, connection):\n\t\tstrconn = \"dbname=%(database)s user=%(user)s host=%(host)s password=%(password)s port=%(port)s sslmode=%(sslmode)s\" % connection\n\t\tpgsql_conn = psycopg2.connect(strconn)\n\t\tpgsql_conn.set_session(autocommit=True)\n\t\tpgsql_cur = pgsql_conn .cursor()\n\t\tbackend_pid = pgsql_conn.get_backend_pid()\n\t\tdb_handler = {}\n\t\tdb_handler[\"connection\"] = pgsql_conn\n\t\tdb_handler[\"cursor\"] = pgsql_cur\n\t\tdb_handler[\"pid\"] = backend_pid\n\t\treturn db_handler",
"def __init__ (self, db_con_str = 'dbname=tournament'):\n self.conn = psycopg2.connect(db_con_str)",
"def build_db_conn(host, port, database, username, password):\n postgres = PgConn(host=host,\n port=port,\n database=database,\n username=username,\n password=password)\n\n return postgres",
"def postgres():\n utils.create_db()\n try:\n yield utils.get_postgres_dsn()\n finally:\n utils.drop_db()",
"def test_postgres_connector():\n connector = PostgresConnector()\n db_connector = DBConnector(connector)\n\n connection = db_connector.connect_to_db('source/bbdd/db_info.ini')\n\n assert connection is not None",
"def connect(x=None, l1=None):\n conn = None\n y=None\n try:\n \n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database...')\n try:\n DATABASE_URL = os.environ['DATABASE_URL']\n conn = pg2.connect(DATABASE_URL)\n except KeyError:\n params = postgres_config()\n conn = pg2.connect(**params) #?SSL mode?\n\n # create a cursor\n cur = conn.cursor()\n \n # execute a statement\n print('PostgreSQL database version:')\n cur.execute('SELECT version()')\n db_version = cur.fetchone()\n print(db_version)\n # display the PostgreSQL database server version\n if x:\n y = x(conn, l1)\n conn.commit()\n \n # close the communication with the PostgreSQL\n cur.close()\n\n except (Exception, pg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Database connection closed.')\n if y:\n return y",
"def connect_dsn(self, dsn):\n if self.debug:\n print(\"Connecting to PostgreSQL\")\n\n try:\n self.con = I_sql.connect(dsn)\n\n if self.debug:\n print(\"Connected to PostgreSQL\")\n\n self.con.set_isolation_level(I_sql.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n except (Exception, I_sql.DatabaseError) as error:\n print(\"SQL Connection Error Occured >> \")\n print(error)\n return self.con",
"def database_connection(self, database_name):\n return psycopg2.connect(dbname=database_name, user='andela', host='localhost', password='bootcamp')",
"def create_connection(self) -> psycopg2.extensions.connection:\n conn = psycopg2.connect(\n **self.db_settings\n )\n conn.set_session(\n isolation_level=psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE\n )\n return conn",
"async def try_connect_postgres(self, host, user, password, database):\n\n while True:\n logger.info(\"Trying to connect to postgres... {}@{}/{}\".format(user, host, database))\n logger.debug(\"loop: {}\".format(self.loop))\n try:\n postgres = await aiopg.create_pool(\n loop=self.loop,\n host=host,\n user=user,\n database=database,\n password=password)\n logger.info(\"Successfully connected to postgres\")\n return postgres\n except:\n logger.warn(\"Failed to connect to postgres\")\n time.sleep(5)",
"def dbconn_from_args(argv=sys.argv[1:], environ=os.environ):\n parser = optparse.OptionParser()\n parser.add_option(\"-D\",\"--dbname\", action=\"store\", dest=\"dbname\",\n help=\"database name of the topology network\")\n parser.add_option(\"-H\",\"--dbhost\", action=\"store\", dest=\"dbhost\",\n help=\"database host address of the topology network\")\n parser.add_option(\"-P\",\"--dbport\", action=\"store\", dest=\"dbport\",\n help=\"database port of the topology network\")\n parser.add_option(\"-U\",\"--dbuser\", action=\"store\", dest=\"dbuser\",\n help=\"database user name of the topology network\")\n parser.add_option(\"-X\",\"--dbpwrd\", action=\"store\", dest=\"dbpwrd\",\n help=\"database user password of the topology network\")\n\n (options, args) = parser.parse_args(argv)\n # Options have precedence over environment variables, which have\n # precedence over defaults.\n\n # NB: None (or null) host and port values are completely reasonable and\n # mean a local (unix domain socket) connection. This way postgresql can\n # be configured without network support, which is convenient and secure.\n dbhost = options.dbhost or environ.get('PGHOST')\n dbport = options.dbport or environ.get('PGPORT')\n # postgres is the default database role, and why not use it?\n dbuser = options.dbuser or environ.get('PGUSER', 'postgres')\n # A None password is also valid but it implies the server must be configured\n # to support either 'trust' or 'ident' identification. For a local server this\n # is convenient, but it isn't secure for network installations. Review\n # man pg_hba.conf for the details.\n dbpwrd = options.dbpwrd or environ.get('PGPASSWORD')\n dbname = options.dbname or environ.get('PGDATABASE', 'eu_power_160718')\n\n try:\n logging.info(\"PostgreSQL connection parameters:\")\n logging.info(\"--> Host: \" + str(dbhost))\n logging.info(\"--> Port: \" + str(dbport))\n logging.info(\"--> User: \" + str(dbuser))\n logging.info(\"--> Database: \" + str(dbname))\n return psycopg2.connect(host=dbhost, port=dbport, user=dbuser,\n password=dbpwrd, database=dbname)\n except psycopg2.Error as e:\n logging.error(\"Could not connect to database with supplied information\", exc_info=True)\n if len(argv) == 0 or len(args) == len(argv):\n parser.print_help()\n raise e",
"def create_database():\n # connect to default database\n conn = psycopg2.connect(database='postgres', user='dunya', port='5432')\n conn.set_session(autocommit=True)\n cur = conn.cursor()\n\n # create sparkify database with UTF8 encoding\n cur.execute(\"DROP DATABASE IF EXISTS sparkifydb\")\n cur.execute(\"CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0\")\n\n # close connection to default database\n conn.close()\n\n # connect to sparkify database\n conn = psycopg2.connect(database='sparkifydb', user='dunya', port='5432')\n cur = conn.cursor()\n\n return cur, conn",
"def test_postgresql_connect_fail(self):\n if _is_backend_avail('postgres', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fetch_pdb_chain_uniprot with existing file | def test_fetch_pdb_chain_existing_file_pass(self):
success_msg = re.compile(
r'^Found local copy of.*',
re.IGNORECASE
)
chain_fp = os.path.join(
self.test_data_dp,
'data',
'initial_filtering_data',
'tsv_data',
'pdb_chain_uniprot.tsv')
with captured_stdout() as stdout:
fetch_pdb_chain_uniprot(chain_fp)
result = stdout.getvalue().strip()
print(result)
self.assertTrue(success_msg.search(result))
return None | [
"def read_lookup_table(filename):\n\n pdb_id_file = open(filename, \"r\")\n uniprot_pdb_dict = {}\n for line in pdb_id_file:\n pdb_id = str(line[7:-1])\n uniprot_id_for_dict = str(line[:-6])\n uniprot_pdb_dict.setdefault(uniprot_id_for_dict,[]).append(pdb_id)\n\n return uniprot_pdb_dict",
"def extract_chain(pdb_path, chain_id_arg='first'):\n pdb_name = os.path.basename(pdb_path) # With .pdb extension\n pdb_id = os.path.splitext(pdb_name)[0] # Without .pdb extension\n out_filename = \"results/\" + pdb_id + chain_id_arg + '.pdb'\n\n with open(pdb_path, 'r') as pdb_in, \\\n open(out_filename, 'w') as pdb_out:\n i = 0\n\n for line in pdb_in:\n resName = line[17:20].strip()\n resID_pdb = line[22:26]\n\n if (line[0:4] == \"ATOM\") or ((line[0:6] == \"HETATM\") and\n ( (resName == \"MET\") or resName == \"MSE\") ):\n chain_ID = line[21:22].strip()\n i += 1\n\n if chain_id_arg == 'first':\n if i == 1: # If it is the 1st ATOM line read\n first_chain_id = chain_ID\n\n if chain_ID == first_chain_id:\n pdb_out.write(line)\n else:\n break\n\n else:\n if chain_ID == chain_id_arg:\n pdb_out.write(line)\n\n if not os.stat(out_filename).st_size: # If the file is empty\n print(\"ERROR! The chain ID you specified does not belong to \" +\n pdb_path + ' !\\n')\n os.remove(out_filename) # Clean the empty file\n sys.exit(1)\n\n # Rename file, in the case where no chain had been specified:\n if chain_id_arg == 'first':\n os.system(\"mv \" + out_filename + \" results/\" + pdb_id +\n first_chain_id + '.pdb')\n return (pdb_id + first_chain_id + '.pdb', pdb_id + first_chain_id)\n\n return (pdb_id + chain_id_arg + '.pdb', pdb_id + chain_id_arg)",
"def ExtractChainText(file_name):\n\n pdb_file = open(file_name, 'r')\n\n chain_atoms_found = {}\n chain_text = {}\n\n for line in pdb_file:\n if line[0:6] == 'ATOM ':\n chainID = line[21:22]\n if (not chainID in chain_atoms_found):\n chain_atoms_found[chainID] = {}\n chain_text[chainID] = []\n for atom_name in heavy_atoms:\n chain_atoms_found[chainID][atom_name] = False\n\n chain_text[chainID].append(line)\n\n atom_type = line[12:16]\n res_type = line[17:20]\n if res_type in res_types:\n #if ((atom_type in atoms_found) and (not atoms_found[atom_type])):\n # print('found atom_type=\\\"'+atom_type+'\\\", in res_type=\\\"'+res_type+'\\\"')\n chain_atoms_found[chainID][atom_type] = True\n\n\n for chainID in chain_atoms_found:\n search_criteria_satisfied = True\n for atom_type in chain_atoms_found[chainID]:\n if (not chain_atoms_found[chainID][atom_type]):\n search_criteria_satisfied = False\n if search_criteria_satisfied:\n sys.stderr.write(\" Chain \\\"\"+chainID+\"\\\" contains DNA.\\n\")\n # Then create a new PDB file with a name similar to the original:\n pdb_file_chain_name = file_name\n i = pdb_file_chain_name.lower().rfind('.pdb')\n if i != -1:\n pdb_file_chain_name = (pdb_file_chain_name[:i] +\n '_' + chainID +\n pdb_file_chain_name[i:])\n else:\n pdb_file_chain_name = file_name + '_' + chainID\n sys.stderr.write(' Creating file \\\"'+pdb_file_chain_name+'\\\"\\n')\n pdb_file_chain = open(pdb_file_chain_name, 'w')\n pdb_file_chain.write(''.join(chain_text[chainID]))\n pdb_file_chain.close()\n\n pdb_file.close()",
"def proteinDl(combinedId):\n print(\"Downloading secondary metabolite proteins\")\n\n proteins = bio.dbFetch(\"\"\"\n SELECT torg.name, torg.org_id, proteins.prot_seqkey, sp.sm_short, proteins.prot_seq FROM (SELECT * FROM organism WHERE name IN ('%s')) torg\n JOIN smurf_papa AS sp ON torg.org_id = sp.org_id AND sp.sm_short != 'none'\n JOIN proteins ON sp.org_id = proteins.org_id AND sp.sm_protein_id = proteins.prot_seqkey;\n \"\"\" % \"','\".join(orgs) )\n\n proteins = [(org, org_id, protein_id, sm_short, bio.cleanProtSeq(seq.decode(\"UTF-8\"))) for org, org_id, protein_id, sm_short, seq in proteins]\n\n return(proteins)",
"def uniprot_to_csv_on_disk(self):\n uniprot_dict = {}\n uniprot_csv_path = os.path.join(self.save_dir, 'swissprot_{}.csv'.format(self.mode))\n uniprot_pickle_path = os.path.join(self.save_dir, 'swissprot_{}.p'.format(self.mode))\n out_csv = open(uniprot_csv_path, 'a')\n with open(self.uniprot_file_path, \"r\") as in_fobj:\n curr_prot_id = ''\n curr_F_GOs = []\n curr_P_GOs = []\n curr_C_GOs = []\n curr_ECs = []\n curr_structure = []\n seq = False\n for line in in_fobj:\n fields = line.strip().split()\n flag = fields[0]\n if flag == 'ID' and len(fields) >= 2:\n curr_prot_id = fields[1]\n uniprot_dict[curr_prot_id] = {}\n elif flag == 'DE' and len(fields) >= 2:\n rec_name = re.search(r'(?<=Full=)(.+?)[;\\s]', line)\n ec_nr = re.search(r'(?<=EC=)([0-9.-]*?)[;\\s]', line)\n if ec_nr:\n curr_ECs.append(ec_nr.group(1))\n elif rec_name:\n uniprot_dict[curr_prot_id]['rec_name'] = rec_name.group(1)\n elif flag == 'DR' and len(fields) >= 2:\n '''\n abfrage fuer GOS und PFAM\n '''\n # ask for GO first:\n dr_fields = [ref.rstrip('.;') for ref in fields[1:]]\n # TODO: should we filter for funcitonalilty here?\n if dr_fields[0] == 'GO' and dr_fields[2].startswith('F'):\n curr_F_GOs.append(dr_fields[1])\n # try:\n # uniprot_dict[curr_prot_id]['GO'].append(dr_fields[1])\n # except KeyError:\n # uniprot_dict[curr_prot_id]['GO'] = [dr_fields[1]]\n elif dr_fields[0] == 'GO' and dr_fields[2].startswith('P'):\n curr_P_GOs.append(dr_fields[1])\n elif dr_fields[0] == 'GO' and dr_fields[2].startswith('C'):\n curr_C_GOs.append(dr_fields[1])\n\n elif dr_fields[0] == 'Pfam':\n uniprot_dict[curr_prot_id]['Pfam'] = dr_fields[2:]\n else:\n pass\n elif flag == 'CC' and len(fields) >= 2:\n '''\n may content sequence caution warning\n '''\n pass\n\n elif flag == 'PE' and len(fields) >= 2:\n protein_existance = fields[1]\n uniprot_dict[curr_prot_id]['protein_existance'] = protein_existance\n\n elif flag == 'FT' and len(fields) >= 2:\n \"\"\"\n the annotated features. (http://www.uniprot.org/help/sequence_annotation)\n Those are anotations like catalytic site, binding sites and secondary structure\n \"\"\"\n ft_fields = fields[1:]\n if ft_fields[0] == 'HELIX':\n curr_structure.append(('HELIX', fields[2], fields[3]))\n if ft_fields[0] == 'STRAND':\n curr_structure.append(('STRAND', fields[2], fields[3]))\n if ft_fields[0] == 'SHEET':\n curr_structure.append(('SHEET', fields[2], fields[3]))\n if ft_fields[0] == 'TURN':\n curr_structure.append(('TURN', fields[2], fields[3]))\n elif flag == 'SQ' and len(fields) >= 2:\n seq = True\n uniprot_dict[curr_prot_id]['seq'] = ''\n elif seq == True:\n if flag == '//':\n uniprot_dict[curr_prot_id]['F_GO'] = self._full_annotation(curr_F_GOs)\n uniprot_dict[curr_prot_id]['P_GO'] = self._full_annotation(curr_P_GOs)\n uniprot_dict[curr_prot_id]['C_GO'] = self._full_annotation(curr_C_GOs)\n uniprot_dict[curr_prot_id]['EC'] = curr_ECs\n uniprot_dict[curr_prot_id]['Structure'] = curr_structure\n curr_prot_id = ''\n seq = False\n # set collectors to []\n curr_F_GOs = []\n curr_C_GOs = []\n curr_P_GOs = []\n curr_ECs = []\n curr_structure = []\n\n # write the entry to file\n uniprot_df = pd.DataFrame.from_dict(uniprot_dict, orient='index')\n uniprot_df.to_csv(out_csv, sep=';', na_rep='', header=False, index=True,\n line_terminator='\\n')\n uniprot_dict = {}\n else:\n uniprot_dict[curr_prot_id]['seq'] += ''.join(fields)\n else:\n pass\n out_csv.close()",
"def getProtAssoc(databaseName, path, idProt=\"Hepcidin\"):\n\t\n\t\n\t\n\tconnect, cursor = connection(path+\"/\"+databaseName)\n\t#cursor = connect.cursor()\n\t\n\t#PRINT SOME INFORMATIONS\n\tprint(\"SQL: SELECT DISTINCT LOWER(TargetLabel) FROM \"+bcolors.HEADER+\"tname\"+bcolors.ENDC+\" WHERE LOWER(SourceLabel) LIKE LOWER(\\\"%\"+bcolors.HEADER+idProt+bcolors.ENDC+\"%\\\") AND LOWER(TargetEntityType)=LOWER(\\\"p\\\") ORDER BY Period\")\n\tprint(\"ProtID querry: \"+bcolors.HEADER+idProt+bcolors.ENDC)\n\t\n\t#DO THE MATHS\n\tcursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\") #get all tables names\n\tfor ttuples in cursor.fetchall():\n\t\ttname = ttuples[0]\n\t\tprint(\"Searching assoc in \" +bcolors.HEADER+tname+bcolors.ENDC+ \" ...\")\n\n\t\tsqlstr = \"SELECT DISTINCT LOWER(TargetLabel) FROM \" +tname+ \" WHERE LOWER(SourceLabel) LIKE LOWER(\\\"%\"+idProt+\"%\\\") AND LOWER(TargetEntityType)=LOWER(\\\"p\\\") ORDER BY Period\"\n\t\tcursor.execute(sqlstr)\n\n\t\t#FILE WRITING\n\t\twith open(path+\"/requestResult/\"+idProt+\"_protAssoc_\"+tname+\".txt\", \"w\") as f:\n\t\t\tfor elements in cursor.fetchall():\n\t\t\t\tf.write(elements[0]+\"\\n\")\n\n\tconnect.commit()\n\tcloseConnection(cursor, connect)",
"def get_nucl2prot_accession():\n reg_exp = regex.compile(r\"gpprotdata.jsp\\?seqAccno=([0-9A-Z]+).+?\"\n \"gbnucdata.jsp\\?seqAccno=([0-9A-Z]+)\", \n regex.DOTALL|regex.MULTILINE|regex.VERBOSE)\n accession = reg_exp.findall(driver.page_source)\n accession = \"\\n\".join([\"|\".join(pair) for pair in accession])\n with open(\"./data/download/nucleotide2protein\", \"a\") as handle:\n handle.write(accession + \"\\n\")",
"def blast_local(fasta_file, db):\n blastp_exec = get_blast_exec()\n cmd = f\"{blastp_exec} -query {fasta_file} -db {db} -outfmt 6\"\n\n p = subprocess.run(\n shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n if p.returncode != 0:\n log.error(p.stderr.decode())\n raise Exception(\"BLAST failed\")\n\n out = p.stdout.decode(\"utf-8\").split(os.linesep)\n uniprot_id = out[0].split(\"\\t\")[1]\n return uniprot_id",
"def uniprot_to_csv(self):\n uniprot_dict = {}\n uniprot_csv_path = os.path.join(self.save_dir, 'uniprot_prefiltered_{}.csv'.format(self.mode))\n #uniprot_pickle_path = os.path.join(self.save_dir, '_{}.p'.format(self.mode))\n with open(self.uniprot_file_path, \"r\") as in_fobj:\n curr_prot_id = ''\n curr_F_GOs = []\n curr_P_GOs = []\n curr_C_GOs = []\n curr_ECs = []\n curr_structure = []\n seq = False\n for line in in_fobj:\n fields = line.strip().split()\n flag = fields[0]\n if flag == 'ID' and len(fields) >= 2:\n curr_prot_id = fields[1]\n uniprot_dict[curr_prot_id] = {}\n elif flag == 'DE' and len(fields) >= 2:\n rec_name = re.search(r'(?<=Full=)(.+?)[;\\s]', line)\n ec_nr = re.search(r'(?<=EC=)([0-9.-]*?)[;\\s]', line)\n if ec_nr:\n curr_ECs.append(ec_nr.group(1))\n elif rec_name:\n uniprot_dict[curr_prot_id]['rec_name'] = rec_name.group(1)\n elif flag == 'DR' and len(fields) >= 2:\n '''\n abfrage fuer GOS und PFAM\n '''\n # ask for GO first:\n dr_fields = [ref.rstrip('.;') for ref in fields[1:]]\n # TODO: should we filter for funcitonalilty here?\n if dr_fields[0] == 'GO' and dr_fields[2].startswith('F'):\n curr_F_GOs.append(dr_fields[1])\n # try:\n # uniprot_dict[curr_prot_id]['GO'].append(dr_fields[1])\n # except KeyError:\n # uniprot_dict[curr_prot_id]['GO'] = [dr_fields[1]]\n elif dr_fields[0] == 'GO' and dr_fields[2].startswith('P'):\n curr_P_GOs.append(dr_fields[1])\n elif dr_fields[0] == 'GO' and dr_fields[2].startswith('C'):\n curr_C_GOs.append(dr_fields[1])\n\n elif dr_fields[0] == 'Pfam':\n uniprot_dict[curr_prot_id]['Pfam'] = dr_fields[2:]\n else:\n pass\n elif flag == 'CC' and len(fields) >= 2:\n '''\n may content sequence caution warning\n '''\n pass\n\n elif flag == 'PE' and len(fields) >= 2:\n protein_existance = fields[1]\n uniprot_dict[curr_prot_id]['protein_existance'] = protein_existance\n\n elif flag == 'FT' and len(fields) >= 2:\n \"\"\"\n the annotated features. (http://www.uniprot.org/help/sequence_annotation)\n Those are anotations like catalytic site, binding sites and secondary structure\n \"\"\"\n ft_fields = fields[1:]\n if ft_fields[0] == 'HELIX':\n curr_structure.append(('HELIX', fields[2], fields[3]))\n if ft_fields[0] == 'STRAND':\n curr_structure.append(('STRAND', fields[2], fields[3]))\n if ft_fields[0] == 'SHEET':\n curr_structure.append(('SHEET', fields[2], fields[3]))\n if ft_fields[0] == 'TURN':\n curr_structure.append(('TURN', fields[2], fields[3]))\n elif flag == 'SQ' and len(fields) >= 2:\n seq = True\n uniprot_dict[curr_prot_id]['seq'] = ''\n elif seq == True:\n if flag == '//':\n uniprot_dict[curr_prot_id]['F_GO'] = self._full_annotation(curr_F_GOs)\n uniprot_dict[curr_prot_id]['P_GO'] = self._full_annotation(curr_P_GOs)\n uniprot_dict[curr_prot_id]['C_GO'] = self._full_annotation(curr_C_GOs)\n uniprot_dict[curr_prot_id]['EC'] = curr_ECs\n uniprot_dict[curr_prot_id]['Structure'] = curr_structure\n curr_prot_id = ''\n seq = False\n # set collectors to []\n curr_F_GOs = []\n curr_C_GOs = []\n curr_P_GOs = []\n curr_ECs = []\n curr_structure = []\n else:\n uniprot_dict[curr_prot_id]['seq'] += ''.join(fields)\n else:\n pass\n uniprot_df = pd.DataFrame.from_dict(uniprot_dict, orient='index')\n for key in uniprot_dict.keys():\n uniprot_dict.pop(key)\n\n uniprot_df.to_csv(uniprot_csv_path, sep=';', na_rep='', header=True, index=True,\n line_terminator='\\n')\n #uniprot_df.to_pickle(uniprot_pickle_path)",
"def ReadXeasyProt(self, fileName):\n #for the XEASY\n import ReadXeasy\n if _DoesFileExist(fileName) == 0:\n return\n #important - clean atomlist and atomdicfa:\n self.atomlist = []\n self.atomdicfa = {}\n print 'reading the .prot file', fileName\n self.fileName = fileName\n XPROT = ReadXeasy.XeasyProt()\n XPROT.ReadProt(fileName)\n for EACH in XPROT.atomlist:\n ATOM = Atom()\n ATOM.residuenumber = EACH.fragmentnumber\n ATOM.atomname = EACH.ariaatomname\n if EACH.shift == '999.000':\n ATOM.shift = None\n else:\n ATOM.shift = EACH.shift\n ATOM.shifterror = EACH.shifterror\n ATOM.xeasyatomname = EACH.xeasyatomname\n ATOM.xeasyatomnumber = EACH.atomnumber\n self.AddAtom(ATOM)\n self.RemoveDoubleQuotes() #conversion of \" into ''",
"def getchains(pdbfile):\n try:\n read = open(pdbfile,'r')\n except IOError:\n print(\"getchains: Couldn't open file %s\"%(pdbfile))\n raise\n else:\n result = []\n for line in read:\n if line[0:4]=='ATOM':\n if line[21] not in result and line[21].isalnum():\n result.append(line[21])\n elif \"_\" not in result and not line[21].isalnum():\n result.append(\"_\")\n read.close()\n return result",
"def run(pdbid, biounit=False):\n\n base_url = 'https://files.rcsb.org/download/'\n pdb_type = '.pdb1' if biounit else '.pdb'\n pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'\n\n try:\n request = Request(pdb_url)\n opener = build_opener()\n url_data = opener.open(request).read()\n\n except HTTPError as e:\n emsg = '[!] Error fetching structure: ({0}) {1}\\n'\n sys.stderr.write(emsg.format(e.code, e.msg))\n return\n\n else:\n\n try:\n buf = IO(url_data)\n gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')\n for line in gz_handle:\n yield line.decode('utf-8')\n\n except IOError as e:\n emsg = '[!] Error fetching structure: ({0}) {1}\\n'\n sys.stderr.write(emsg.format(e.code, e.msg))\n return\n\n finally:\n gz_handle.close()",
"def load_pdb_into_using_file_object(self, file_obj):\n\n #source_data = numpy.genfromtxt(file_obj, dtype=\"S6,S5,S5,S4,S2,S4,S4,S8,S8,S8,S6,S6,S10,S2,S2\", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 4, 2, 4, 4, 8, 8, 8, 6, 6, 10, 2, 2])\n source_data = numpy.genfromtxt(file_obj, dtype=\"S6,S5,S5,S5,S1,S4,S4,S8,S8,S8,S6,S6,S10,S2,S3\", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 5, 1, 4, 4, 8, 8, 8, 6, 6, 10, 2, 3])\n \n if source_data.ndim == 0: source_data = source_data.reshape(1, -1) # in case the pdb file has only one line\n \n # get the ones that are ATOM or HETATOM in the record_name\n or_matrix = numpy.logical_or((source_data['record_name'] == \"ATOM \"), (source_data['record_name'] == \"HETATM\"))\n indices_of_atom_or_hetatom = numpy.nonzero(or_matrix)[0]\n self.__parent_molecule.set_atom_information(source_data[indices_of_atom_or_hetatom])\n\n # now, some of the data needs to change types\n # first, fields that should be numbers cannot be empty strings\n for field in self.__parent_molecule.get_constants()['i8_fields'] + self.__parent_molecule.get_constants()['f8_fields']:\n check_fields = self.__parent_molecule.get_atom_information()[field]\n check_fields = numpy.core.defchararray.strip(check_fields)\n indices_of_empty = numpy.nonzero(check_fields == '')[0]\n self.__parent_molecule.get_atom_information()[field][indices_of_empty] = '0'\n \n # now actually change the type\n old_types = self.__parent_molecule.get_atom_information().dtype\n descr = old_types.descr\n for field in self.__parent_molecule.get_constants()['i8_fields']:\n index = self.__parent_molecule.get_atom_information().dtype.names.index(field)\n descr[index] = (descr[index][0], 'i8')\n for field in self.__parent_molecule.get_constants()['f8_fields']:\n index = self.__parent_molecule.get_atom_information().dtype.names.index(field)\n descr[index] = (descr[index][0], 'f8')\n new_types = numpy.dtype(descr)\n self.__parent_molecule.set_atom_information(self.__parent_molecule.get_atom_information().astype(new_types))\n \n # remove some of the fields that just contain empty data\n self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['empty', 'empty2']))\n \n # the coordinates need to be placed in their own special numpy array to facilitate later manipulation\n self.__parent_molecule.set_coordinates(numpy.vstack([self.__parent_molecule.get_atom_information()['x'], self.__parent_molecule.get_atom_information()['y'], self.__parent_molecule.get_atom_information()['z']]).T)\n self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['x', 'y', 'z'])) # now remove the coordinates from the atom_information object to save memory\n \n # string values in self.__parent_molecule.information.get_atom_information() should also be provided in stripped format for easier comparison\n fields_to_strip = ['name', 'resname', 'chainid', 'element']\n for f in fields_to_strip: self.__parent_molecule.set_atom_information(append_fields(self.__parent_molecule.get_atom_information(), f + '_stripped', data=numpy.core.defchararray.strip(self.__parent_molecule.get_atom_information()[f])))",
"def download_uniprot(db, folder, versions):\n # check if files are already downloaded, if not fetch them from uniprot site\n socket.setdefaulttimeout(120.)\n print('...downloading databases and taxonomy files...')\n db_name = 'uniprot_%s_%s.fasta.gz' % (db, versions[db],)\n base_address = 'ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/complete/'\n db_address = 'uniprot_%s.fasta.gz' % (db,)\n db_address = base_address + db_address\n files_addresses = [ (os.path.join(folder, db_name), db_address),\n (os.path.join(folder, 'taxdump.tar.gz'),\n 'ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz'),\n (os.path.join(folder, 'speclist.txt'),\n 'ftp://ftp.ebi.ac.uk/pub/databases/uniprot/current_release/knowledgebase/complete/docs/speclist.txt')]\n files_addresses.reverse()\n for (file_name, address) in files_addresses:\n if os.path.exists(file_name):\n pass\n else:\n print('...downloading', file_name)\n x = reporter()\n try:\n urllib.request.urlretrieve(address, file_name, reporthook=x.report)\n except:\n print('...WARNING: download may have hung at EOF or other error')\n finally:\n urllib.request.urlcleanup()\n return",
"def getChainIDsFromPDB(cls, filename, qparent=None):\n extension = filename.split('.')[-1].lower()\n if extension == 'pdb':\n linelist = []\n for line in open(filename, 'U'):\n if line[:6] == 'COMPND' and line[10:70].split(':')[0].strip() == 'CHAIN':\n linelist = line[17:].split(', ')\n linelist[0] = linelist[0].strip()\n if ';' in linelist[-1]:\n linelist[-1] = linelist[-1].split(';')[0]\t#removes the terminating semicolon and extra whitespace\n while True:\n try: linelist.remove('NULL')\n except: break\n return linelist\n if linelist == []:\n return []\n else:\n raise NotImplementedError, 'NYI'",
"def parse_pdb(path):\n\n pdb_dict = defaultdict(lambda: defaultdict(list))\n res_dict = defaultdict(list)\n with open(path) as o:\n lines = o.readlines()\n for line in lines:\n if line[:4] == 'ATOM':\n atom_info = process_atom_info(line)\n identifier = '{}{}'.format(\n atom_info['res_name'],\n atom_info['res_no']\n )\n pdb_dict[atom_info['chain']][identifier].append(atom_info)\n if identifier not in res_dict[atom_info['chain']]:\n res_dict[atom_info['chain']].append(identifier)\n return pdb_dict,res_dict",
"def add_chain_from_segid(pdb_path):\n temp_f = tempfile.NamedTemporaryFile(delete=False, mode=\"w+t\")\n with open(pdb_path) as fh:\n for line in list(pdb_segxchain.run(fh)):\n temp_f.writelines(line)\n temp_f.close()\n # REPLACE!\n new_pdb_path = shutil.move(temp_f.name, pdb_path)\n return new_pdb_path",
"def get_pose_from_pdb_with_chain(path, chain):\n p = PDBParser()\n struct = p.get_structure('TEST', path)\n c = struct[0][chain]\n io = PDBIO()\n io.set_structure(c)\n # Yuck - we have to save in PDB state\n io.save('/tmp/mypdb.pdb')\n pose = pose_from_pdb('/tmp/mypdb.pdb')\n os.remove('/tmp/mypdb.pdb')\n return pose",
"def uniprotAPICall(protein_name):\n # API call to UniRef DB\n base_url = \"http://www.uniprot.org/uniprot/\"\n extension = \".xml\"\n my_response = requests.get(base_url + protein_name + extension)\n \n # For successful API call, response code will be 200 (OK)\n if not my_response.ok:\n print \"UniProt node not found: \" + str(protein_name) \n return\n\n # get root of the XML response\n root = ET.fromstring(my_response.content)\n rep_member = root.find('{http://uniprot.org/uniprot}entry')\n\n # set up dict to put in info\n member_dict = {}\n\n # Add any properties that have type - id pairings\n for prop in rep_member.iter():\n if 'type' in prop.attrib and 'id' in prop.attrib:\n member_dict[prop.attrib['type'].replace(\" \", \"_\")] = prop.attrib['id']\n # else:\n # member_dict[prop.attrib['type'].replace(\n # \" \", \"_\")] = prop.attrib['id']\n \n # Get protein accession. Ex: Q8KM74\n member_dict['UniProtKB_accession'] = rep_member.find('{http://uniprot.org/uniprot}accession').text\n member_dict['id'] = member_dict['UniProtKB_accession']\n\n # Get specific protein accession. Ex: Q8KM74_METTR\n member_dict['UniProtKB_ID'] = rep_member.find('{http://uniprot.org/uniprot}name').text\n\n # Get source organism\n member_dict['source_organism'] = rep_member.find('{http://uniprot.org/uniprot}organism').find('{http://uniprot.org/uniprot}name').text\n\n # Get protein existance: http://www.uniprot.org/help/protein_existence\n member_dict['protein_existence'] = rep_member.find('{http://uniprot.org/uniprot}proteinExistence').attrib['type'] if 'type' in rep_member.find('{http://uniprot.org/uniprot}proteinExistence').attrib else None\n \n # Get protein length\n member_dict['length'] = int(rep_member.find('{http://uniprot.org/uniprot}sequence').attrib['length']) if 'length' in rep_member.find('{http://uniprot.org/uniprot}sequence').attrib else None\n\n #print member_dict\n #name = UniProtKB_accession, UniProtKB_ID (has the _1343), UniProtKB_accession, id = UniProtKB_ID, length, protein_name, source_organism, NCBI_taxonomy, UniParc_ID, Pfam,Supfam\n\n return ClustNode(member_dict)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
r""" Apply the JordanWigner transform to a FermionOperator, InteractionOperator, or DiagonalCoulombHamiltonian to convert to a QubitOperator. | def jordan_wigner(operator):
if isinstance(operator, FermionOperator):
return _jordan_wigner_fermion_operator(operator)
if isinstance(operator, MajoranaOperator):
return _jordan_wigner_majorana_operator(operator)
if isinstance(operator, DiagonalCoulombHamiltonian):
return _jordan_wigner_diagonal_coulomb_hamiltonian(operator)
if isinstance(operator, InteractionOperator):
return _jordan_wigner_interaction_op(operator)
raise TypeError("Operator must be a FermionOperator, "
"MajoranaOperator, "
"DiagonalCoulombHamiltonian, or "
"InteractionOperator.") | [
"def convert_wpo_and_openfermion(operator):\n def _count_qubits(openfermion_operator):\n \"\"\" Counts the number of qubits in the openfermion.operator\"\"\" \n nb_qubits = 0\n for sett, coef in openfermion_operator.terms.items():\n if len(sett)>0:\n nb_qubits = max(nb_qubits, max([s[0] for s in sett]))\n return nb_qubits+1\n \n # (commented out version is for updated git version of openfermion)\n # import openfermion\n # if type(operator) is openfermion.ops.operators.qubit_operator.QubitOperator:\n if str(operator.__class__) == \"<class 'openfermion.ops._qubit_operator.QubitOperator'>\":\n nb_qubits = _count_qubits(operator)\n\n iden = Pauli.from_label('I'*nb_qubits)\n qiskit_operator = WeightedPauliOperator([(0., iden)])\n for sett, coef in operator.terms.items():\n new_sett = 'I'*nb_qubits\n for s in sett:\n new_sett = new_sett[:(s[0])] + s[1] + new_sett[(s[0]+1):]\n pauli = Pauli.from_label(new_sett)\n op = WeightedPauliOperator([(coef, pauli)])\n # print(coef)\n # print(new_sett)\n qiskit_operator = qiskit_operator + op\n return qiskit_operator \n else:\n raise NotImplementedError(\"Currently only converts 1 way, openfermion-> qiskit wpo\")",
"def _openfermion_to_pennylane(qubit_operator, wires=None):\n n_wires = (\n 1 + max(max(i for i, _ in t) if t else 1 for t in qubit_operator.terms)\n if qubit_operator.terms\n else 1\n )\n wires = _process_wires(wires, n_wires=n_wires)\n\n if not qubit_operator.terms: # added since can't unpack empty zip to (coeffs, ops) below\n return np.array([0.0]), [qml.Identity(wires[0])]\n\n xyz2pauli = {\"X\": qml.PauliX, \"Y\": qml.PauliY, \"Z\": qml.PauliZ}\n\n coeffs, ops = zip(\n *[\n (\n coef,\n qml.operation.Tensor(*[xyz2pauli[q[1]](wires=wires[q[0]]) for q in term])\n if len(term) > 1\n else (\n xyz2pauli[term[0][1]](wires=wires[term[0][0]])\n if len(term) == 1\n else qml.Identity(wires[0])\n )\n # example term: ((0,'X'), (2,'Z'), (3,'Y'))\n )\n for term, coef in qubit_operator.terms.items()\n ]\n )\n\n return np.real(np.array(coeffs, requires_grad=False)), list(ops)",
"def _setup_Q(self):\n self.Q_s = [None for _ in range(self.p+1)]\n self.Q_s[self.p] = np.eye(self.layers[self.p-1])\n for i in range(self.p-1, -1, -1):\n self.Q_s[i] = np.dot(self.U_s[i], self.Q_s[i+1])",
"def test_hermitian_conjugated_qubit_op_consistency(self):\n ferm_op = (FermionOperator('1^ 2') + FermionOperator('2 3 4') +\n FermionOperator('2^ 7 9 11^'))\n\n # Check that hermitian conjugation commutes with transforms\n self.assertEqual(jordan_wigner(hermitian_conjugated(ferm_op)),\n hermitian_conjugated(jordan_wigner(ferm_op)))\n self.assertEqual(bravyi_kitaev(hermitian_conjugated(ferm_op)),\n hermitian_conjugated(bravyi_kitaev(ferm_op)))",
"def left_jacobian_Q_matrix(cls, xi):\n phi = xi[:, :3] # rotation part\n mu = xi[:, 3:6] # velocity part\n rho = xi[:, 6:9] # translation part\n\n px = SO3.wedge(phi)\n mx = SO3.wedge(mu)\n rx = SO3.wedge(rho)\n\n ph = phi.norm(p=2, dim=1)\n ph2 = ph * ph\n ph3 = ph2 * ph\n ph4 = ph3 * ph\n ph5 = ph4 * ph\n\n cph = ph.cos()\n sph = ph.sin()\n\n m1 = 0.5\n m2 = (ph - sph) / ph3\n m3 = (0.5 * ph2 + cph - 1.) / ph4\n m4 = (ph - 1.5 * sph + 0.5 * ph * cph) / ph5\n\n m2 = m2.unsqueeze(dim=1).unsqueeze(dim=2).expand_as(rx)\n m3 = m3.unsqueeze(dim=1).unsqueeze(dim=2).expand_as(rx)\n m4 = m4.unsqueeze(dim=1).unsqueeze(dim=2).expand_as(rx)\n\n v1 = mx\n v2 = px.bmm(mx) + mx.bmm(px) + px.bmm(mx).bmm(px)\n v3 = px.bmm(px).bmm(mx) + mx.bmm(px).bmm(px) - 3. * px.bmm(mx).bmm(px)\n v4 = px.bmm(mx).bmm(px).bmm(px) + px.bmm(px).bmm(mx).bmm(px)\n\n t1 = rx\n t2 = px.bmm(rx) + rx.bmm(px) + px.bmm(rx).bmm(px)\n t3 = px.bmm(px).bmm(rx) + rx.bmm(px).bmm(px) - 3. * px.bmm(rx).bmm(px)\n t4 = px.bmm(rx).bmm(px).bmm(px) + px.bmm(px).bmm(rx).bmm(px)\n\n Q_v = m1 * v1 + m2 * v2 + m3 * v3 + m4 * v4\n Q_p = m1 * t1 + m2 * t2 + m3 * t3 + m4 * t4\n\n return Q_v, Q_p",
"def __or__(self, qubits):\n # Check that input is only one qureg or one qubit\n qubits = self.make_tuple_of_qureg(qubits)\n if len(qubits) != 1:\n raise TypeError(\"Only one qubit or qureg allowed.\")\n # Check that if hamiltonian has only an identity term,\n # apply a global phase:\n if len(self.hamiltonian.terms) == 1 and () in self.hamiltonian.terms:\n Ph(-1 * self.time * self.hamiltonian.terms[()]) | qubits[0][0]\n return\n num_qubits = len(qubits[0])\n non_trivial_qubits = set()\n for term in self.hamiltonian.terms:\n for index, _ in term:\n non_trivial_qubits.add(index)\n if max(non_trivial_qubits) >= num_qubits:\n raise ValueError(\"hamiltonian acts on more qubits than the gate is applied to.\")\n # create new TimeEvolution gate with rescaled qubit indices in\n # self.hamiltonian which are ordered from\n # 0,...,len(non_trivial_qubits) - 1\n new_index = {}\n non_trivial_qubits = sorted(non_trivial_qubits)\n for i, qubit in enumerate(non_trivial_qubits):\n new_index[qubit] = i\n new_hamiltonian = QubitOperator()\n for term in self.hamiltonian.terms:\n new_term = tuple((new_index[index], action) for index, action in term)\n new_hamiltonian.terms[new_term] = self.hamiltonian.terms[term]\n new_gate = TimeEvolution(time=self.time, hamiltonian=new_hamiltonian)\n new_qubits = [qubits[0][i] for i in non_trivial_qubits]\n # Apply new gate\n cmd = new_gate.generate_command(new_qubits)\n apply_command(cmd)",
"def express(self):\n matrices = self.allele + (self.to_zero,)\n if self.precision > 0:\n self.molecule.openState.xform = M.chimera_xform(\n M.multiply_matrices(*numpy_around(matrices, self.precision).tolist()))\n else:\n self.molecule.openState.xform = M.chimera_xform(\n M.multiply_matrices(*matrices))",
"def internal_product_on_basis(self, I, J):",
"def jacobian_world(self,\n q: Optional[Sequence[float]] = None) -> np.ndarray:\n q = self.joints if q is None else q\n j_fl = self.jacobian_flange(q)\n pose = self.fk(q)\n rotation = pose[:3, :3]\n j_tr = np.zeros(\n (ROTATION_VECTOR_LENGTH * 2, ROTATION_VECTOR_LENGTH * 2),\n dtype=float\n )\n j_tr[:ROTATION_VECTOR_LENGTH, :ROTATION_VECTOR_LENGTH] = \\\n rotation\n j_tr[ROTATION_VECTOR_LENGTH:, ROTATION_VECTOR_LENGTH:] = \\\n rotation\n j_w = np.dot(j_tr, j_fl)\n\n return j_w",
"def get_coupling_matrix(self, J):\n\n a = np.array([[4,1,0,1,4]])\n r2 = a + a.T\n W = np.zeros((5,5))\n W[r2 == 1] = J[0]\n W[r2 == 2] = J[1]\n W[r2 == 4] = J[2]\n W[r2 == 5] = J[3]\n return W",
"def _translate_Q_to_theta(self, kw):\n # Grab wavelength first so we can translate Qlo/Qhi to Tlo/Thi no\n # matter what order the keywords appear.\n wavelength = kw.get('wavelength', self.wavelength)\n if \"Q\" in kw:\n kw[\"T\"] = QL2T(kw.pop(\"Q\"), wavelength)\n if \"Qlo\" in kw:\n kw[\"Tlo\"] = QL2T(kw.pop(\"Qlo\"), wavelength)\n if \"Qhi\" in kw:\n kw[\"Thi\"] = QL2T(kw.pop(\"Qhi\"), wavelength)\n if \"slits_at_Qlo\" in kw:\n kw[\"slits_at_Tlo\"] = kw.pop(\"slits_at_Qlo\")",
"def write_in_qp(\n self, tensor: Tensor, format_: str, name_format=None, set_symms=True\n ):\n\n terms = tensor.subst_all(self.f_in_qp).simplify().local_terms\n\n # Internal book keeping, maps the cr/an order to lhs and the rhs terms\n # of the definition of the new matrix element.\n transf = {}\n\n rewritten_terms = []\n\n for term in terms:\n cr_order = 0\n an_order = 0\n indices = []\n for i in term.vecs:\n if len(i.indices) != 2:\n raise ValueError(\n 'Invalid operator to rewrite, one index expected', i\n )\n char, index = i.indices\n if char == CR:\n assert an_order == 0\n cr_order += 1\n elif char == AN:\n an_order += 1\n else:\n assert False\n\n indices.append(index)\n continue\n\n norm = factorial(cr_order) * factorial(an_order)\n order = (cr_order, an_order)\n tot_order = cr_order + an_order\n\n base = IndexedBase(format_.format(*order))\n if name_format is not None:\n base_name = name_format.format(*order)\n self.set_name(**{base_name: base})\n\n indices[cr_order:tot_order] = reversed(indices[cr_order:tot_order])\n if tot_order > 0:\n new_amp = base[tuple(indices)]\n else:\n new_amp = base.label\n orig_amp = term.amp\n\n new_sums = []\n wrapped_sums = []\n for i in term.sums:\n if new_amp.has(i[0]):\n new_sums.append(i)\n else:\n wrapped_sums.append(i)\n continue\n\n def_term = Term(\n sums=tuple(wrapped_sums), amp=orig_amp * norm, vecs=()\n )\n\n if order in transf:\n entry = transf[order]\n assert entry[0] == new_amp\n entry[1].append(def_term)\n else:\n transf[order] = (new_amp, [def_term])\n rewritten_terms.append(Term(\n sums=tuple(new_sums), amp=new_amp / norm, vecs=term.vecs\n ))\n if set_symms and (cr_order > 1 or an_order > 1):\n self.set_dbbar_base(base, cr_order, an_order)\n\n continue\n\n defs = [\n self.define(lhs, self.create_tensor(rhs_terms))\n for lhs, rhs_terms in transf.values()\n ]\n\n return self.create_tensor(rewritten_terms), defs",
"def operator_berry(hin,k=[0.,0.],operator=None,delta=0.00001,ewindow=None):\n h = multicell.turn_multicell(hin) # turn to multicell form\n dhdx = multicell.derivative(h,k,order=[1,0]) # derivative\n dhdy = multicell.derivative(h,k,order=[0,1]) # derivative\n hkgen = h.get_hk_gen() # get generator\n hk = hkgen(k) # get hamiltonian\n (es,ws) = lg.eigh(hkgen(k)) # initial waves\n ws = np.conjugate(np.transpose(ws)) # transpose the waves\n n = len(es) # number of energies\n from berry_curvaturef90 import berry_curvature as bc90\n if operator is None: operator = np.identity(dhdx.shape[0],dtype=np.complex)\n b = bc90(dhdx,dhdy,ws,es,operator,delta) # berry curvature\n return b*np.pi*np.pi*8 # normalize so the sum is 2pi Chern",
"def add_qubit(self):\n z0 = StabilizerState([[0, 1]])\n self.__init__(self.tensor_product(z0))",
"def isometrize(self):\n for idx,w0 in enumerate(self.W[0]):\n temp=np.reshape(w0,[self.d**2,self.Dbond])\n dmin=min(temp.shape)\n Q,R=np.linalg.qr(temp)\n self.W[0][idx]=np.reshape(Q,[self.d,self.d,dmin])\n\n for i in range(1,self.Nlayer):\n for idx,wj in enumerate(self.W[i]):\n temp=np.reshape(wj,[self.Dbond*self.Dbond,wj.shape[2]])\n Q,R=np.linalg.qr(temp)\n self.W[i][idx]=np.reshape(Q,[self.Dbond,self.Dbond,wj.shape[2]])",
"def EvalW(self, J):\n return _nonlininteg.InverseHarmonicModel_EvalW(self, J)",
"def EvalW(self, J):\n return _nonlininteg.NeoHookeanModel_EvalW(self, J)",
"def _apply_mps_operation(self, operation: MPSOperation, **kwargs) -> None:\n if not isinstance(operation, MPSOperation):\n raise TypeError(\n \"Argument operation should be of type MPSOperation but is \"\n f\"of type {type(operation)}.\"\n )\n if not operation.is_valid():\n raise ValueError(\"Input MPS Operation is not valid.\")\n\n if operation.is_single_qudit_operation():\n self.apply_one_qudit_gate(\n operation.node(), *operation.qudit_indices, **kwargs\n )\n elif operation.is_two_qudit_operation():\n self.apply_two_qudit_gate(\n operation.node(), *operation.qudit_indices, **kwargs\n )\n else:\n raise ValueError(\n \"Only one-qudit and two-qudit gates are supported. \"\n \"To apply a gate on three or more qudits, the gate must be \"\n \"compiled into a sequence of one- and two-qudit gates.\"\n )",
"def apply_K(self, qubitNum):\n self.apply_onequbit_gate(pQ.ops.H, qubitNum)\n self.apply_onequbit_gate(pQ.ops.S, qubitNum)\n self.apply_onequbit_gate(pQ.ops.H, qubitNum)\n self.apply_onequbit_gate(pQ.ops.Z, qubitNum)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the base Pool Info | def test_01_Base(self):
l_pool = poolXml._read_base(self.m_xml.pool)
# print(PrettyFormatAny.form(l_pool, 'R1-01-A - Pool'))
self.assertEqual(l_pool.Name, TESTING_POOL_NAME_0)
self.assertEqual(l_pool.Key, int(TESTING_POOL_KEY_0))
self.assertEqual(str(l_pool.Active), TESTING_POOL_ACTIVE_0)
self.assertEqual(l_pool.UUID, TESTING_POOL_UUID_0)
l_pool = poolXml._read_base(self.m_xml.pool_sect[1])
# print(PrettyFormatAny.form(l_pool, 'B1-01-B - Pool'))
self.assertEqual(l_pool.Name, TESTING_POOL_NAME_1)
self.assertEqual(l_pool.Key, int(TESTING_POOL_KEY_1))
self.assertEqual(l_pool.Active, bool(TESTING_POOL_ACTIVE_1)) | [
"def _getpool():\n logging.info(\n \"Creating a process pool with pool size {processes} (the number of CPU cores)...\".format(\n processes=os.cpu_count() or 1))\n return Pool()",
"def _get_pool_object(self):\n\n pool = [{\"status\": \"ACTIVE\",\n \"lb_method\": \"ROUND_ROBIN\",\n \"protocol\": \"TCP\",\n \"description\": \"\",\n \"health_monitors\": [],\n \"members\":\n [\n \"4910851f-4af7-4592-ad04-08b508c6fa21\"\n ],\n \"status_description\": None,\n \"id\": \"6350c0fd-07f8-46ff-b797-62acd23760de\",\n \"vip_id\": \"7a755739-1bbb-4211-9130-b6c82d9169a5\",\n \"name\": \"lb-pool\",\n \"admin_state_up\": True,\n \"subnet_id\": \"b31cdafe-bdf3-4c19-b768-34d623d77d6c\",\n \"tenant_id\": \"f6b09b7a590642d8ac6de73df0ab0686\",\n \"health_monitors_status\": [],\n \"provider\": \"haproxy\"}]\n return pool",
"def get_pools():\n global f5rest_url\n return (get_f5json(f5rest_url + 'ltm/pool'))",
"def test_01_Base(self):\n # print(PrettyFormatAny.form(self.m_pools[0], 'W1-01-A - Pools'))\n l_xml = poolXml._write_base(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-01-B - Pool'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)",
"def get_request_data_for_create_pool(self):\n\n request_data = {\n \"info\": {'context': {\"logging_context\": {}},\n 'service_type': \"loadbalancer\",\n 'service_vendor': \"haproxy\",\n },\n \"config\": [{\n \"resource\": \"pool\",\n \"resource_data\": {\n \"neutron_context\": self.context,\n \"pool\": self._get_pool_object()[0],\n \"driver_name\": \"loadbalancer\"\n }}]}\n return request_data",
"def __createIPpoolObj(self,ippool_info,ippool_ips):\n return ippool.IPPool(ippool_info[\"ippool_id\"],\n ippool_info[\"ippool_name\"],\n ippool_info[\"ippool_comment\"],\n ippool_ips)",
"def _fetch_ipconfig_infomation():\n \n # Launch up a shell, get the feedback\n process = portable_popen.Popen([\"ipconfig\", \"/all\"])\n\n # Get the output\n outputdata = process.stdout.readlines()\n \n # Close the pipe\n process.stdout.close()\n \n # Stores the info\n info_dict = {}\n \n # Store the current container\n current_container = None\n \n # Process each line\n for line in outputdata:\n # Strip unwanted characters\n line = line.strip(\"\\r\\n\")\n \n # Check if this line is blank, skip it\n if line.strip() == \"\":\n continue\n \n # This is a top-level line if it does not start with a space\n if not line.startswith(\" \"):\n # Do some cleanup\n line = line.strip(\" :\")\n \n # Check if this exists in the top return dictionary, if not add it\n if line not in info_dict:\n info_dict[line] = {}\n \n # Set the current container\n current_container = line\n \n # Otherwise, this line just contains some information\n else:\n # Check if we are in a container\n if not current_container:\n continue\n \n # Cleanup\n line = line.strip()\n line = line.replace(\". \", \"\")\n \n # Explode on the colon\n (key, value) = line.split(\":\",1)\n \n # More cleanup\n key = key.strip()\n value = value.strip()\n \n # Store this\n info_dict[current_container][key] = value\n \n # Return everything\n return info_dict",
"def set_default_pools(self):\n (virt_name, phys_name) = weaver.image.image.current_pools()\n self.default_virtpool = bootinfo.get_virtpool(virt_name)\n self.default_physpool = bootinfo.get_physpool(phys_name)\n self.default_directpool = bootinfo.get_virtpool(\"direct\")",
"def machine_info():\n BYTES_IN_GIG = 1073741824\n free_bytes = psutil.virtual_memory().available\n return [{\"memory\": int(free_bytes / BYTES_IN_GIG), \"cores\": multiprocessing.cpu_count(),\n \"name\": socket.gethostname()}]",
"def _get_pool(self, pool: PoolType) -> PoolType:\n if pool is not None:\n self.pool = pool\n return self.pool",
"def __get_info(self):\n with urlopen(self.dataset_url) as req:\n meta = req.info()\n return meta",
"def __getIPpoolIPsDB(self,ippool_id):\n return db_main.getHandle().get(\"ippool_ips\",\"ippool_id=%s\"%ippool_id)",
"def test_get_cloud_pool(self):\n pass",
"def get_pnp_info():\n if PNP_REGISTRY is None:\n load_pnp_info()\n return PNP_REGISTRY",
"def generate_bootinfo(self, bi):\n\n attrs = self.get_attrs()\n # Set defaults for values calculated from attributes generated\n # by layout(). This method can be called from\n # BootInfo.create_dynamic_segments(), which called prior to\n # Image.layout() in which case addresses and default pools may\n # not be known. Consequently it doesn't really matter what\n # default values are used because the bootinfo ops will be\n # thrown away once the total size is calculated.\n vbase = 0\n pbase = 0\n size = 0\n physpool_id = 0\n virtpool_id = 0\n\n # Calculate the ids of the memsections's pools.\n\n if attrs.direct:\n virtpool_id = \\\n weaver.bootinfo.bootinfo.get_virtpool('direct').get_bootinfo_id()\n elif attrs.virtpool is not None:\n virtpool_id = \\\n weaver.bootinfo.bootinfo.get_virtpool(attrs.virtpool).get_bootinfo_id()\n \n if attrs.physpool is not None:\n physpool_id = \\\n weaver.bootinfo.bootinfo.get_physpool(attrs.physpool).get_bootinfo_id()\n \n # Align the addresses to page boundaries. The pool allocators\n # will have taken this into account, but kept non-aligned\n # values to be compatible with the ELF contents.\n\n if attrs.phys_addr is not None:\n pbase = align_down(attrs.phys_addr,\n weaver.machine.machine.min_page_size())\n if attrs.virt_addr is not None:\n vbase = align_down(attrs.virt_addr,\n weaver.machine.machine.min_page_size())\n size = align_up(attrs.size + (attrs.virt_addr - vbase),\n weaver.machine.machine.min_page_size())\n\n flags = (weaver.bootinfo_elf.BI_MEM_USER |\n weaver.bootinfo_elf.BI_MEM_FIXED)\n\n if attrs.pager == \"memload\":\n flags |= weaver.bootinfo_elf.BI_MEM_LOAD\n\n # QC_MODIFIED\n if attrs.elf_flags is not None:\n if attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_PAGED_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_LOAD\n if (attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_FLAG_SEGMENT_TYPE_MASK) == \\\n weaver.bootinfo_elf.MI_PBT_SWAPPED_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_RELOAD\n if (attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_FLAG_SEGMENT_TYPE_MASK) == \\\n weaver.bootinfo_elf.MI_PBT_SWAP_POOL_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_SWAPPOOL\n if (attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_POOLIDX_SEGMENT) == \\\n weaver.bootinfo_elf.MI_PBT_POOLIDX_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_POOLIDX\n # print \"flags:\", hex(attrs.elf_flags), hex(flags)\n # END QC_MODIFIED\n\n if attrs.protected:\n flags |= weaver.bootinfo_elf.BI_MEM_PROTECTED\n if self.token_exported == 0:\n self.owner.environment.add_pd_extension_token_entry(self.ms.get_attrs().name + \"_PD_EXT\", 0, 0, 0, 0)\n self.token_exported = 1\n\n # Memsections in zones are initialised differently to\n # memsections in PDs.\n if (flags & weaver.bootinfo_elf.BI_MEM_PROTECTED):\n name = (self.get_name() + \"_PD_EXT\").upper()\n else:\n name = (self.get_name()).upper()\n if self.zone is not None:\n self.bi_name = \\\n bi.write_new_ms(owner = self.owner.get_bootinfo_id(),\n base = vbase,\n size = size,\n flags = flags,\n attr = attrs.cache_policy,\n physpool = physpool_id,\n virtpool = None,\n zone = self.zone.get_bootinfo_id(),\n name = name)\n for cap in self.caps.values():\n cap.generate_bootinfo(self, bi)\n\n else:\n self.bi_name = \\\n bi.write_new_ms(owner = self.owner.get_bootinfo_id(),\n base = vbase,\n size = size,\n flags = flags,\n attr = attrs.cache_policy,\n physpool = physpool_id,\n virtpool = virtpool_id,\n zone = None,\n name = name)\n \n for cap in self.caps.values():\n if name == 'HEAP' or name == 'STACK' or name == 'SMEM':\n if cap.name == 'rw':\n 
cap.generate_bootinfo(self, bi)\n else:\n cap.generate_bootinfo(self, bi)\n \n if self.need_attach():\n bi.write_attach(pd = self.owner.get_bootinfo_id(),\n ms = self.bi_name,\n rights = self.ms.attrs.attach)\n\n if attrs.elf_flags is not None:\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_FULLYACCESSIBLE) == \\\n weaver.bootinfo_elf.BI_FULLYACCESSIBLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['master'].get_bootinfo_id())\n else:\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_READABLE) == \\\n weaver.bootinfo_elf.BI_READABLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['read'].get_bootinfo_id())\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_WRITABLE) == \\\n weaver.bootinfo_elf.BI_WRITABLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['write'].get_bootinfo_id())\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_EXECUTABLE) == \\\n weaver.bootinfo_elf.BI_EXECUTABLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['execute'].get_bootinfo_id())\n else:\n if name == 'HEAP' or name == 'STACK' or name == 'SMEM':\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['rw'].get_bootinfo_id())\n else:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['master'].get_bootinfo_id())\n\n # Common operations.\n \n bi.write_map(vaddr = vbase,\n size = size,\n paddr = pbase,\n scrub = self.get_attrs().should_scrub(),\n flags = flags)",
"def get_process_info(self):\n process_info = super(RemoteProcessProxy, self).get_process_info()\n process_info.update({'assigned_ip': self.assigned_ip,\n 'assigned_host': self.assigned_host,\n 'comm_ip': self.comm_ip,\n 'comm_port': self.comm_port,\n 'tunneled_connect_info': self.tunneled_connect_info})\n return process_info",
"def load_process_info(self, process_info):\n self.pid = process_info['pid']\n self.pgid = process_info['pgid']\n self.ip = process_info['ip']\n self.kernel_manager.ip = process_info['ip']",
"async def get_pool_metas():\r\n db = client['mappools']\r\n collection = db['meta']\r\n cursor = collection.find()\r\n return (await cursor.to_list(length=100))",
"def getPoolArgs(self):\n raise NotImplementedError()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
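Note on the entry above: the poolXml._read_base helper that test_01_Base exercises is not reproduced in this row. As a point of reference only, the sketch below shows a minimal ElementTree-style reader with the same observable behaviour (Name, Key and Active read from attributes, UUID from a child element); the element layout and the PoolBase/read_base names are assumptions, not the actual PyHouse code.

# hypothetical sketch -- not the PyHouse poolXml implementation
import xml.etree.ElementTree as ET

class PoolBase(object):
    """Bare container with the fields asserted in test_01_Base."""
    def __init__(self):
        self.Name = ''
        self.Key = 0
        self.Active = False
        self.UUID = ''

def read_base(p_pool_element):
    # Name, Key and Active are assumed to be XML attributes
    l_obj = PoolBase()
    l_obj.Name = p_pool_element.get('Name')
    l_obj.Key = int(p_pool_element.get('Key'))
    l_obj.Active = p_pool_element.get('Active') == 'True'
    # UUID is assumed to be a child element
    l_obj.UUID = p_pool_element.findtext('UUID', default='')
    return l_obj

# Example:
# read_base(ET.fromstring('<Pool Name="Test Pool" Key="0" Active="True"><UUID>...</UUID></Pool>'))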
Write pool base info | def test_01_Base(self):
# print(PrettyFormatAny.form(self.m_pools[0], 'W1-01-A - Pools'))
l_xml = poolXml._write_base(self.m_pools[0])
# print(PrettyFormatAny.form(l_xml, 'W1-01-B - Pool'))
self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)
self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)
self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0) | [
"def test_01_Base(self):\n l_pool = poolXml._read_base(self.m_xml.pool)\n # print(PrettyFormatAny.form(l_pool, 'R1-01-A - Pool'))\n self.assertEqual(l_pool.Name, TESTING_POOL_NAME_0)\n self.assertEqual(l_pool.Key, int(TESTING_POOL_KEY_0))\n self.assertEqual(str(l_pool.Active), TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_pool.UUID, TESTING_POOL_UUID_0)\n l_pool = poolXml._read_base(self.m_xml.pool_sect[1])\n # print(PrettyFormatAny.form(l_pool, B1-01-B - 'Pool'))\n self.assertEqual(l_pool.Name, TESTING_POOL_NAME_1)\n self.assertEqual(l_pool.Key, int(TESTING_POOL_KEY_1))\n self.assertEqual(l_pool.Active, bool(TESTING_POOL_ACTIVE_1))",
"def generate_bootinfo(self, bi):\n self.bi_name = \\\n bi.write_new_pool(self.is_virtual())\n \n for cap in self.caps.values():\n cap.generate_bootinfo(self, bi)\n \n for (base, end, mem_type) in self.pool.get_freelist():\n if self.is_virtual():\n bi.write_add_virt_mem(self.bi_name, base, end)\n else:\n bi.write_add_phys_mem(self.bi_name, base, end)",
"def __createIPpoolObj(self,ippool_info,ippool_ips):\n return ippool.IPPool(ippool_info[\"ippool_id\"],\n ippool_info[\"ippool_name\"],\n ippool_info[\"ippool_comment\"],\n ippool_ips)",
"def set_default_pools(self):\n (virt_name, phys_name) = weaver.image.image.current_pools()\n self.default_virtpool = bootinfo.get_virtpool(virt_name)\n self.default_physpool = bootinfo.get_physpool(phys_name)\n self.default_directpool = bootinfo.get_virtpool(\"direct\")",
"def store_pool(self, pool, start):\n if isinstance(start, Integral):\n indices = map(str, range(start, start + len(pool)))\n else:\n indices = [\"(%s) + %d\" % (start, i) for i in range(len(pool))]\n name = self.array_name\n s = \"\"\n for i, index in enumerate(indices):\n s += \"%s[%s] = %s;\\n\" % (name, index, pool[i])\n return s",
"def save_pool():\n for i in range(total_models):\n with open(os.path.join(pool_dir, 'model_{}.pickle'.format(i)), 'wb') as f:\n pickle.dump(current_pool[i], f)",
"def test_02_Pool0(self):\n l_xml = poolXml._write_one_pool(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-02-A - Pool 0'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_0)\n self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_0)",
"def pprint(self):\r\n self.refresh()\r\n print(self.pool)",
"def writeAssignments(hashring, filename):\n logging.debug(\"Dumping pool assignments to file: '%s'\" % filename)\n\n try:\n with open(filename, 'a') as fh:\n fh.write(\"bridge-pool-assignment %s\\n\" %\n time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n hashring.dumpAssignments(fh)\n except IOError:\n logging.info(\"I/O error while writing assignments to: '%s'\" % filename)",
"def generate_bootinfo(self, bi):\n\n attrs = self.get_attrs()\n # Set defaults for values calculated from attributes generated\n # by layout(). This method can be called from\n # BootInfo.create_dynamic_segments(), which called prior to\n # Image.layout() in which case addresses and default pools may\n # not be known. Consequently it doesn't really matter what\n # default values are used because the bootinfo ops will be\n # thrown away once the total size is calculated.\n vbase = 0\n pbase = 0\n size = 0\n physpool_id = 0\n virtpool_id = 0\n\n # Calculate the ids of the memsections's pools.\n\n if attrs.direct:\n virtpool_id = \\\n weaver.bootinfo.bootinfo.get_virtpool('direct').get_bootinfo_id()\n elif attrs.virtpool is not None:\n virtpool_id = \\\n weaver.bootinfo.bootinfo.get_virtpool(attrs.virtpool).get_bootinfo_id()\n \n if attrs.physpool is not None:\n physpool_id = \\\n weaver.bootinfo.bootinfo.get_physpool(attrs.physpool).get_bootinfo_id()\n \n # Align the addresses to page boundaries. The pool allocators\n # will have taken this into account, but kept non-aligned\n # values to be compatible with the ELF contents.\n\n if attrs.phys_addr is not None:\n pbase = align_down(attrs.phys_addr,\n weaver.machine.machine.min_page_size())\n if attrs.virt_addr is not None:\n vbase = align_down(attrs.virt_addr,\n weaver.machine.machine.min_page_size())\n size = align_up(attrs.size + (attrs.virt_addr - vbase),\n weaver.machine.machine.min_page_size())\n\n flags = (weaver.bootinfo_elf.BI_MEM_USER |\n weaver.bootinfo_elf.BI_MEM_FIXED)\n\n if attrs.pager == \"memload\":\n flags |= weaver.bootinfo_elf.BI_MEM_LOAD\n\n # QC_MODIFIED\n if attrs.elf_flags is not None:\n if attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_PAGED_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_LOAD\n if (attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_FLAG_SEGMENT_TYPE_MASK) == \\\n weaver.bootinfo_elf.MI_PBT_SWAPPED_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_RELOAD\n if (attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_FLAG_SEGMENT_TYPE_MASK) == \\\n weaver.bootinfo_elf.MI_PBT_SWAP_POOL_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_SWAPPOOL\n if (attrs.elf_flags & weaver.bootinfo_elf.MI_PBT_POOLIDX_SEGMENT) == \\\n weaver.bootinfo_elf.MI_PBT_POOLIDX_SEGMENT:\n flags |= weaver.bootinfo_elf.BI_MEM_POOLIDX\n # print \"flags:\", hex(attrs.elf_flags), hex(flags)\n # END QC_MODIFIED\n\n if attrs.protected:\n flags |= weaver.bootinfo_elf.BI_MEM_PROTECTED\n if self.token_exported == 0:\n self.owner.environment.add_pd_extension_token_entry(self.ms.get_attrs().name + \"_PD_EXT\", 0, 0, 0, 0)\n self.token_exported = 1\n\n # Memsections in zones are initialised differently to\n # memsections in PDs.\n if (flags & weaver.bootinfo_elf.BI_MEM_PROTECTED):\n name = (self.get_name() + \"_PD_EXT\").upper()\n else:\n name = (self.get_name()).upper()\n if self.zone is not None:\n self.bi_name = \\\n bi.write_new_ms(owner = self.owner.get_bootinfo_id(),\n base = vbase,\n size = size,\n flags = flags,\n attr = attrs.cache_policy,\n physpool = physpool_id,\n virtpool = None,\n zone = self.zone.get_bootinfo_id(),\n name = name)\n for cap in self.caps.values():\n cap.generate_bootinfo(self, bi)\n\n else:\n self.bi_name = \\\n bi.write_new_ms(owner = self.owner.get_bootinfo_id(),\n base = vbase,\n size = size,\n flags = flags,\n attr = attrs.cache_policy,\n physpool = physpool_id,\n virtpool = virtpool_id,\n zone = None,\n name = name)\n \n for cap in self.caps.values():\n if name == 'HEAP' or name == 'STACK' or name == 'SMEM':\n if cap.name == 'rw':\n 
cap.generate_bootinfo(self, bi)\n else:\n cap.generate_bootinfo(self, bi)\n \n if self.need_attach():\n bi.write_attach(pd = self.owner.get_bootinfo_id(),\n ms = self.bi_name,\n rights = self.ms.attrs.attach)\n\n if attrs.elf_flags is not None:\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_FULLYACCESSIBLE) == \\\n weaver.bootinfo_elf.BI_FULLYACCESSIBLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['master'].get_bootinfo_id())\n else:\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_READABLE) == \\\n weaver.bootinfo_elf.BI_READABLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['read'].get_bootinfo_id())\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_WRITABLE) == \\\n weaver.bootinfo_elf.BI_WRITABLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['write'].get_bootinfo_id())\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_EXECUTABLE) == \\\n weaver.bootinfo_elf.BI_EXECUTABLE:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['execute'].get_bootinfo_id())\n else:\n if name == 'HEAP' or name == 'STACK' or name == 'SMEM':\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['rw'].get_bootinfo_id())\n else:\n bi.write_grant_cap(pd = self.owner.get_bootinfo_id(),\n cap = self.caps['master'].get_bootinfo_id())\n\n # Common operations.\n \n bi.write_map(vaddr = vbase,\n size = size,\n paddr = pbase,\n scrub = self.get_attrs().should_scrub(),\n flags = flags)",
"def test_pool_create(self):\n pool_name = p_n()\n self.unittest_command(\n [_STRATIS_CLI, \"pool\", \"create\", pool_name, StratisCertify.DISKS[0]],\n 0,\n True,\n True,\n )",
"def test_03_Pool1(self):\n l_xml = poolXml._write_one_pool(self.m_pools[1])\n # print(PrettyFormatAny.form(l_xml, 'W1-03-A - Pool 1'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_1)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_1)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_1)\n self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_1)\n self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_1)",
"def _getpool():\n logging.info(\n \"Creating a process pool with pool size {processes} (the number of CPU cores)...\".format(\n processes=os.cpu_count() or 1))\n return Pool()",
"def create_init(self):\n\n with open('%s/__init__.py' % self.module_name, 'w') as file:\n file.write(\"from trytond.pool import Pool\\n\")\n file.write(self._obtain_imports())\n file.write(self._obtain_registers())",
"def _save_base_state(self, base_state, pickle_filename):\r\n _file = open(pickle_filename,\"wb\")\r\n pickle.dump(base_state,_file)\r\n _file.close()",
"def __init__(self, mode, name='GlobalAvgPool'):\n super(GlobalAvgPool, self).__init__(mode, name)",
"def setPool(self, new: bool = True):\r\n if new:\r\n pool = []\r\n plist = []\r\n for i in self.b:\r\n pool.extend([i]*self.b[i][\"Count\"])\r\n if self.b[i][\"Rarity\"] <=2 and self.b[i][\"Priority\"] >= self.prio:\r\n plist.extend([i] * min(self.b[i][\"Desire\"], self.dlim))\r\n if self.b[i][\"Rarity\"] == 3 and self.b[i][\"Priority\"] >= self.RLprio:\r\n plist.extend([i] * self.b[i][\"Desire\"])\r\n \r\n assert len(pool) == 100\r\n self.pool = pool\r\n self.poolbkp = pool\r\n self.pList = plist\r\n else:\r\n self.pool = self.poolbkp\r\n \r\n global glog\r\n if glog:\r\n global glof\r\n glof.write(\"{}\\t{}\\n\".format(self.pList, self.RLname))",
"def test_04_AllPools(self):\n l_xml, l_count = poolXml.write_all_pools_xml(self.m_pyhouse_obj)\n # print(PrettyFormatAny.form(l_xml, 'W1-04-A - Pool'))\n # l_xml1 = l_xml.find('Pool')\n l_xml2 = l_xml[0]\n self.assertEqual(l_xml2.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml2.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml2.attrib['Active'], TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_xml2.find('Comment').text, TESTING_POOL_COMMENT_0)\n self.assertEqual(l_xml2.find('PoolType').text, TESTING_POOL_TYPE_0)",
"def gen_net_config(self):\n\n ret_net = []\n\n for i in self.all_pids:\n if i == self.pid:\n ret_net.append({\"host\": \"0.0.0.0\", \"port\": 5000})\n else:\n ret_net.append({\"host\": \"conclave-{0}-{1}-service.{2}.svc.cluster.local\"\n .format(self.compute_id, str(i), self.namespace_map[i-1]), \"port\": 5000})\n\n return json.dumps(ret_net)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write one entire pool XML | def test_02_Pool0(self):
l_xml = poolXml._write_one_pool(self.m_pools[0])
# print(PrettyFormatAny.form(l_xml, 'W1-02-A - Pool 0'))
self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)
self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)
self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)
self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_0)
self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_0) | [
"def test_03_Pool1(self):\n l_xml = poolXml._write_one_pool(self.m_pools[1])\n # print(PrettyFormatAny.form(l_xml, 'W1-03-A - Pool 1'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_1)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_1)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_1)\n self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_1)\n self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_1)",
"def write_xml_file(self):\n for item in self.xml_lines:\n self.xml_file.write(\"{}\\n\".format(item))",
"def escribir(self):\n tree.write('metadata1.xml')\n bs = BeautifulSoup(open('metadata1.xml'), 'xml')\n archivo1 = open('metadata1.xml', \"w+\")\n archivo1.write(bs.prettify())",
"def test_04_AllPools(self):\n l_xml, l_count = poolXml.write_all_pools_xml(self.m_pyhouse_obj)\n # print(PrettyFormatAny.form(l_xml, 'W1-04-A - Pool'))\n # l_xml1 = l_xml.find('Pool')\n l_xml2 = l_xml[0]\n self.assertEqual(l_xml2.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml2.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml2.attrib['Active'], TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_xml2.find('Comment').text, TESTING_POOL_COMMENT_0)\n self.assertEqual(l_xml2.find('PoolType').text, TESTING_POOL_TYPE_0)",
"def test_01_Base(self):\n # print(PrettyFormatAny.form(self.m_pools[0], 'W1-01-A - Pools'))\n l_xml = poolXml._write_base(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-01-B - Pool'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)",
"def writeXML(self, parent):\n node = xml.SubElement(parent, \"customer\")\n nodeName = xml.SubElement(node, \"name\")\n nodeName.text = str(self.getName())\n nodeFiducial = xml.SubElement(node, \"fiducialNum\")\n nodeFiducial.text = str(self.getFiducialNum())\n nodeId = xml.SubElement(node, \"id\")\n nodeId.text = str(self.getId())\n nodeBalance = xml.SubElement(node, \"balance\")\n self.getAccountBalance().writeXML(nodeBalance)\n nodeAllergies = xml.SubElement(node, \"allergies\")\n for allergy in self.getAllergies(): \n nodeAllergy = xml.SubElement(nodeAllergies, \"allergy\")\n nodeAllergy.text = allergy",
"def getXML(self):\n node = xml.Element(\"database\")\n for customer in self.getCustomers():\n customer.writeXML(node)\n \n for item in self.getItems(): \n item.writeXML(node)\n return node",
"def writeNodes(net):\n\n fd = open(\"nodes.xml\", \"w\")\n fd.write(\"<nodes>\\n\")\n for node in net._nodes:\n fd.write(\" <node id=\\\"\" + node._id + \"\\\" x=\\\"\" +\n str(node._coord[0]) + \"\\\" y=\\\"\" + str(node._coord[1]) + \"\\\"/>\\n\")\n fd.write(\"</nodes>\\n\")",
"def toXML( self ):\n from xml.dom.ext import PrettyPrint\n #grab elements out of config object to form basis of xml config file\n p = self.peapod.getElementsByTagName( \"peapod\" )[0]\n\n #loop through feed dicts in list adding elements into XML\n for rssfeed in self.feedlist:\n title_node = self.peapod.createElement( \"title\" )\n title_node.appendChild( self.peapod.createTextNode( rssfeed[\"title\"] ) )\n url_node = self.peapod.createElement( \"url\" )\n url_node.appendChild( self.peapod.createTextNode( rssfeed[\"url\"] ) )\n feed = self.peapod.createElement( \"feed\" )\n feed.appendChild( url_node )\n feed.appendChild( title_node )\n p.appendChild( feed )\n try:\n fd = open( os.path.sep.join( (self.config.options[\"homedir\"], \"peapod.xml\") ), \"w\" )\n logger.debug(\"Writing feedlist to \" + fd.name)\n PrettyPrint( p, fd )\n fd.close()\n except Exception,e:\n print e",
"def write_toXMLfile(self):\n sfbxml = self.sdict['sfbxml']\n self._make_sfbxmlfile(sfbxml)",
"def write_xml_changes(self, outfile):\n raise NotImplementedError",
"def _writeXmlFile(self, article, articleE):\n\tbodyE = articleE.find(\"body\")\n\tif bodyE == None:\n\t self.curReporter.gotNoBody(article)\n\t if self.verbose: progress('x')\n\tif not self.writeFiles: return\n\n\tfileName = 'PMC' + str(article.pmcid) + \".xml\"\n pathName = os.sep.join( [ self.curOutputDir, fileName ] )\n\n\twith open(pathName, 'w') as fp:\n\t fp.write( ET.tostring(articleE, method='xml'))\n\t self.curReporter.gotXml(article)\n\t if self.verbose: progress('X')",
"def log_xml(self):\n\n lFH = self.logger.getLogHandle();\n # xml_print( self.puke_dom, lFH )\n # lFH.write( MyXML.getRootDocumentXML(self) )\n lFH.write(self.getRootDocumentXML())",
"def generate_xml(self):\n raise NotImplementedError()",
"def put_config(self, elem):\n dest = open(self.module.cfg_path, \"w\")\n dest.write('<?xml version=\"1.0\"?>\\n')\n dest.write(etree.tostring(elem, pretty_print=True))\n dest.close()\n print \"Updated file \" + abbreviate(self.module.cfg_path, 65)",
"def writeAssignments(hashring, filename):\n logging.debug(\"Dumping pool assignments to file: '%s'\" % filename)\n\n try:\n with open(filename, 'a') as fh:\n fh.write(\"bridge-pool-assignment %s\\n\" %\n time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n hashring.dumpAssignments(fh)\n except IOError:\n logging.info(\"I/O error while writing assignments to: '%s'\" % filename)",
"def write_stationxml(self, staxml, source='CIEI'):\n inv=obspy.core.inventory.inventory.Inventory(networks=[], source=source)\n for staid in self.waveforms.list(): inv+=self.waveforms[staid].StationXML\n inv.write(staxml, format='stationxml')\n return",
"def save_xml(self, name):\n # Wrap it in an ElementTree instance, and save as XML.\n tree = ElementTree.ElementTree(self.root)\n self.indent(self.root)\n full_name = name + '.dbc.xml'\n\n current_path_of_file = Path(os.getcwd())\n current_path_of_file = os.path.realpath(current_path_of_file) + \"\\\\\" + full_name\n\n destination_path = Path(os.getcwd())\n destination_path = os.path.realpath(destination_path) + \"\\\\scenario\"\n\n tree.write(full_name, encoding=\"utf-8\", xml_declaration=True)\n\n if not path.exists(destination_path):\n os.mkdir(destination_path)\n\n # Delete old files with the same name.\n if path.exists(destination_path + \"\\\\\" + full_name):\n remove(destination_path + \"\\\\\" + full_name)\n\n # Move created file to scenario folder.\n move(current_path_of_file, destination_path)",
"def generate_settings_xml(self):\n\n if self.rank == 0:\n batches = self.settings.batches\n inactive = self.settings.inactive\n particles = self.settings.particles\n\n # Just a generic settings file to get it running.\n settings_file = openmc.Settings()\n settings_file.batches = batches\n settings_file.inactive = inactive\n settings_file.particles = particles\n settings_file.source = openmc.Source(space=Box(self.settings.lower_left,\n self.settings.upper_right))\n\n if self.settings.entropy_dimension is not None:\n entropy_mesh = openmc.Mesh()\n entropy_mesh.lower_left = self.settings.lower_left\n entropy_mesh.upper_right = self.settings.upper_right\n entropy_mesh.dimension = self.settings.entropy_dimension\n settings_file.entropy_mesh = entropy_mesh\n\n # Set seed\n if self.settings.constant_seed is not None:\n seed = self.settings.constant_seed\n else:\n seed = random.randint(1, sys.maxsize-1)\n\n settings_file.seed = self.seed = seed\n\n settings_file.export_to_xml()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write one entire pool XML | def test_03_Pool1(self):
l_xml = poolXml._write_one_pool(self.m_pools[1])
# print(PrettyFormatAny.form(l_xml, 'W1-03-A - Pool 1'))
self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_1)
self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_1)
self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_1)
self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_1)
self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_1) | [
"def test_02_Pool0(self):\n l_xml = poolXml._write_one_pool(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-02-A - Pool 0'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_0)\n self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_0)",
"def write_xml_file(self):\n for item in self.xml_lines:\n self.xml_file.write(\"{}\\n\".format(item))",
"def escribir(self):\n tree.write('metadata1.xml')\n bs = BeautifulSoup(open('metadata1.xml'), 'xml')\n archivo1 = open('metadata1.xml', \"w+\")\n archivo1.write(bs.prettify())",
"def test_04_AllPools(self):\n l_xml, l_count = poolXml.write_all_pools_xml(self.m_pyhouse_obj)\n # print(PrettyFormatAny.form(l_xml, 'W1-04-A - Pool'))\n # l_xml1 = l_xml.find('Pool')\n l_xml2 = l_xml[0]\n self.assertEqual(l_xml2.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml2.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml2.attrib['Active'], TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_xml2.find('Comment').text, TESTING_POOL_COMMENT_0)\n self.assertEqual(l_xml2.find('PoolType').text, TESTING_POOL_TYPE_0)",
"def test_01_Base(self):\n # print(PrettyFormatAny.form(self.m_pools[0], 'W1-01-A - Pools'))\n l_xml = poolXml._write_base(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-01-B - Pool'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)",
"def writeXML(self, parent):\n node = xml.SubElement(parent, \"customer\")\n nodeName = xml.SubElement(node, \"name\")\n nodeName.text = str(self.getName())\n nodeFiducial = xml.SubElement(node, \"fiducialNum\")\n nodeFiducial.text = str(self.getFiducialNum())\n nodeId = xml.SubElement(node, \"id\")\n nodeId.text = str(self.getId())\n nodeBalance = xml.SubElement(node, \"balance\")\n self.getAccountBalance().writeXML(nodeBalance)\n nodeAllergies = xml.SubElement(node, \"allergies\")\n for allergy in self.getAllergies(): \n nodeAllergy = xml.SubElement(nodeAllergies, \"allergy\")\n nodeAllergy.text = allergy",
"def getXML(self):\n node = xml.Element(\"database\")\n for customer in self.getCustomers():\n customer.writeXML(node)\n \n for item in self.getItems(): \n item.writeXML(node)\n return node",
"def writeNodes(net):\n\n fd = open(\"nodes.xml\", \"w\")\n fd.write(\"<nodes>\\n\")\n for node in net._nodes:\n fd.write(\" <node id=\\\"\" + node._id + \"\\\" x=\\\"\" +\n str(node._coord[0]) + \"\\\" y=\\\"\" + str(node._coord[1]) + \"\\\"/>\\n\")\n fd.write(\"</nodes>\\n\")",
"def toXML( self ):\n from xml.dom.ext import PrettyPrint\n #grab elements out of config object to form basis of xml config file\n p = self.peapod.getElementsByTagName( \"peapod\" )[0]\n\n #loop through feed dicts in list adding elements into XML\n for rssfeed in self.feedlist:\n title_node = self.peapod.createElement( \"title\" )\n title_node.appendChild( self.peapod.createTextNode( rssfeed[\"title\"] ) )\n url_node = self.peapod.createElement( \"url\" )\n url_node.appendChild( self.peapod.createTextNode( rssfeed[\"url\"] ) )\n feed = self.peapod.createElement( \"feed\" )\n feed.appendChild( url_node )\n feed.appendChild( title_node )\n p.appendChild( feed )\n try:\n fd = open( os.path.sep.join( (self.config.options[\"homedir\"], \"peapod.xml\") ), \"w\" )\n logger.debug(\"Writing feedlist to \" + fd.name)\n PrettyPrint( p, fd )\n fd.close()\n except Exception,e:\n print e",
"def write_toXMLfile(self):\n sfbxml = self.sdict['sfbxml']\n self._make_sfbxmlfile(sfbxml)",
"def write_xml_changes(self, outfile):\n raise NotImplementedError",
"def _writeXmlFile(self, article, articleE):\n\tbodyE = articleE.find(\"body\")\n\tif bodyE == None:\n\t self.curReporter.gotNoBody(article)\n\t if self.verbose: progress('x')\n\tif not self.writeFiles: return\n\n\tfileName = 'PMC' + str(article.pmcid) + \".xml\"\n pathName = os.sep.join( [ self.curOutputDir, fileName ] )\n\n\twith open(pathName, 'w') as fp:\n\t fp.write( ET.tostring(articleE, method='xml'))\n\t self.curReporter.gotXml(article)\n\t if self.verbose: progress('X')",
"def log_xml(self):\n\n lFH = self.logger.getLogHandle();\n # xml_print( self.puke_dom, lFH )\n # lFH.write( MyXML.getRootDocumentXML(self) )\n lFH.write(self.getRootDocumentXML())",
"def generate_xml(self):\n raise NotImplementedError()",
"def put_config(self, elem):\n dest = open(self.module.cfg_path, \"w\")\n dest.write('<?xml version=\"1.0\"?>\\n')\n dest.write(etree.tostring(elem, pretty_print=True))\n dest.close()\n print \"Updated file \" + abbreviate(self.module.cfg_path, 65)",
"def writeAssignments(hashring, filename):\n logging.debug(\"Dumping pool assignments to file: '%s'\" % filename)\n\n try:\n with open(filename, 'a') as fh:\n fh.write(\"bridge-pool-assignment %s\\n\" %\n time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n hashring.dumpAssignments(fh)\n except IOError:\n logging.info(\"I/O error while writing assignments to: '%s'\" % filename)",
"def write_stationxml(self, staxml, source='CIEI'):\n inv=obspy.core.inventory.inventory.Inventory(networks=[], source=source)\n for staid in self.waveforms.list(): inv+=self.waveforms[staid].StationXML\n inv.write(staxml, format='stationxml')\n return",
"def save_xml(self, name):\n # Wrap it in an ElementTree instance, and save as XML.\n tree = ElementTree.ElementTree(self.root)\n self.indent(self.root)\n full_name = name + '.dbc.xml'\n\n current_path_of_file = Path(os.getcwd())\n current_path_of_file = os.path.realpath(current_path_of_file) + \"\\\\\" + full_name\n\n destination_path = Path(os.getcwd())\n destination_path = os.path.realpath(destination_path) + \"\\\\scenario\"\n\n tree.write(full_name, encoding=\"utf-8\", xml_declaration=True)\n\n if not path.exists(destination_path):\n os.mkdir(destination_path)\n\n # Delete old files with the same name.\n if path.exists(destination_path + \"\\\\\" + full_name):\n remove(destination_path + \"\\\\\" + full_name)\n\n # Move created file to scenario folder.\n move(current_path_of_file, destination_path)",
"def generate_settings_xml(self):\n\n if self.rank == 0:\n batches = self.settings.batches\n inactive = self.settings.inactive\n particles = self.settings.particles\n\n # Just a generic settings file to get it running.\n settings_file = openmc.Settings()\n settings_file.batches = batches\n settings_file.inactive = inactive\n settings_file.particles = particles\n settings_file.source = openmc.Source(space=Box(self.settings.lower_left,\n self.settings.upper_right))\n\n if self.settings.entropy_dimension is not None:\n entropy_mesh = openmc.Mesh()\n entropy_mesh.lower_left = self.settings.lower_left\n entropy_mesh.upper_right = self.settings.upper_right\n entropy_mesh.dimension = self.settings.entropy_dimension\n settings_file.entropy_mesh = entropy_mesh\n\n # Set seed\n if self.settings.constant_seed is not None:\n seed = self.settings.constant_seed\n else:\n seed = random.randint(1, sys.maxsize-1)\n\n settings_file.seed = self.seed = seed\n\n settings_file.export_to_xml()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write Pool Section with all pools. | def test_04_AllPools(self):
l_xml, l_count = poolXml.write_all_pools_xml(self.m_pyhouse_obj)
# print(PrettyFormatAny.form(l_xml, 'W1-04-A - Pool'))
# l_xml1 = l_xml.find('Pool')
l_xml2 = l_xml[0]
self.assertEqual(l_xml2.attrib['Name'], TESTING_POOL_NAME_0)
self.assertEqual(l_xml2.attrib['Key'], TESTING_POOL_KEY_0)
self.assertEqual(l_xml2.attrib['Active'], TESTING_POOL_ACTIVE_0)
self.assertEqual(l_xml2.find('Comment').text, TESTING_POOL_COMMENT_0)
self.assertEqual(l_xml2.find('PoolType').text, TESTING_POOL_TYPE_0) | [
"def writeAssignments(hashring, filename):\n logging.debug(\"Dumping pool assignments to file: '%s'\" % filename)\n\n try:\n with open(filename, 'a') as fh:\n fh.write(\"bridge-pool-assignment %s\\n\" %\n time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n hashring.dumpAssignments(fh)\n except IOError:\n logging.info(\"I/O error while writing assignments to: '%s'\" % filename)",
"def test_02_Pool0(self):\n l_xml = poolXml._write_one_pool(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-02-A - Pool 0'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)\n self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_0)\n self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_0)",
"def writeSection(self):\n p = os.path.join(self.basePath, \"section/index.html\")\n self.writeLayout(\n p, body=self.sectionsIndex(), basePath=\"..\", title=\"Sections Index\"\n )\n for sName, s in sorted(self.schema.sections.items()):\n basePath = \"../..\"\n p = os.path.join(self.basePath, f\"section/{sName}/index.html\")\n sec = s.section\n body = self.metaDesc(sec, basePath=basePath)\n self.writeLayout(\n p,\n body=body,\n title=f\"Section {sName}\",\n basePath=basePath,\n extra=self.mathExtra(basePath),\n )\n for vName, v in s.valueEntries.items():\n basePath = \"../../..\"\n p = os.path.join(self.basePath, f\"section/{sName}/value/{vName}.html\")\n body = self.metaDesc(v, basePath=basePath)\n self.writeLayout(\n p,\n body=body,\n title=f\"Value {sName}\",\n basePath=basePath,\n extra=self.mathExtra(basePath),\n )\n for dName, d in s.dimensions.items():\n basePath = \"../../..\"\n p = os.path.join(\n self.basePath, f\"section/{sName}/dimension/{dName}.html\"\n )\n body = self.metaDesc(d, basePath=basePath)\n self.writeLayout(\n p,\n body=body,\n title=f\"Dimension {sName}\",\n basePath=basePath,\n extra=self.mathExtra(basePath),\n )",
"def store_pool(self, pool, start):\n if isinstance(start, Integral):\n indices = map(str, range(start, start + len(pool)))\n else:\n indices = [\"(%s) + %d\" % (start, i) for i in range(len(pool))]\n name = self.array_name\n s = \"\"\n for i, index in enumerate(indices):\n s += \"%s[%s] = %s;\\n\" % (name, index, pool[i])\n return s",
"def test_03_Pool1(self):\n l_xml = poolXml._write_one_pool(self.m_pools[1])\n # print(PrettyFormatAny.form(l_xml, 'W1-03-A - Pool 1'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_1)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_1)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_1)\n self.assertEqual(l_xml.find('Comment').text, TESTING_POOL_COMMENT_1)\n self.assertEqual(l_xml.find('PoolType').text, TESTING_POOL_TYPE_1)",
"def write_hgrps(self, hgrp_list, dirname):\n self.host_group_manager.write_objects(hgrp_list, dirname)",
"def _write_config_section(self):\n\n self._write_report('%s %s\\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG))\n self._write_report('%s %s\\n'%(_FIELD_NAME_VERSION, self._version))\n self._write_report('%s %s\\n'%(_FIELD_NAME_DEVICE, self._device_type))\n self._write_report('%s %s\\n'%(_FIELD_NAME_TRACE_MODE,\n self._parameters.trace_mode))\n self._write_report('%s %s\\n'%(_FIELD_NAME_SUBMODE,\n self._parameters.submode))\n if self._parameters.included_cores:\n self._write_report('%s %s\\n'%(_FIELD_NAME_NUM_REPLICAS,\n len(self._parameters.included_cores)))\n else:\n self._write_report('%s %s\\n'%(_FIELD_NAME_NUM_REPLICAS,\n self._num_replicas))\n self._write_report('%s %s\\n'%(_FIELD_NAME_NUM_REPLICAS_PER_HOST,\n self._num_replicas_per_host))\n self._write_report('%s %s\\n'%(_FIELD_NAME_NUM_HOSTS, self._num_hosts))\n self._write_report('%s %s\\n'%(_MARKER_SECTION_END, _SECTION_NAME_CONFIG))",
"def _write_op_list_section(self, op_list):\n\n self._write_report('%s %s\\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))\n self._write_report('%s %d\\n'%(_FIELD_NAME_NUM_OPS, len(op_list)))\n for i in range(0, len(op_list)):\n op = op_list[i]\n line = '%d \"%s\" %s'%(i, op.name, op.type)\n for out_tensor in op.outputs:\n if out_tensor.name not in self._tensorname_idx_map:\n raise ValueError(\n 'out_tensor %s is not in tensorname_idx_map'%out_tensor.name)\n line += ' %d'%self._tensorname_idx_map[out_tensor.name]\n line += '\\n'\n self._write_report(line)\n self._write_report('%s %s\\n'%(_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))",
"def produce(self):\n for network in self.subnets:\n output = ''\n for address in self.network.addresses():\n if ipaddress.ip_address(address.ip) in network:\n for entry in address.ns_entries.filter(type='PTR'):\n reversed_ip = ipaddress.ip_address(address.ip).reverse_pointer\n output += '{}. IN {} {}.{}. ; {}\\n'.format(reversed_ip, entry.type,\n entry.name,\n entry.domain.name,\n address.creation_date)\n filename = '{}/{}.db'.format(self.directory,\n str(network.network_address).replace(':', '.'))\n with open(filename, 'w') as lock_file:\n locks.lock(lock_file, locks.LOCK_EX)\n lock_file.write(output)\n lock_file.close()\n self.update_soa()",
"def test_create_ip_pool_all_args(self):\n pool = self.get_mocked_resource()\n\n display_name = 'dummy'\n gateway_ip = '1.1.1.1'\n ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'},\n {'start': '3.2.2.0', 'end': '3.2.2.255'}]\n cidr = '2.2.2.0/24'\n description = 'desc'\n dns_nameserver = '7.7.7.7'\n pool.create(cidr, allocation_ranges=ranges,\n display_name=display_name,\n gateway_ip=gateway_ip,\n description=description,\n dns_nameservers=[dns_nameserver])\n\n data = {\n 'display_name': display_name,\n 'description': description,\n 'subnets': [{\n 'gateway_ip': gateway_ip,\n 'allocation_ranges': ranges,\n 'cidr': cidr,\n 'dns_nameservers': [dns_nameserver]\n }]\n }\n\n test_client.assert_json_call(\n 'post', pool,\n 'https://1.2.3.4/api/v1/pools/ip-pools',\n data=jsonutils.dumps(data, sort_keys=True),\n headers=self.default_headers())",
"def write(self):\n for data_object in self.data_list:\n data_object.write()",
"def merge_pool(self, avi_config):\n mergelist=[]\n for poolgrp in avi_config['PoolGroup']:\n if poolgrp['name'] == 'lb-depoed1cdb.qai-pri-5984-poolgroup':\n print('found')\n # do not merge the pool if it is a backup pool in the group\n pool_member = [obj for obj in poolgrp['members'] if not\n obj.get('priority_label', '10') == '2']\n length = len(pool_member)\n for count in range(length):\n pool_name = pool_member[count]['pool_ref'].split(\n '&')[1].split('=')[1]\n if pool_name in mergelist:\n continue\n pool = [pl for pl in avi_config['Pool']\n if pl['name'] == pool_name]\n if not pool:\n LOG.debug(\"'%s' not present\" % pool_name)\n continue\n for count2 in range(count+1, length):\n pname = pool_member[count2]['pool_ref'].split(\n '&')[1].split('=')[1]\n nextpool = [pol for pol in avi_config['Pool']\n if pol['name'] == pname]\n if not nextpool:\n LOG.debug(\"'%s' not present\" % pname)\n continue\n if pool[0]['health_monitor_refs'].sort() == nextpool[0][\n 'health_monitor_refs'].sort():\n LOG.debug(\"Merging pool '%s' in '%s'\" % (nextpool[0][\n 'name'], pool[0]['name']))\n ip_port = set()\n for ser in pool[0]['servers']:\n ip_port.add(str(ser['ip']['addr']) + ':' + str(\n ser['port']))\n for server in nextpool[0]['servers']:\n ipport = str(server['ip']['addr']) + ':' + str(\n server['port'])\n if ipport not in list(ip_port):\n pool[0]['servers'].append(server)\n for cl in csv_writer_dict_list:\n if cl['Object Name'] == (nextpool[0][\n 'name'].replace('-pool','')) and cl[\n 'Netscaler Command'] in ['add service',\n 'add serviceGroup']:\n cl['AVI Object'] = 'Merged to %s' % pool[0][\n 'name']\n mergelist.append(nextpool[0]['name'])\n for plg in avi_config['PoolGroup']:\n plg['members'] = [member for member in plg['members'] if\n member['pool_ref'].split('&')[1].split('=')[1] not\n in mergelist]\n avi_config['Pool'] = [pools for pools in avi_config['Pool'] if pools[\n 'name'] not in mergelist]",
"def save_pool():\n for i in range(total_models):\n with open(os.path.join(pool_dir, 'model_{}.pickle'.format(i)), 'wb') as f:\n pickle.dump(current_pool[i], f)",
"def test_pool_add_data(self):\n pool_name = make_test_pool(StratisCertify.DISKS[0:1])\n self.unittest_command(\n [_STRATIS_CLI, \"pool\", \"add-data\", pool_name, StratisCertify.DISKS[1]],\n 0,\n True,\n True,\n )",
"def pools(self, pool=None):\n url = f'{self.hostname}/pools'\n if pool:\n url += '/' + pool\n return self._get(url)",
"def test_pool_create(self):\n pool_name = p_n()\n self.unittest_command(\n [_STRATIS_CLI, \"pool\", \"create\", pool_name, StratisCertify.DISKS[0]],\n 0,\n True,\n True,\n )",
"def pools(self, pool=None):\n url = self.hostname + '/pools'\n if pool:\n url += '/' + pool\n return self._get(url)",
"def pools():\n pools = InventoryPool.query.all()\n return render_template('inventory/pools.html',\n pools=pools\n )",
"def test_01_Base(self):\n # print(PrettyFormatAny.form(self.m_pools[0], 'W1-01-A - Pools'))\n l_xml = poolXml._write_base(self.m_pools[0])\n # print(PrettyFormatAny.form(l_xml, 'W1-01-B - Pool'))\n self.assertEqual(l_xml.attrib['Name'], TESTING_POOL_NAME_0)\n self.assertEqual(l_xml.attrib['Key'], TESTING_POOL_KEY_0)\n self.assertEqual(l_xml.attrib['Active'], TESTING_POOL_ACTIVE_0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
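Note on the four write-side entries above (_write_base, _write_one_pool twice, write_all_pools_xml): they all assert the same Name/Key/Active attributes plus Comment and PoolType children. Purely as an illustration, and mirroring the assumptions of the read sketch earlier, a writer with matching behaviour could be structured as follows; the function and tag names here are placeholders, not the real poolXml API.

# hypothetical sketch -- not the PyHouse poolXml implementation
import xml.etree.ElementTree as ET

def write_base(p_pool_obj, p_tag='Pool'):
    # emit the attributes checked by the write-side test_01_Base
    l_elem = ET.Element(p_tag)
    l_elem.set('Name', p_pool_obj.Name)
    l_elem.set('Key', str(p_pool_obj.Key))
    l_elem.set('Active', str(p_pool_obj.Active))
    return l_elem

def write_one_pool(p_pool_obj):
    # add the child elements checked by test_02_Pool0 / test_03_Pool1
    l_elem = write_base(p_pool_obj)
    ET.SubElement(l_elem, 'Comment').text = p_pool_obj.Comment
    ET.SubElement(l_elem, 'PoolType').text = p_pool_obj.PoolType
    return l_elem

def write_all_pools(p_pools):
    # wrap every pool in one section element and return the (xml, count)
    # pair unpacked in test_04_AllPools; p_pools is assumed to be a dict
    l_sect = ET.Element('PoolSection')
    for l_key in sorted(p_pools):
        l_sect.append(write_one_pool(p_pools[l_key]))
    return l_sect, len(p_pools)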
Given a key, return an item from the cache. Return None if the item is not in the cache. | def get(self, key):
#return None if the item isn't in the cache
if key not in self.items:
return None
#retrieve the item from the dictionary
item = self.items[key]
#move it to the front of the list since it is the
#most recently accessed item
self._move_to_head(item)
return item | [
"def __getitem__(self, key):\n\n # check for slycat path\n self.check_fs_path()\n\n # is item in cache?\n if key in self:\n\n # get hash and value\n digest = self.digest_hash(key)\n value = self._loaded[digest].value\n expired = self._loaded[digest].expired()\n\n # if expired, erase and return None\n if expired:\n self.expire(digest)\n return None\n\n else:\n return None\n\n # cherrypy.log.error(\"[CACHE] Retrieving %s from cache.\" % str(digest))\n\n return value",
"def get_item(self, key):\n\t\tif not key in self.items: return None\n\t\treturn self.items[ key ]",
"def _cache_get(self, key):\r\n entry = None\r\n if self.cache is not None:\r\n entry = self.cache.get(key)\r\n\r\n return entry or (None, 0, 0)",
"def get_item(self, key):\n cPickle_key = self.normalize_key(key)\n md5_key = hashlib.md5(cPickle_key).hexdigest()\n document = self.collection.find_one({\"md5\":md5_key, \"key\": cPickle_key})\n if document != None:\n item = cPickle.loads(str(document['item']))\n item.isHit = True\n return item\n\n else:\n item = CacheItem()\n item.key = key\n return item",
"def get(self, *args, **kwargs):\r\n # Look in the cache to see if there is an unexpired item. If there is\r\n # we can just return the cached result.\r\n cache_key = self.make_key(args, kwargs)\r\n # Lock and load\r\n with self._cache_lock:\r\n if cache_key in self._cache:\r\n expirytime, item = self._cache[cache_key]\r\n\r\n if expirytime >= time():\r\n return item\r\n else:\r\n # An expired item is present - delete it\r\n del self._cache[cache_key]\r\n # Nothing found\r\n return None",
"def extract_from_cache(self, cache, key):\n try:\n value = cache.read(key)\n except Exception, e:\n value = None\n return value",
"def get_value(key):\n data = cache.get(key)\n if data:\n return pickle.loads(data)",
"def get(self, bucket, key):\n\n return self._cache[bucket].get(key, None)",
"def _cache_get(self, k):\n self._cache.move_to_end(k)\n return self._cache[k]",
"def read(self, key):\n if key not in self.data:\n raise Exception('Cache miss for key {key}'.format(key=key))\n\n self.promote(key)\n return self.deque['value']",
"def get_cache(cls, key):\n return cls._instance(key)._cache",
"def _fetch_item_from_cache(self, title):\n\n # Get item from cache\n cached_item = self.cache.get(title)\n # If it's not expired, go with it\n if cached_item and not cached_item.is_expired():\n logger.debug(\"Cache hit for '%s'\", title)\n return cached_item\n # If it's expired, query the API using if-modified-since to see if cache is still valid\n elif cached_item:\n logger.debug(\"Cache expired for '%s'\", title)\n modified_since = http_datestring_from_datetime(cached_item.last_modified)\n params = {'q': title, 'type': 'track'}\n headers = {'If-Modified-Since': modified_since}\n r = requests.get(SPOTIFY_API_SEARCH_TRACK_URL, params=params, headers=headers)\n\n # Something bad happened with the API that we can't recover from\n if r.status_code not in VALID_API_STATUSCODES:\n raise ApiException(r.status_code)\n # If we get statuscode 304, we can still use the cached item\n if r.status_code == 304:\n logger.debug(\"Cache still valid for '%s'\", title)\n return cached_item\n else:\n logger.debug(\"Cache invalidated for '%s'\", title)\n self.cache.remove(title)\n return None",
"def Lookup(self, key):\n return CacheReference(self, key)",
"def get(self, key):\r\n\t\tstartslot = self.hashfunction(key, len(self.slots))\r\n\t\tdata = None\r\n\t\tstop = False\r\n\t\tfound = False\r\n\t\tposition = startslot\r\n\r\n\t\twhile self.slots[position] != None and not stop and not found:\r\n\t\t\tif self.slots[position] == key:\r\n\t\t\t\tfound = True\r\n\t\t\t\tdata = self.data[position]\r\n\t\t\telse:\r\n\t\t\t\tposition = self.rehash(position,len(self.slots))\r\n\t\t\t\tif position == startslot: #Eventually after several modulo, position would again be equal to startslot\r\n\t\t\t\t\tstop = True #This means no element was found\r\n\t\treturn data",
"def get(self, k):\n hc = hash(k) % self.M # First place it could be\n entry = self.table[hc]\n while entry:\n if entry.key == k:\n return entry.value\n entry = entry.next\n return None # Couldn't find",
"def __getitem__(self, image_file):\n image_hash = self.hash_image(image_file)\n if image_hash in self.cache:\n return self.cache[image_hash]\n raise KeyError('Image not in cache.')",
"def get_node(self, key):\n for node in self.__iter__():\n if node.data == key:\n return node\n\n raise ValueError('key not found')",
"def __getitem__(self, key):\n if self.document_cache is None:\n return self.fetch_document(key, raw_results = False)\n try:\n return self.document_cache[key]\n except KeyError:\n document = self.fetch_document(key, raw_results = False)\n self.document_cache.cache(document)\n return document",
"def find(self, key):\n _, current, _ = self._linear_search(key)\n\n if current is None:\n value = None\n else:\n value = copy.deepcopy(current._value)\n return value"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
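The cache get entry above calls a _move_to_head helper that is not included in the snippet. In the usual doubly linked list formulation it simply unlinks the node and re-inserts it behind the head sentinel; the sketch below is consistent with the _push_front method shown further down, but it is an assumed implementation, not code taken from this entry.

def _move_to_head(self, item):
    #unlink the item from its current position in the list
    item.prev.next = item.next
    item.next.prev = item.prev
    #re-insert it right after the head sentinel so it becomes
    #the most recently accessed item
    self._push_front(item)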
Given a key and a value, add an item to the cache. If the cache is full, the least recently used item will be evicted. | def put(self, key, value):
#first check if the item is already in the cache
item = self.items.get(key, None)
#if not create a new item
if item is None:
#if the cache is full, evict the last item
if self.is_full():
self._evict()
item = CacheItem(key, value)
#add it to the dictionary
self.items[key] = item
#insert it at the front of the linked list
self._push_front(item)
#increment number of items by 1
self.total_items += 1
else:
#update the value of the found item
#move it to the front of the list since it is now
#the most recently accessed item
item.value = value
self._move_to_head(item) | [
"def add(self, key, value):\n self.m_cache[key] = [self.m_time, key, value]\n self.m_time += 1\n self.expire()",
"def add_to_cache(self, key, value):\n self._cache_data[key] = value\n self._dirty = True",
"def add(self, key, value):\n key = self.get_key(key)\n\n if key not in self._cache:\n entry = self.Entry(key, value)\n size = entry.size\n\n # If the Size of a single artifact exceeds the MAXIMUM CACHE SIZE, we raise\n if self.is_limited and size > self.max_size_in_bytes:\n raise self.Error('The artifact\\'s size is {0} bytes exceeds the MAXIMUM CACHE SIZE, which is {1} bytes!'\n ' Please adjust, consequently your '\n 'Bench Configuration with an appropriated Value!'.format(size, self.max_size_in_bytes))\n\n # Considering Current size plus the item to be added\n if self.is_limited and self.current_size + size > self.max_size_in_bytes:\n self._flush(size)\n\n # CacheManager is fine to accept the new element\n # Adding it to the Queue and Cache dict.\n self._queue.put((entry.timestamp, entry))\n self._cache[key] = entry\n\n # Updating the current size with the new added item\n self._update_current_size(size)\n\n return self._cache[key]",
"def put(self, key: KeyType, value: Optional[ValType] = None) -> None:\n curr_value = None\n if key in self._lru_cache:\n curr_value = self._lru_cache.pop(key)\n\n while len(self._lru_cache) >= self._max_cache_items:\n self._lru_cache.popitem(last=False)\n\n if value is None:\n value = self._retrieval_function( # type: ignore\n key=key, value=curr_value.element if curr_value else None\n )\n\n self._lru_cache[key] = self.Element(\n value=value, creation_time=datetime.datetime.now(tz=datetime.timezone.utc)\n )",
"def write(self, key, value):\n # Evict item if necessare before inserting.\n evicted = None\n if len(self.data) == self.max_size and key not in self.data:\n evicted = self.evict()\n\n # Write the cache item.\n if key not in self.data:\n item = {'key': 0, 'value': value, '_key': key}\n self.heap.insert(item)\n self.data[key] = item\n else:\n item = self.data[key]\n item['value'] = value\n\n self.increment_frequency(item)\n return evicted",
"def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data)-1) # upheap newly added position",
"def put(self, key, item):\n if key is None or item is None:\n return\n if key in self.key_tracker.keys():\n y = self.key_tracker.get(key) + 1\n self.key_tracker.pop(key)\n else:\n y = 1\n if len(self.key_tracker) >= BaseCaching.MAX_ITEMS:\n x = self.least_frequent_use_key()\n print(\"DISCARD: {}\".format(x))\n self.key_tracker.pop(x)\n self.cache_data.pop(x)\n self.cache_data.update({key: item})\n self.key_tracker.update({key: y + self.count / 1000})\n self.count += 1",
"def add(self, key, content=None):\n\n if key in self._map and self._map.get(key) is content:\n return # do nothing as this key already exists with the same value\n elif len(self._map) >= self._max_len:\n self.delete_least_used()\n\n self._map[key] = content\n self._popularity[key] = 0",
"def add(self, key, value): # 3\r\n self._data.append(self._Item(key, value))\r\n self._upheap(len(self._data) - 1) # upheap newly added position\r",
"def add(self, key, value, expire=None, read=False, tag=None, retry=True, close=True):\n\n if isinstance(key, (tuple, list)):\n key = self.makey(*key)\n\n expire = expire or self.expire\n self.cache.add(key, value, expire=expire, read=read, tag=tag, retry=retry)\n if close:\n self.close()",
"def insert_into_cache(self, cache, key, value):\n if cache == self.t1:\n evicted = self.t1.write(key, value)\n if evicted != None:\n return self.insert_into_cache(self.b1, evicted['key'], evicted['value'])\n\n if cache == self.b1:\n return self.b1.write(key, value)\n\n if cache == self.t2:\n evicted = self.t2.write(key, value)\n if evicted != None:\n return self.insert_into_cache(self.b2, evicted['key'], evicted['value'])\n\n if cache == self.b2:\n return self.b2.write(key, value)",
"async def _add_or_replace(self, key, cache, func, *args, **kwargs):\n if key in cache:\n if self.max_lifetime:\n add_or_replace = datetime.now() - cache[key][\"fetch_time\"] > self.max_lifetime\n else:\n add_or_replace = False\n pop_allowed = False\n else:\n add_or_replace = True\n pop_allowed = True\n\n # record is not cached yet or is beyond its lifetime\n if add_or_replace:\n # when limit of cached records is reached, pop the oldest one\n if self.max_size and pop_allowed and len(cache) == self.max_size:\n _, _ = cache.popitem(last=False)\n\n # cache new records\n cache[key] = {\n \"data\": await func(*args, **kwargs),\n \"fetch_time\": datetime.now()\n }\n\n return cache",
"def memorize(self, key, item):\n self._memory[key].append(item)",
"def add(self, key, value):\n\n assert isinstance(key, bytes_type)\n assert isinstance(value, bytes_type)\n\n dbfile = self.dbfile\n pos = dbfile.tell()\n dbfile.write(_lengths.pack(len(key), len(value)))\n dbfile.write(key)\n dbfile.write(value)\n\n # Get hash value for the key\n h = self.hashfn(key)\n # Add hash and on-disk position to appropriate bucket\n self.buckets[h & 255].append((h, pos))",
"def add_or_update(self, key, value, **kwargs):\n return self._add_or_update(key=key, value=value, **kwargs)",
"def insert(self, key, value):\n # hash the key and map that hash to a bucket\n hash_key = self.hash_function(key) % len(self.buckets)\n\n bucket = self.buckets[hash_key]\n\n for i, val in enumerate(bucket):\n # check if exists, and override if so\n if val[0] == key:\n bucket[i] = (key, value)\n return\n # insert new\n bucket.append((key, value))",
"def add(self, key, value):\r\n index = self.hash(key)\r\n\r\n if self.array[index] is not None:\r\n # This index contains some values.\r\n # We need to check if the key we're adding already exists, this\r\n # way, we can update it with the new value, this way, we can update\r\n # it with the new value\r\n\r\n # kvp = key/value pair\r\n for kvp in self.array[index]:\r\n # If the key is found, then update the current value to the new\r\n # value.\r\n\r\n if kvp[0] == key:\r\n kvp[1] = value\r\n break\r\n\r\n # Remember for/else, the else executes after the loop completetes\r\n # normally. Meaning, if no breaks happen, it will execute this else\r\n # statement.\r\n else:\r\n # If no breaks happened, it means that no existing key was\r\n # found. Therefore, we can simply append it to the end of the\r\n # list at this index.\r\n self.array[index].append([key, value])\r\n\r\n else:\r\n # This index is empty. We will create an empty list and append the\r\n # key value pair.\r\n self.array[index] = []\r\n self.array[index].append([key, value])",
"def add(self, k: str, v: str):\n\n if k in self.__cache_dict and self.__cache_dict[k][1]:\n self.__change_set.add(k)\n elif k in self.__delete_set:\n self.__delete_set.remove(k)\n self.__change_set.add(k)\n self.__cache_dict[k] = (v, False)",
"def put(self, key, value):\n index = self.hash_index(key)\n current = self.storage[index].head\n while current:\n if current.key == key:\n current.value = value\n current = current.next\n\n node = HashTableEntry(key, value)\n self.storage[index].insert_at_head(node)\n self.count += 1\n\n # init linked list at index position\n # if collision -> reassign to either head or tail"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert the given item to the front of the linked list. | def _push_front(self, item):
#point the item's previous pointer to head and its
#next pointer to the item after the head
item.prev = self.head
item.next = self.head.next
#the item is still not fully in the linked list yet
#point the item after the head's previous pointer to
#the new item and point the head's next pointer to the item
self.head.next.prev = item
self.head.next = item | [
"def enqueue_front(self, item):\n self.list.append(item)",
"def insert_front(self, data):\n node = ListNode(data)\n if self.head:\n node.next = self.head\n self.head = node",
"def enqueue_back(self, item):\n self.list.prepend(item)",
"def insert_before(self, item):\n new_element = SinglyLinkedListElement(self.list, item, None)\n current_element = self.list.head\n was_inserted = False\n # Insertion happens before the first element\n if self == current_element:\n # This is the case where there is only one element in the list\n if current_element.next is None:\n self.list.head = new_element\n self.list.head.next = current_element\n self.list.tail = self.list.head.next\n self.list.size += 1\n return\n # Here there are more than one element in the list\n new_element.next = current_element\n self.list.head = new_element\n self.list.size += 1\n return\n\n while True:\n if current_element is None:\n was_inserted = False\n break\n # Check if it is the next element we are looking for\n next_element = current_element.next\n if next_element != None:\n if self == next_element:\n # Found the right slot - insert the new element\n new_element.next = next_element\n current_element.next = new_element\n was_inserted = True\n break\n current_element = current_element.next\n if was_inserted:\n self.list.size += 1",
"def insert_before_element(self, item, element):\n if item is not None and element is not None:\n element.insert_before(item)\n else:\n raise IndexError",
"def insert(self, item):\n if not item or isinstance(item, Node):\n raise ValueError(\"Cannot insert a None or a Node type\")\n\n if self.head == None:\n self.head = Node(item)\n else:\n node = Node(item)\n node.next = self.head\n self.head = node\n\n self.size += 1\n return True",
"def insert_at_index(self, index, item):\n # Check if the given index is out of range and if so raise an error\n if not (0 <= index <= self.size):\n raise ValueError('List index out of range: {}'.format(index))\n\n if index == 0:\n self.prepend(item)\n elif index == self.size:\n self.append(item)\n else:\n new_node = Node(item)\n curr = self.head\n for i in range(index): # curr will become the node that comes after the new_node we want to insert\n curr = curr.next\n print(curr.data)\n curr.previous.next = new_node # setting the prev's next of the curr to become new_node\n new_node.next = curr # setting the new_node to point to correct next node\n new_node.previous = curr.previous # setting the new_node to point to correct previous node\n curr.previous = new_node # curr's prev is now the new node instead of the old curr's prev\n self.size += 1",
"def insert_at(self, index, item):\n ptr = self.head\n if ptr is None:\n self.head = SinglyLinkedListElement(self, item, None)\n self.tail = self.head\n self.size += 1\n return\n i = 0\n while ptr is not None and ptr.data is not None:\n if i == index:\n ptr.insert(item)\n ptr = ptr.next\n i += 1",
"def move_to_front(self, node: Node):\n self.head = node\n node.prev = self.head\n node.next = self.tail",
"def insert_at_index(self, index, item):\n if not (0 <= index <= self.size):\n raise ValueError('List index out of range: {}'.format(index))\n if index == self.size:\n self.append(item)\n return\n if index == 0:\n self.prepend(item)\n return\n current_node = self.head\n count = 1\n while count < index: \n current_node = current_node.next\n count += 1\n new_node = Node(item)\n new_node.next = current_node.next\n current_node.next = new_node\n self.size += 1",
"def add_to_front(self, data):\r\n\t\tself._head = Node(data, self._head)",
"def prepend(self, value):\n # create a new node\n # set its next to be current front\n # update current front to new node\n self.front = LinkedListNode(value, self.front)\n self.size += 1\n if self.back is None:\n # ie empty list\n self.back = self.front",
"def insert(self, i, item):\n if item != None and item not in self:\n list.insert(self, i, item)",
"def _move_to_front(self, item: QueueItem | _cog.DocItem) -> None:\n # The parse queue stores soups along with the doc symbols in QueueItem objects,\n # in case we're moving a DocItem we have to get the associated QueueItem first and then move it.\n item_index = self._queue.index(item)\n queue_item = self._queue[item_index]\n del self._queue[item_index]\n\n self._queue.append(queue_item)\n log.trace(f\"Moved {item} to the front of the queue.\")",
"def insert(self, item):\n new_element = SinglyLinkedListElement(self.list, item, self.next)\n # The singly linked list is empty\n if self.list.head is None:\n self.list.head = new_element\n self.list.tail = new_element\n self.list.size += 1\n return\n\n # The singly linked list contains one element\n elif self.list.size == 1:\n if self.list.head == self:\n self.list.head = new_element\n if self.list.tail == self:\n self.list.tail = new_element\n return\n else:\n if self.list.head == self:\n self.list.head = new_element\n if self.list.tail == self:\n self.list.tail = new_element\n\n ptr = self.list.head\n while ptr is not None:\n if ptr.next == self:\n if ptr.next == self.list.tail:\n self.list.tail = new_element\n ptr.next = new_element\n ptr = ptr.next",
"def insert(self, item: 'void *', insertbefore: 'int const') -> \"void\":\n return _coin.SbPList_insert(self, item, insertbefore)",
"def insert(self, item: 'SbVec3f', insertbefore: 'int const') -> \"void\":\n return _coin.SbVec3fList_insert(self, item, insertbefore)",
"def add_front(self, value: object) -> None:\n\n # Initialize variable\n new_node = DLNode(value)\n\n # Add node\n new_node.next = self.sentinel.next\n new_node.next.prev = new_node\n\n self.sentinel.next = new_node\n new_node.prev = self.sentinel",
"def insert_before(self, node, value):\n if (node is None) or (self._head is None): # If you specify to insert a data node before an empty node or before an empty linked list, do nothing\n return\n\n if node == self._head: # If you insert a data node before the head of the linked list, insert it directly\n self.insert_to_head(value)\n return\n\n new_node = Node(value)\n pro = self._head\n not_found = False # If the specified inserted Node node is not found in the entire linked list, the mark amount is set to True\n while pro.next_node != node: # Find a Node before the specified Node\n if pro.next_node is None: # If the last node of the linked list has been reached, it indicates that the specified inserted Node node is not found in the linked list\n not_found = True\n break\n else:\n pro = pro.next_node\n if not not_found:\n pro.next_node = new_node\n new_node.next_node = node"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Applies all contained diffs to the given file_dict. This operation will be done in place. | def apply(self, file_dict: dict):
for filename, diff in self.diffs.items():
file_dict[filename] = diff.modified | [
"def update(self, files_dict):\n\n # take the FolderProcessor's result and compare it to the current DB",
"def diff(self) -> None:\n\n # We do not write to the translated files directly.\n self.copy_files()\n new_file = dict()\n old_file = dict()\n i = 0\n for old_filepath, new_filepath in zip(self.old_files, self.diff_dest_files):\n new_file[i] = dict()\n new_file[i][\"filepath\"] = new_filepath\n new_file[i][\"nodes\"] = self.parse_file(new_filepath)\n\n old_file[i] = dict()\n old_file[i][\"filepath\"] = old_filepath\n old_file[i][\"nodes\"] = self.parse_file(old_filepath)\n i += 1\n\n patches = dict()\n # diff each file\n for k in range(i):\n old_filepath = old_file[k][\"filepath\"]\n new_filepath = new_file[k][\"filepath\"]\n diffs_to_process = max(len(new_file[k][\"nodes\"]), len(old_file[k][\"nodes\"]))\n print_prominent_info(self.get_diff_intro_msg(old_filepath, new_filepath, k + 1, i, diffs_to_process))\n if diffs_to_process == 0:\n continue\n patches[new_filepath] = self.diff_nodes(old_filepath, new_file[k][\"nodes\"], old_file[k][\"nodes\"])\n self.patch_files(patches)\n log.info(\"Done\")",
"def update_compact_files(self, ):\n for file_path, updates in self._updates.items():\n if os.path.exists(file_path):\n with open_temp_copy(file_path, binary=True) as instream, open(file_path, 'wb') as outstream:\n updated_events = self._updated_compact_events(\n yaml.parse(instream),\n updates\n )\n \n yaml.emit(updated_events, outstream)\n else:\n with open(file_path, 'wb') as outstream:\n yaml.emit(self._fresh_content_events(updates.items()), outstream)",
"def diff_files(self):\n files = dict()\n # get the git diff stat without the last summary line\n lines = self.repo.git.diff('--stat', '--no-color').splitlines()[:-1]\n for line in lines:\n match = re.match(r' (.*\\S) *\\| *([0-9]+) ', line)\n fname = match[1]\n #changes = int(match[2])\n diff = self.get_diff_raw(fname)\n\n files[fname] = Patch(\n files=[fname],\n size=len(diff),\n hunks=diff.count('@@')/2,\n test_size=False)\n\n return files",
"def _merge_bigmapdiffs(self, storage_dict: Dict[str, Any], bigmap_name: str, array: bool) -> None:\n if self.diffs is None:\n raise Exception('`bigmaps` field missing')\n _logger.debug(bigmap_name)\n bigmapdiffs = [bm for bm in self.diffs if bm['path'] == bigmap_name]\n bigmap_key = bigmap_name.split('.')[-1]\n for diff in bigmapdiffs:\n _logger.debug('Applying bigmapdiff: %s', diff)\n if diff['action'] in ('add_key', 'update_key'):\n key = diff['content']['key']\n if array is True:\n storage_dict[bigmap_key].append({'key': key, 'value': diff['content']['value']})\n else:\n storage_dict[bigmap_key][key] = diff['content']['value']",
"def _transition_hashes(self, cursor, hash_field_name, diff_hashes):\n\t\tfrom reviewboard.diffviewer.models import RawFileDiffData\n\t\tlegacy_hash_field_name = \"legacy_%s\" % hash_field_name\n\t\tif settings.DEBUG:\n\t\t\told_filediff_info = dict((filediff.pk, getattr(filediff, legacy_hash_field_name).pk) for filediff in self.filter(**{legacy_hash_field_name + \"__in\": diff_hashes}))\n\t\telse:\n\t\t\told_filediff_info = None\n\t\tif connection.vendor == \"mysql\":\n\t\t\tcursor.execute(\"UPDATE %(filediff_table)s\" \" INNER JOIN %(raw_fdd_table)s raw_fdd\" \" ON raw_fdd.binary_hash = \" \" %(filediff_table)s.%(hash_field_name)s_id\" \" SET\" \" raw_%(hash_field_name)s_id = raw_fdd.id,\" \" %(hash_field_name)s_id = NULL\" \" WHERE raw_fdd.binary_hash IN (%(diff_hashes)s)\" % {\"filediff_table\": self.model._meta.db_table, \"raw_fdd_table\": RawFileDiffData._meta.db_table, \"hash_field_name\": hash_field_name, \"diff_hashes\": \",\".join(\"'%s'\" % diff_hash for diff_hash in diff_hashes)})\n\t\telif connection.vendor == \"postgresql\":\n\t\t\tcursor.execute(\"UPDATE %(filediff_table)s\" \" SET\" \" raw_%(hash_field_name)s_id = raw_fdd.id,\" \" %(hash_field_name)s_id = NULL\" \" FROM %(raw_fdd_table)s raw_fdd\" \" WHERE\" \" raw_fdd.binary_hash IN (%(diff_hashes)s) AND\" \" raw_fdd.binary_hash = \" \" %(hash_field_name)s_id\" % {\"filediff_table\": self.model._meta.db_table, \"raw_fdd_table\": RawFileDiffData._meta.db_table, \"hash_field_name\": hash_field_name, \"diff_hashes\": \",\".join(\"'%s'\" % diff_hash for diff_hash in diff_hashes)})\n\t\telse:\n\t\t\traw_fdds = RawFileDiffData.objects.filter(binary_hash__in=diff_hashes).only(\"pk\", \"binary_hash\")\n\t\t\tfor raw_fdd in raw_fdds:\n\t\t\t\tself.filter(**{legacy_hash_field_name: raw_fdd.binary_hash}).update(**{hash_field_name: raw_fdd.pk, legacy_hash_field_name: None})\n\t\tif settings.DEBUG:\n\t\t\tnew_filediff_info = dict((filediff.pk, getattr(filediff, hash_field_name).binary_hash) for filediff in self.filter(pk__in=old_filediff_info.keys()))\n\t\t\tassert old_filediff_info == new_filediff_info",
"def apply_vsop87_deltat_json_file(self, json_file):\n\t\twith open(json_file) as json_string:\n\t\t\tdelta_t = json.load(json_string)\n\t\tcursor = self.connection.cursor()\n\t\tfor table_name in delta_t:\n\t\t\ttable = delta_t[table_name]\n\t\t\tfirst = table.get(\"first\", None)\n\t\t\tlast = table.get(\"last\", None)\n\t\t\tfirst_ym = table.get(\"firstYM\", None)\n\t\t\tlast_ym = table.get(\"lastYM\", None)\n\t\t\tdata = table.get(\"table\", None)\n\t\t\tcursor.execute(DELTA_T_INSERT, [table_name, first, last, first_ym, last_ym, data])\n\t\tself.connection.commit()",
"def bulk_diff_update(self, mapper, key_columns, previous_mappings, mappings):\n return core.bulk_diff_update(self.session, mapper, key_columns, previous_mappings, mappings)",
"def apply_to(self, tagdb):\n for pkg, patch in self.iteritems():\n patch.apply(pkg, tagdb)",
"def accumulate_diff_info_output_to_orig(self, diff_info, accumulated_diff_info):\n\n for key in diff_info['keys']:\n value = diff_info['keys'][key]\n rest_text = value['rest_text']\n original_text = value['original_text']\n rest_text_filtered = value['rest_text_filtered_sc']\n original_text_filtered = value['original_text_filtered_sc']\n if key not in accumulated_diff_info.keys():\n accumulated_diff_info[key] = {'rest_chars': 0,\n 'original_chars': 0,\n \"rest_chars_filtered\": 0,\n \"original_chars_filtered\": 0\n }\n\n accumulated_diff_info[key]['rest_chars'] += len(rest_text)\n accumulated_diff_info[key]['original_chars'] += len(original_text)\n accumulated_diff_info[key]['rest_chars_filtered'] += len(rest_text_filtered)\n accumulated_diff_info[key]['original_chars_filtered'] += len(original_text_filtered)\n\n return accumulated_diff_info",
"def diff_dictionaries(update_dict, current_conf_dict):\n for key in update_dict:\n if isinstance(update_dict.get(key), dict):\n res = diff_dictionaries(update_dict.get(key),\n current_conf_dict.get(key, {}))\n if res:\n return True\n elif ordered(update_dict.get(key)) != ordered(\n current_conf_dict.get(key)):\n ctx.logger.info(\n 'Changes found in diff_dictionaries: key={key}\\n'.format(\n key=key))\n ctx.logger.info(\n 'update_dict: {}'.format(ordered(update_dict.get(key))))\n ctx.logger.info(\n 'current_conf_dict: {}'.format(ordered(\n current_conf_dict.get(key))))\n return True\n return False",
"def update_checksum_map(checksum_map_file_name='checksums.json', verify_checksums=False,\n top_directory='.', exclude=[], raise_errors=False, *args, **kwargs):\n import os.path\n import json\n import warnings\n \n verbosity = kwargs.pop('verbosity', 0)\n if not os.path.isfile(checksum_map_file_name) or not os.path.getsize(checksum_map_file_name) > 1:\n with open(checksum_map_file_name, 'w') as f:\n f.write('{}')\n with open(checksum_map_file_name, 'r') as f:\n checksum_map = json.load(f)\n path_map = {checksum_map[c]['path']: (c, checksum_map[c]) for c in checksum_map}\n changes = {}\n paths_and_names = find_files(top_directory, exclude)\n for path, name in paths_and_names:\n if verbosity > 1:\n print('Processing {0}'.format(path))\n changed = False\n if os.path.isfile(path) and not os.path.islink(path):\n if verbosity > 0:\n print('Checking {0}'.format(path))\n size = os.path.getsize(path)\n if path in path_map:\n checksum, map_entry = path_map[path]\n if map_entry['size'] != size:\n if map_entry['size'] > 0:\n error_message = 'Expected size of {0} does not match actual size of {1} for {2}'.format(map_entry['size'], size, path)\n if raise_errors:\n raise ValueError(error_message)\n else:\n warnings.warn(error_message)\n else:\n if verbosity > 0:\n print('\\tThe expected file size has changed. Re-computing checksum.')\n old_checksum = checksum\n checksum = md5checksum(path)\n map_entry.update({\n 'path': path,\n 'size': size,\n })\n checksum_map[checksum] = map_entry\n changes[checksum] = map_entry\n changed = True\n checksum_map.pop(old_checksum, None) # Remove the old entry from the map\n if verify_checksums and not changed:\n new_checksum = md5checksum(path)\n if checksum != new_checksum:\n error_message = 'Expected checksum {0} does not match actual checksum {1} for {2}'.format(checksum, new_checksum, path)\n if raise_errors:\n raise ValueError(error_message)\n else:\n warnings.warn(error_message)\n else:\n checksum = md5checksum(path)\n map_entry = {\n 'path': path,\n 'size': size,\n }\n checksum_map[checksum] = map_entry\n changes[checksum] = map_entry\n changed = True\n if changed:\n with open(checksum_map_file_name+'_tmp.json', 'w') as f:\n json.dump(changes, f, indent=4, separators=(',', ': '))\n else:\n if verbosity > 1:\n print('\\tThis is either not a file or just a link; skipping')\n if changes:\n with open(checksum_map_file_name, 'w') as f:\n json.dump(checksum_map, f, indent=4, separators=(',', ': '))\n if verbosity > 1:\n return checksum_map\n else:\n return",
"def update(self, filenames: Sequence[str], extrahash: str) -> None:\n\n # First, completely prune entries for nonexistent files.\n self.entries = {\n path: val\n for path, val in self.entries.items()\n if os.path.isfile(path)\n }\n\n # Also remove any not in our passed list.\n self.entries = {\n path: val for path, val in self.entries.items() if path in filenames\n }\n\n # Add empty entries for files that lack them.\n # Also check and store current hashes for all files and clear\n # any entry hashes that differ so we know they're dirty.\n for filename in filenames:\n if filename not in self.entries:\n self.entries[filename] = {}\n self.curhashes[filename] = curhash = get_files_hash(\n [filename], extrahash\n )\n # Also store modtimes; we'll abort cache writes if\n # anything changed.\n self.mtimes[filename] = os.path.getmtime(filename)\n entry = self.entries[filename]\n if 'hash' in entry and entry['hash'] != curhash:\n del entry['hash']",
"def accumulate_diff_info(self, ocromore_data, diff_info, accumulated_diff_info):\n table_name = ocromore_data['file_info'].tablename\n (missing_keys, additional_keys, same_keys) = diff_info\n\n for tag in missing_keys:\n accumulated_diff_info.add_info_at(tag, table_name, True, False, False)\n for tag in additional_keys:\n accumulated_diff_info.add_info_at(tag, table_name, False, True, False)\n for tag in same_keys:\n accumulated_diff_info.add_info_at(tag, table_name, False, False, True)\n\n return accumulated_diff_info",
"def autoapply_actions(results,\n file_dict,\n file_diff_dict,\n section,\n log_printer):\n\n default_actions, invalid_actions = get_default_actions(section)\n no_autoapply_warn = bool(section.get('no_autoapply_warn', False))\n for bearname, actionname in invalid_actions.items():\n log_printer.warn('Selected default action {!r} for bear {!r} does '\n 'not exist. Ignoring action.'.format(actionname,\n bearname))\n\n if len(default_actions) == 0:\n # There's nothing to auto-apply.\n return results\n\n not_processed_results = []\n for result in results:\n try:\n # Match full bear names deterministically, prioritized!\n action = default_actions[result.origin]\n except KeyError:\n for bear_glob in default_actions:\n if fnmatch(result.origin, bear_glob):\n action = default_actions[bear_glob]\n break\n else:\n not_processed_results.append(result)\n continue\n\n applicable = action.is_applicable(result, file_dict, file_diff_dict)\n if applicable is not True:\n if not no_autoapply_warn:\n log_printer.warn('{}: {}'.format(result.origin, applicable))\n not_processed_results.append(result)\n continue\n\n try:\n action().apply_from_section(result,\n file_dict,\n file_diff_dict,\n section)\n log_printer.info('Applied {!r} on {} from {!r}.'.format(\n action.get_metadata().name,\n result.location_repr(),\n result.origin))\n except Exception as ex:\n not_processed_results.append(result)\n log_printer.log_exception(\n 'Failed to execute action {!r} with error: {}.'.format(\n action.get_metadata().name, ex),\n ex)\n log_printer.debug('-> for result ' + repr(result) + '.')\n\n return not_processed_results",
"def check_docker_files(host, ssh_obj, files_modified_dict, dict_to_compare, remote_sha_sum_dict):\n for files in dict_to_compare.keys():\n try:\n temp_list = HandleSSHConnections.run_remote_commands(ssh_obj, \"sha256sum %s\" % files)\n shortened_file_name = files.split(\"/\")[-1]\n for line in temp_list:\n sha_sum = line.split()[0]\n if line.strip().split()[0] == dict_to_compare[files]:\n modified = False\n DictionaryHandling.add_to_dictionary(files_modified_dict, host, \"%s has been modified\" %\n shortened_file_name, modified)\n else:\n modified = True\n DictionaryHandling.add_to_dictionary(files_modified_dict, host, \"%s has been modified\" %\n shortened_file_name, modified)\n # Added the file name and sha sum in the key to be able to associate the sum to modified flag\n # This will help to identify it for colourization\n DictionaryHandling.add_to_dictionary(remote_docker_file_sums_dict, host, \"%s sha256sum : %s\" %\n (shortened_file_name, sha_sum), modified)\n except socket.error:\n print(\"No SSH connection is open\")",
"def compensate(self, *args, **kwargs):\n for i in self.fcmdict:\n self.fcmdict[i].compensate(*args, **kwargs)\n return self",
"def linkFileDict(fileDict, interactive):\n for f in fileDict:\n target = buildDotfilesPath(f)\n linkName = buildHomePath(fileDict[f])\n linkFile(target, linkName, interactive)",
"def apply_fixits(inputs, checker_names, file_paths, interactive, reports):\n def apply_process(out_dir):\n \"\"\"\n Execute clang-apply-replacements binary.\n \"\"\"\n subprocess.Popen([\n analyzer_context.get_context().replacer_binary,\n *ignore_flag,\n out_dir]).communicate()\n\n not_existing_files = set()\n existing_files = set()\n modified_files = set()\n\n ignore_flag = [\"--ignore-insert-conflict\"] if \\\n analyzer_types.is_ignore_conflict_supported() else []\n\n for i in inputs:\n fixit_dir = os.path.join(i, 'fixit')\n\n if not os.path.isdir(fixit_dir):\n LOG.info('No fixits in %s', i)\n continue\n\n with tempfile.TemporaryDirectory() as out_dir:\n for fixit_file in os.listdir(fixit_dir):\n with open(os.path.join(fixit_dir, fixit_file),\n encoding='utf-8', errors='ignore') as f:\n content = yaml.load(f, Loader=yaml.BaseLoader)\n fixit_mtime = get_last_mod_time(\n os.path.join(fixit_dir, fixit_file))\n\n existing, not_existing, modified = clang_tidy_fixit_filter(\n content, checker_names, file_paths, reports,\n fixit_mtime, interactive)\n\n existing_files.update(existing)\n not_existing_files.update(not_existing)\n modified_files.update(modified)\n\n fixit_file = os.path.join(out_dir, fixit_file)\n if len(content['Diagnostics']) != 0:\n with open(fixit_file, 'w',\n encoding='utf-8', errors='ignore') as out:\n yaml.dump(content, out)\n\n if not ignore_flag:\n apply_process(out_dir)\n try:\n os.remove(fixit_file)\n except FileNotFoundError:\n pass\n\n if ignore_flag:\n apply_process(out_dir)\n\n if existing_files:\n print(\"Updated files:\\n{}\".format(\n '\\n'.join(sorted(existing_files))),\n file=sys.stderr)\n if not_existing_files:\n print(\"Not existing files:\\n{}\".format(\n '\\n'.join(sorted(not_existing_files))),\n file=sys.stderr)\n if modified_files:\n print(\"Skipped files due to modification since last analysis:\\n{}\"\n .format('\\n'.join(sorted(modified_files))),\n file=sys.stderr)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if the result overlaps with source ranges provided. | def overlaps(self, ranges):
if isinstance(ranges, SourceRange):
ranges = [ranges]
for range in ranges:
for self_range in self.affected_code:
if range.overlaps(self_range):
return True
return False | [
"def overlap(r1: Rule, r2: Rule):\n if max(r1.src[0], r2.src[0]) > min(r1.src[1], r2.src[1]):\n return False\n if max(r1.dst[0], r2.dst[0]) > min(r1.dst[1], r2.dst[1]):\n return False\n return True",
"def overlaps(self, other):\n return not (self.start > other.end or self.end < other.start)",
"def overlaps(self, other: \"Interval\") -> bool:\n return not (self.end <= other.start or self.start >= other.end)",
"def regions_overlap(r1,r2):\n x = False\n for loc in r1.locs:\n if loc in r2.locs:\n return True\n return False",
"def rangeset_intersect(ranges0, ranges1, presorted=False):\n\n if len(ranges0) == 0 or len(ranges1) == 0:\n return _np.empty([0, 2])\n rng0, rng1 = list(map(_np.asarray, [ranges0, ranges1]))\n\n if not presorted:\n rng0, rng1 = [r[_np.argsort(r[:,0])] for r in [rng0, rng1]]\n for rng in [rng0, rng1]:\n assert _np.all(rng[:,1] > rng[:,0])\n\n l0, r0 = rng0.T\n l1, r1 = rng1.T\n f0, f1 = [rng.flatten() for rng in [rng0, rng1]]\n\n lin0 = inranges(l0, f1, [1, 0])\n rin0 = inranges(r0, f1, [0, 1])\n lin1 = inranges(l1, f0, [0, 0])\n rin1 = inranges(r1, f0, [0, 0])\n\n #keep only those edges that are within a good area of the other range\n l = weave(l0[lin0], l1[lin1])\n r = weave(r0[rin0], r1[rin1])\n return _np.array([l, r]).T",
"def overlap(a, b):\n # if any start / end is None then it doesn't overlap\n if a[0] is None or a[1] is None or b[0] is None or b[1] is None:\n return False\n # If the casing start/end intersects\n records_intersect = (a[0] > b[0] and a[0] < b[1]) or (a[1] > b[0] and a[1] < b[1])\n # If the series start or end in the same place\n records_overlap = (a[0] == b[0]) or (a[1] == b[1])\n return records_intersect or records_overlap",
"def overlap(self, x):\n return max(self.adr, x.adr) <= min(self.end, x.end)",
"def overlapping_ranges(lst):\n\n\tfirst_range = []\n\tsecond_range = []\n\tchecker = lst[-1]\n\n\tfor i in range (lst[0], lst[1] + 1):\n\t\tfirst_range.append(i)\n\t\n\n\tfor i in range(lst[2],lst[3] + 1):\n\t\tsecond_range.append(i)\n\t\n\n\tcount = 0 \n\tfor num in second_range:\n\t\tif num in first_range:\n\t\t\tcount += 1\n\n\tif checker <= count:\n\t\treturn True\n\n\treturn False",
"def overlap(a1, a2, b1, b2):\n\tassert a1 <= a2\n\tassert b1 <= b2\n\tassert isinstance(a1, int) and isinstance(a2, int) and isinstance(b1, int) and isinstance(b2, int)\n\t\n\t# if a interval is completely to the left of the b interval\n\tif a2 < b1:\n\t\treturn False\n\t# if a interval is completely to the right of the b interval\n\telif a1 > b2:\n\t\treturn False\n\telse:\n\t\treturn True",
"def _in_matched_range(start_idx, end_idx, matched_ranges):\n for range_start_idx, range_end_idx in matched_ranges:\n if not (end_idx <= range_start_idx or start_idx >= range_end_idx):\n return True\n return False",
"def overlap(start_1, end_1, start_2, end_2):\n return range(max(start_1, start_2),\n min(end_1, end_2) + 1)",
"def overlaps(self, ext2):\r\n\t\ts1, e1 = self.data['start'], self.data['end']\r\n\t\ts2, e2 = ext2.data['start'], ext2.data['end']\r\n\t\treturn (self.thash == ext2.thash and all([e2 > s1, e1 > s2]))",
"def are_overlapping(self, domain_interactor, domain):\n min1, *max1 = domain_interactor.split(\"-\")\n if max1 == []: # domain_interactor is not an interval\n return False\n\n min2, *max2 = domain.split(\"-\")\n if max2 == []: # domain is not an interval\n return False\n\n cut0, cut1 = max(int(min1), int(min2)), min(int(max1[0]), int(max2[0]))\n if cut1 - cut0 > 0:\n return True\n return False",
"def _do_intervals_overlap(intervals_a, intervals_b):\n\n def contained(points, intervals):\n return np.logical_and(\n np.less_equal(intervals[:, 0], points),\n np.less_equal(points, intervals[:, 1]))\n\n return np.logical_or(\n np.logical_or(\n contained(intervals_a[:, 0], intervals_b),\n contained(intervals_a[:, 1], intervals_b)),\n np.logical_or(\n contained(intervals_b[:, 0], intervals_a),\n contained(intervals_b[:, 1], intervals_a)))",
"def overlaps_dates(\n br: BlockRange, start: Optional[datetime], end: Optional[datetime]\n) -> bool:\n return (start is None or start <= br.end) and (end is None or br.start <= end)",
"def is_overlapping(self) -> bool:\n if self._is_overlapping is None:\n self._is_overlapping = any(\n self._single_intervals[i].end > self._single_intervals[i + 1].start for i in range(self.num_blocks - 1)\n )\n return self._is_overlapping",
"def allocation_pools_overlapping(src_pools, dst_pools):\n\n src_ip_set = convert_allocation_pools_to_ip_set(src_pools)\n dst_ip_set = convert_allocation_pools_to_ip_set(dst_pools)\n\n overlap = src_ip_set & dst_ip_set\n\n if not overlap:\n return False\n\n return True",
"def count_overlapping(ranges: List[RangePair]) -> int:\n return sum(does_range_overlap(range_pair) for range_pair in ranges)",
"def isoverlap(r1, r2):\n y1 = r1[1]\n x1 = r1[0]\n h1 = r1[3]\n w1 = r1[2]\n \n y2 = r2[1]\n x2 = r2[0]\n h2 = r2[3]\n w2 = r2[2]\n \n if ((x1+w1)<x2 or (x2+w2)<x1 or (y1+h1)<y2 or (y2+h2)<y1):\n return False\n else:\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempts to return a metric from an Apache Beam PipelineResults. | def get_pipeline_metric(pipeline_results, metric_name, index=0,
result_type='counters'):
metrics_filter = MetricsFilter().with_name(metric_name)
query_result = pipeline_results.metrics().query(metrics_filter)
try:
return query_result[result_type][index].committed
except IndexError:
logging.info(
'No key in metrics for %s at index %s, returning 0', metric_name, index)
return 0 | [
"def get_metrics_dict(results):\n raise NotImplementedError",
"def publish_metrics(\n self, result: PipelineResult, extra_metrics: Optional[dict] = None):\n metric_id = uuid.uuid4().hex\n metrics = result.metrics().query(self.filters)\n\n # Metrics from pipeline result are stored in map with keys: 'gauges',\n # 'distributions' and 'counters'.\n # Under each key there is list of objects of each metric type. It is\n # required to prepare metrics for publishing purposes. Expected is to have\n # a list of dictionaries matching the schema.\n insert_dicts = self._prepare_all_metrics(metrics, metric_id)\n\n insert_dicts += self._prepare_extra_metrics(metric_id, extra_metrics)\n if len(insert_dicts) > 0:\n for publisher in self.publishers:\n publisher.publish(insert_dicts)",
"def compute_metrics(self, results: List[dict]) -> Dict[str, float]:\n logger: MMLogger = MMLogger.get_current_instance()\n\n classes = self.dataset_meta['classes']\n self.version = self.dataset_meta['version']\n # load annotations\n self.data_infos = load(\n self.ann_file, backend_args=self.backend_args)['data_list']\n result_dict, tmp_dir = self.format_results(results, classes,\n self.jsonfile_prefix)\n\n metric_dict = {}\n\n if self.format_only:\n logger.info(\n f'results are saved in {osp.basename(self.jsonfile_prefix)}')\n return metric_dict\n\n for metric in self.metrics:\n ap_dict = self.nus_evaluate(\n result_dict, classes=classes, metric=metric, logger=logger)\n for result in ap_dict:\n metric_dict[result] = ap_dict[result]\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n return metric_dict",
"def get_pipeline(self):\n pipeline = beam.Pipeline(options=self.options)\n\n #pylint: disable=W0106\n (pipeline\n | BillboardChartsRead()\n | beam.GroupByKey()\n | beam.Map(self.analyze_lyrics)\n | beam.io.WriteToText('gs://billboard_charts/text_results.txt'))\n\n return pipeline",
"def getMetrics(self) -> Awaitable[Dict]:\n return self.client.send(\"Performance.getMetrics\", {})",
"def get_metrics(self):\n for field_name, field_obj in self.get_fields().items():\n if getattr(field_obj, 'is_metric', False):\n yield field_name, getattr(self, field_name)",
"def composite_metric_results(self) -> Dict[int, Dict[str, float]]:\n return self.scene_composite_metric_results",
"def extract_metrics(result, variety, backend):\n\n timings = result['timings']\n batch_size = result['batchSize']\n warmup_time = result['warmupTime']\n total_time = result['totalTime']\n training_time = total_time - warmup_time\n batch_count = len(timings)\n\n timings_s = np.array(timings) / 1000\n wall_time = total_time / 1000.0\n\n # Average examples per second across the entire benchmark run,\n # including warmup period. Assumes two warmup batches.\n # TODO: Lower to one batch when we have better-shaped zero tangent vectors.\n total_time_s = total_time / 1000.0\n total_num_examples = batch_size * (batch_count + 2)\n average_examples_per_second = total_num_examples / total_time_s\n\n # Examples per second, calculated after warmup period\n # of the measurements.\n warm_time_s = training_time / 1000.0\n warm_num_examples = batch_size * batch_count\n examples_per_second = warm_num_examples / warm_time_s\n\n metrics = [{\n 'name': 'exp_per_second',\n 'value': examples_per_second\n }, {\n 'name': 'avg_exp_per_second',\n 'value': average_examples_per_second\n }, {\n 'name': 'startup_time',\n 'value': warmup_time / 1000.0\n }, {\n 'name': 'step_time_median',\n 'value': np.median(timings_s)\n }, {\n 'name': 'step_time_min',\n 'value': np.min(timings_s)\n }, {\n 'name': 'step_time_max',\n 'value': np.max(timings_s)\n }]\n\n return (wall_time, metrics)",
"def processResults(self, cmd, result):\n values = {}\n\n # Command output is in cmd.result.output\n for line in cmd.result.output.splitlines():\n match = re.match(\n r'^Chain (?P<chain>\\S+) \\(policy \\S+ '\n r'(?P<packets>\\d+) packets, '\n r'(?P<bytes>\\d+) bytes\\)$',\n line)\n\n if match:\n for measure in (\"packets\", \"bytes\"):\n values_key = \"{}{}\".format(match.group(\"chain\"), measure)\n values[values_key] = int(match.group(measure))\n\n # Add values to result only for all matching datapoints.\n for point in cmd.points:\n if point.id in values:\n result.values.append((point, values[point.id]))",
"def metrics(self, metrics):\n logger.debug(\"Submitting metrics to the api\")\n return self._submit_metrics(metrics)",
"def metric_results(self) -> Dict[int, Dict[str, torch.Tensor]]:\n return self.scene_metric_results",
"def results(self):\r\n return pd.Series(\r\n {\r\n \"metric_bo\": getattr(self, \"metric_bo\", None),\r\n \"time_bo\": getattr(self, \"time_bo\", None),\r\n \"metric_train\": getattr(self, \"metric_train\", None),\r\n \"metric_test\": getattr(self, \"metric_test\", None),\r\n \"time_fit\": getattr(self, \"time_fit\", None),\r\n \"mean_bagging\": getattr(self, \"mean_bagging\", None),\r\n \"std_bagging\": getattr(self, \"std_bagging\", None),\r\n \"time_bagging\": getattr(self, \"time_bagging\", None),\r\n \"time\": getattr(self, \"time\", None),\r\n },\r\n name=self.name,\r\n )",
"def generate_mlpipeline_metrics(metrics):\n metadata = list()\n for name, value in metrics.items():\n if not isinstance(value, (int, float)):\n try:\n value = float(value)\n except ValueError:\n print(\"Variable {} with type {} not supported as pipeline\"\n \" metric. Can only write `int` or `float` types as\"\n \" pipeline metrics\".format(name, type(value)))\n continue\n metadata.append({\n 'name': name,\n 'numberValue': value,\n 'format': \"RAW\",\n })\n\n with open('/mlpipeline-metrics.json', 'w') as f:\n json.dump({'metrics': metadata}, f)",
"async def get_metrics(self) -> [Metric]:\n raise NotImplementedError(\"Class {0} has not implemented the get_metrics functions. \"\n \"All meters must implement this function\".format(self.__class__))",
"def compute_metrics(self, stages=None, metrics=roc_auc_score, label=1):\n def _compute_metrics(proba):\n return metrics((self.y == label) * 1, proba[:, label], sample_weight=self.sample_weight)\n\n return pandas.DataFrame(self._map_on_stages(_compute_metrics, stages=stages))",
"def produce_phase(pipeline_run):\n scores = pipeline_run['run']['results']['scores']\n\n if len(scores) > 1:\n raise ValueError('This run has more than one score!')\n\n scores = scores[0]\n\n return {\n 'metric': scores['metric']['metric'],\n 'context': pipeline_run['context'],\n 'normalized_score': scores['normalized']\n }",
"def test_get_metrics(self):\n pass",
"def _read(self, results=None):\n results = results or self.table.scan()\n response = results.get('ResponseMetadata') or {}\n httpcode = response.get('HTTPStatusCode')\n if httpcode != 200:\n raise ValueError(response)\n return results",
"def collect(self) -> core.Metric:\n results = self._tester.test()\n\n download_speed = core.GaugeMetricFamily('download_speed_bps',\n 'Download speed (bit/s)')\n download_speed.add_metric(labels=[], value=results.download)\n yield download_speed\n\n upload_speed = core.GaugeMetricFamily('upload_speed_bps',\n 'Upload speed (bit/s)')\n upload_speed.add_metric(labels=[], value=results.upload)\n yield upload_speed\n\n ping = core.GaugeMetricFamily('ping_ms', 'Latency (ms)')\n ping.add_metric(labels=[], value=results.ping)\n yield ping\n\n bytes_received = core.GaugeMetricFamily('bytes_received',\n 'Bytes received during test')\n bytes_received.add_metric(labels=[], value=results.bytes_received)\n yield bytes_received\n\n bytes_sent = core.GaugeMetricFamily('bytes_sent',\n 'Bytes sent during test')\n bytes_sent.add_metric(labels=[], value=results.bytes_sent)\n yield bytes_sent"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Enable logging to Humio if the `token` and `dataspace` are set in the `secrets` payload. | def configure_control(configuration: Configuration, secrets: Secrets):
token = secrets.get("humio", {}).get("token", "").strip()
if not token:
logger.debug("Missing Humio token secret")
with_logging.enabled = False
return
dataspace = secrets.get("humio", {}).get("dataspace", "").strip()
if not dataspace:
logger.debug("Missing Humio dataspace")
with_logging.enabled = False
return
logger.debug("Humio logging control is active for this session")
with_logging.enabled = True | [
"def issue_influxdb_token(self, token_data: TokenData) -> str:\n secret = self._config.influxdb_secret\n if not secret:\n raise NotConfiguredException(\"No InfluxDB issuer configuration\")\n if self._config.influxdb_username:\n username = self._config.influxdb_username\n else:\n username = token_data.username\n if token_data.expires:\n expires = token_data.expires\n else:\n now = datetime.now(timezone.utc)\n expires = now + timedelta(minutes=self._config.exp_minutes)\n payload = {\n \"exp\": int(expires.timestamp()),\n \"iat\": int(time.time()),\n \"username\": username,\n }\n return jwt.encode(payload, secret, algorithm=\"HS256\")",
"def register_opts(conf):\n conf.register_opts(keystone.middleware.auth_token.opts,\n group='keystone_authtoken',\n )\n keystone.middleware.auth_token.CONF = conf",
"def test_session_log_secrets(device_slog):\n conn = ConnectHandler(**device_slog)\n conn.session_log.write(\"\\nTesting password and secret replacement\\n\")\n conn.session_log.write(\"This is my password {}\\n\".format(conn.password))\n conn.session_log.write(\"This is my secret {}\\n\".format(conn.secret))\n\n file_name = device_slog[\"session_log\"]\n with open(file_name, \"r\") as f:\n session_log = f.read()\n if conn.password:\n assert conn.password not in session_log\n if conn.secret:\n assert conn.secret not in session_log",
"def check_manual_token(spotify_handler):\n if request.headers.get('Authorization'):\n access_token = request.headers.get('Authorization').split()[1]\n spotify_handler.get_cache_handler().save_token_to_cache(\n {\n 'access_token': access_token,\n 'expires_in': 3600,\n 'scope': 'user-library-read playlist-modify-public playlist-read-collaborative',\n 'expires_at': int(time.time()) + 3600,\n }\n )",
"def test_check_audience_param_not_set(self):\n\n config = {\n 'init_config': {},\n 'instances': [\n {\n 'url': 'http://localhost:13001',\n 'authentication': {\n 'token_auth': {\n 'name': \"admin\",\n 'initial_token': \"dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx\",\n 'renewal_days': 10\n }\n },\n 'saved_searches': [{\n \"name\": \"minimal_metrics\",\n \"parameters\": {}\n }],\n 'tags': []\n }\n ]\n }\n\n # This is done to avoid going in the commit_succeeded call after the check runs\n self.collect_ok = False\n\n check = False\n\n try:\n self.run_check(config, mocks={\n '_dispatch_saved_search': _mocked_dispatch_saved_search,\n '_search': _mocked_search,\n '_saved_searches': _mocked_saved_searches,\n })\n except CheckException:\n check = True\n\n self.assertTrue(check, msg='Splunk metric instance missing \"authentication.token_auth.audience\" value')",
"def _set_token(self, payload):\n if self.api_token is None:\n self._auth_zabbix(self.__api_user, self.__api_password)\n payload['auth'] = self.api_token\n return payload",
"def test_auth_token(get_data):\n assert os.environ['OANDA_PRACTISE_TOKEN'] in\\\n get_data.headers['Authorization']",
"def warn_missing_auth():\n click.echo('No authenication means provided!', err=True)\n click.echo('You must provide an authentication means either by passing '\n '--auth-token or by persisting a login token to your local '\n 'MetaGenScope configuration file (see metagenscope login help).')",
"def set_token(token):\n resp = get_config()\n if not resp:\n return False\n data = resp[\"result\"]\n path = resp[\"path\"]\n data[\"token\"] = token\n with open(path, \"w\") as file:\n json.dump(data, file, sort_keys=True, indent=\"\")\n return True",
"def _enable_ig(ctx, data_store, token):\n headers = {\n \"Authorization\": f\"Bearer {token}\",\n \"Content-Type\": \"application/json; charset=utf-8\"\n }\n project = os.environ['FHIR_PROJECT']\n location = os.environ['GOOGLE_LOCATION']\n data_set = os.environ['GOOGLE_DATASET']\n url = f\"https://healthcare.googleapis.com/v1beta1/projects/{project}/locations/{location}/datasets/{data_set}/fhirStores/{data_store}?updateMask=validationConfig\"\n response = requests.patch(\n url=url,\n headers=headers,\n json={\n \"validationConfig\": {\n \"enabledImplementationGuides\": [\"https://ncpi-fhir.github.io/ncpi-fhir-ig/ImplementationGuide/NCPI-FHIR-Implementation-Guide\"]\n }\n }\n )\n if response.status_code == 200:\n logger.info(f\"IG enabled on {data_store}\")\n else:\n logger.info((\"could.not.enable.ig\", data_store, response.status_code, response.text))",
"def log_organizer_secret_to_console(app):\n if not app.testing:\n try:\n url = flask.url_for('organizer.setup_game',\n organizer_secret=app.organizer_secret,\n _method='GET')\n print(f'Organizer URL: {url}')\n except Exception as e:\n print(f'Ignore {e}. organizer_secret={app.organizer_secret}')",
"def config_set(self):\n\t\tif (self.config.config_has_section() and not self.config.get_val_from_config('token')) or (not self.config.config_has_section()):\n\t\t\treturn 1\n\t\telif self.config.get_val_from_config('token'):\n\t\t\tiocalls.print_text('\\nLogged in as: %s' %self.config.get_val_from_config('username'))\n\t\t\tiocalls.print_text('Session Token: %s' %self.config.get_val_from_config('token'))\n\t\t\treturn 0",
"def set_auth_token(token):\n global _auth_token\n _auth_token = token",
"def setup_logging(app):\n formatter = logging.Formatter(\n \"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\n handler = RotatingFileHandler(\n 'atm_api.log', maxBytes=10000000, backupCount=5)\n handler.setLevel(app.config[\"LOG_LEVEL\"])\n handler.setFormatter(formatter)\n app.logger.addHandler(handler)",
"def add_auth_middleware(app):\n auth_conf = dict(CONF.keystone_authtoken)\n # These items should only be used for accessing Ironic API.\n # For keystonemiddleware's authentication,\n # keystone_authtoken's items will be used and\n # these items will be unsupported.\n # [ironic]/os_password\n # [ironic]/os_username\n # [ironic]/os_auth_url\n # [ironic]/os_tenant_name\n auth_conf.update({'admin_password':\n CONF.ironic.os_password or\n CONF.keystone_authtoken.admin_password,\n 'admin_user':\n CONF.ironic.os_username or\n CONF.keystone_authtoken.admin_user,\n 'auth_uri':\n CONF.ironic.os_auth_url or\n CONF.keystone_authtoken.auth_uri,\n 'admin_tenant_name':\n CONF.ironic.os_tenant_name or\n CONF.keystone_authtoken.admin_tenant_name,\n 'identity_uri':\n CONF.ironic.identity_uri or\n CONF.keystone_authtoken.identity_uri})\n auth_conf['delay_auth_decision'] = True\n app.wsgi_app = auth_token.AuthProtocol(app.wsgi_app, auth_conf)",
"def run_with_token(self):\n\n if not self.config.token:\n logger.critical(\n \"Token is empty. Please open the config file and add the bot's token!\"\n )\n sys.exit(1)\n else:\n self.run(self.config.token)",
"def test_telemetry_enabledbydefault(duthosts, enum_rand_one_per_hwsku_hostname):\n duthost = duthosts[enum_rand_one_per_hwsku_hostname]\n\n status = duthost.shell('sonic-db-cli CONFIG_DB HGETALL \"FEATURE|telemetry\"',\n module_ignore_errors=False)['stdout_lines']\n status_list = get_list_stdout(status)\n # Elements in list alternate between key and value. Separate them and combine into a dict.\n status_key_list = status_list[0::2]\n status_value_list = status_list[1::2]\n status_dict = dict(list(zip(status_key_list, status_value_list)))\n for k, v in list(status_dict.items()):\n if str(k) == \"status\":\n status_expected = \"enabled\"\n pytest_assert(str(v) == status_expected,\n \"Telemetry feature is not enabled\")",
"def check_auth(token):\r\n return token == SLACK_TOKEN",
"async def startup(app):\r\n app['jwt_secret'] = 'secret' # note! Changing JWT configuration will fail authorization \r\n app['jwt_algorithm'] = 'HS256'\r\n app['jwt_exp_delta_seconds'] = 60000000\r\n app['jwt_exp_delta_seconds_remember_me'] = 36000000\r\n app.logger.info('starting up server')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Always write usage_stats.json regardless of report success/failure. If the report fails, the error message should be written to usage_stats.json. If the file write fails, the error will just stay in dashboard.log and usage_stats.json won't be written. | def _report_usage_sync(self):
if not self.usage_stats_enabled:
return
try:
self._fetch_and_record_extra_usage_stats_data()
data = ray_usage_lib.generate_report_data(
self.cluster_config_to_report,
self.total_success,
self.total_failed,
self.seq_no,
self._dashboard_head.gcs_client.address,
)
error = None
try:
self.client.report_usage_data(
ray_usage_lib._usage_stats_report_url(), data
)
except Exception as e:
logger.info(f"Usage report request failed. {e}")
error = str(e)
self.total_failed += 1
else:
self.total_success += 1
finally:
self.seq_no += 1
data = ray_usage_lib.generate_write_data(data, error)
self.client.write_usage_data(data, self.session_dir)
except Exception as e:
logger.exception(e)
logger.info(f"Usage report failed: {e}") | [
"def testReportFileWriteOutAfterEachSuiteReportReport(self):\n self.reporter.SetReportFile(self.file_name)\n self.reporter.SuiteReport('PassOnReportFile',\n constants.PASS,\n 'suite line')\n self.assertTrue(os.path.isfile(self.file_name) and\n os.path.getsize(self.file_name) > 0)",
"def test_json_reporting_plugin_write_output_no_plugin_context():\n with TemporaryDirectory() as tmp_dir:\n jrp = setup_json_reporting_plugin(tmp_dir, False)\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n assert jrp.write_output(package, \"level\", \"line\")\n output_file = os.path.join(os.getcwd(), package.name + \"-\" + \"level\" + \".json\")\n if os.path.exists(output_file):\n os.remove(output_file)",
"def _export_json_report(self, data, output_dir, filename):\n filename = os.path.join(output_dir, filename)\n dump_data(data, filename, indent=2, default=make_json_serializable)\n logger.info(\"Generated %s\", filename)",
"def do_write_report(self, arg):\n # Store original output channel\n old_stdout = sys.stdout\n # Open file and write\n with open(arg, \"w\") as f:\n sys.stdout = f\n print(self.last_fit_report)\n # Restore original output channel\n sys.stdout = old_stdout",
"def _write_report_to_output_folder(self, file_name : str, report : str):\n if file_name[file_name.rfind('.'):] != \".html\":\n raise Exception(\"Output file must have extension .html, files is named %s\" % file_name)\n\n output_path = os.path.join(self._output_folder, file_name)\n with open(output_path, 'w') as output_file:\n output_file.write(report)\n output_file.closed",
"def test_write_report_missing_file(mock_opened_file):\n handle = mock_opened_file()\n cleanup_report.write_report([{\n 'metadata_path': 'metadata.yaml',\n 'files': [\n {'path': 'example_path1.py', 'services': ['example_svc']},\n {'path': 'example_path2.py', 'services': ['example_svc']},\n {'path': 'example_path3.py', 'services': ['example_svc']}\n ]\n }], [\n cleanup_report.make_github_url('', 'example_path1.py'),\n cleanup_report.make_github_url('', 'example_path3.py')\n ], 'test.csv')\n calls = make_expected_calls(\n 1, 2, 2, [\n ','.join([cleanup_report.make_github_url(\n '', 'example_path1.py'), 'Python', 'example_svc']),\n ','.join([cleanup_report.make_github_url(\n '', 'example_path3.py'), 'Python', 'example_svc'])\n ]\n )\n handle.write.assert_has_calls(calls)",
"def _handle_send_report(self, request_json: dict):\n if self.headers.get('Authorization') is None:\n self._set_response(400, 'text/html')\n self.wfile.write(b'missing token')\n return\n\n if self.headers.get('Authorization') != self.token:\n self._set_response(401, 'text/html')\n self.wfile.write(b'Invalid Token')\n return\n\n if request_json.get('xmlReport') is None:\n self._set_response(400, 'text/html')\n self.wfile.write(b'missing xmlReport')\n return\n\n self._set_report_context(request_json)\n self._create_incident(request_json)\n self._set_response(200, 'text/html')",
"def write_counters_to_file(self):\n with open(os.path.join(self.cwd,'data/others/counters.txt'),'w') as outputfile:\n json.dump(CounterValues().last_counter,outputfile)\n return True \n return False",
"def test_writeResults(self):\n stringIO = StringIO()\n result = DistReporter(Reporter(stringIO))\n runner = self.getRunner()\n runner.writeResults(result)\n self.assertTrue(stringIO.tell() > 0)",
"def get_stats_json(self) -> Optional[Path]:\n stats_json = self.log_dir / 'Stats.json'\n if stats_json.exists():\n return stats_json\n return None",
"def test_write_empty_report(mock_opened_file):\n cleanup_report.write_report([], [], 'test.csv')\n mock_opened_file.assert_called_with('test.csv', 'w')\n handle = mock_opened_file()\n handle.write.assert_called()",
"def _set_report_file(self):\n if not self._parameters.report_file_path:\n self._report_file = None\n return\n try:\n self._report_file = gfile.Open(self._parameters.report_file_path, 'w')\n except IOError as e:\n raise e",
"def write_report(stats, to):\n rendered_template = _render(stats)\n\n with open(to, \"wb\") as f:\n f.write(rendered_template.encode(\"utf-8\"))",
"def log_report(self, name: str, content: str) -> Optional[str]:\n file_name = os.path.join(LOCAL_REPORTS_DIR, name + \".log\")\n\n with open(file_name, \"w\") as wfh:\n wfh.write(content)",
"def write_to_disk(testcase_run, testcase_file_path):\n if not testcase_run:\n return\n\n stats_file_path = TestcaseRun.get_stats_filename(testcase_file_path)\n with open(stats_file_path, 'w') as f:\n f.write(testcase_run.to_json())",
"def report(self):\n self.debug(\"Writing XML file to: %s\", self.options.xunit_file)\n xml_file = codecs.open(self.options.xunit_file, 'w',\n self.encoding, 'replace')\n\n self.stats['encoding'] = self.encoding\n self.stats['total'] = (self.stats['timeout'] + self.stats['failures'] +\n self.stats['passed'] + self.stats['skipped'])\n\n xml_file.write('<?xml version=\"1.0\" encoding=\"%(encoding)s\"?>'\n '<testsuite name=\"gst-validate-launcher\" tests=\"%(total)d\" '\n 'errors=\"%(timeout)d\" failures=\"%(failures)d\" '\n 'skipped=\"%(skipped)d\">' % self.stats)\n\n tmp_xml_file = codecs.open(self.tmp_xml_file.name, 'r',\n self.encoding, 'replace')\n\n for l in tmp_xml_file:\n xml_file.write(l)\n\n xml_file.write('</testsuite>')\n xml_file.close()\n tmp_xml_file.close()\n os.remove(self.tmp_xml_file.name)\n\n self._createTmpFile()",
"def write_pull_info_to_file(self):\n filename = 'pull_info_{}.json'.format(\n time.strftime('%Y_%m_%d_%H%M%S', time.gmtime()))\n current_path = os.path.dirname(os.path.abspath(__file__))\n self.file_path = current_path + '/reports/' + filename\n\n with open(self.file_path, 'w+') as outfile:\n json.dump(self.pull_info, outfile)\n\n return self.file_path",
"def set_statfile(self, name: str) -> None:\n self.stats.close()\n self.stats = open(os.path.join(self._statPath, name), 'a')",
"def write_issue_report():\n print \"preparing report...\"\n report = open(OUTPUT_DIRECTORY + \"/report-\" + time.strftime(\"%Y-%m-%dT%H:%M:%SZ\") + \".txt\", 'w')\n report_weeks = get_report_weeks()\n report_end_date = get_report_end_date()\n for week_number in range(0, report_weeks):\n week_end_date = report_end_date - timedelta(days = week_number * 7)\n week_start_date = week_end_date - timedelta(days = 6)\n report_header = \"Issues completed from \" + week_start_date.strftime(\"%m/%d/%Y\") + \" to \" + week_end_date.strftime(\"%m/%d/%Y\")\n report.write(\"==============================================\\n\")\n report.write(report_header)\n report.write(\"\\n==============================================\\n\\n\")\n\n for repo_data_file in os.listdir(\"data\"):\n repo_header_added = False\n\n with open(\"data/\" + repo_data_file) as df: \n repo_data = json.load(df)\n\n for issue in repo_data:\n issue_closed_at = dateutil.parser.parse(issue['closed_at']).date()\n if week_end_date >= issue_closed_at >= week_start_date:\n if not repo_header_added:\n repo_header = repo_data_file.replace(\"_\", \"/\")\n report.write(\"--------------------------------------\\n\" + repo_header + \":\\n--------------------------------------\\n\\n\")\n repo_header_added = True\n line = (\"* \" + issue['title'] + \"\\n\" + issue['html_url'] + \"\\n\").encode('ascii', 'ignore').decode('ascii')\n report.write(line)\n if repo_header_added is True: \n report.write(\"\\n\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a time in clocks to a time in s | def clocks_to_s(time, clock_cycle=20e-9):
return time*clock_cycle | [
"def clocks_to_s(time, clock_cycle=20e-9):\n return time * clock_cycle",
"def convert_time_to_seconds(time_string):\n if time_string[-1] == 's':\n return int(time_string[:-1])\n else:\n denominations = [int(t) for t in time_string.split(':')]\n converts = [60**i for i in reversed(range(len(denominations)))]\n return sum([c*d for c, d in zip(converts, denominations)])",
"def second_to_hour(time_s: float):\n time_h = time_s / 60 / 60\n return time_h",
"def mattime_to_sbetime(self, dt):\n dtnum = (dt - 719529) * 24 * 3600\n return dtnum",
"def seconds2timestring(secs):\n if not secs:\n secs = 0\n return \"{:02d}:{:02d}:{:02d}\".format(secs/3600, (secs/60)%60, secs%60)",
"def ConvertTimeToSeconds(time):\n splittime = time.split(\":\")\n minutes = int(splittime[0])\n seconds = int(splittime[1])\n timetotal = 60*minutes + seconds\n return timetotal",
"def convert_time(seconds):\n unit = \"seconds\"\n time = seconds\n if time >= 60:\n time = time / 60\n unit = \"minutes\"\n if time >= 60:\n time = time / 60\n unit = \"hours\"\n\n return time, unit",
"def duration_to_kodi(time_s):\n return time_s / 1000",
"def TimeConvert(num):\n # code goes here\n m = num % 60\n h = (num - m) / 60\n return '{}:{}'.format(int(h), int(m))",
"def sec_x10(time):\n return (time.second % 10) * 10 + time.microsecond / 100000",
"def get_time(self,time,date):\n\t\tsts = date[6:] + '-' + date[3:5] + '-' + date[0:2] + ' ' + time[0:12]\n\t\tgmtplus = float(time[18])\n\t\tsjd = Time(sts, format='iso', scale='utc').jd - gmtplus/24.0 # subtract +1 hr\n\t\treturn sjd",
"def _to_time(integ, frac, n=32):\n return integ + float(frac) / 2**n",
"def slurm_time_to_seconds(time:str) -> int:\n # Get rid of the milliseconds and change the separator for day to hours from \"-\" to \":\"\n time_tmp = (time.replace(\"-\",\":\")).rsplit('.',1)[0]\n # Split each units of time (seconds, minutes, hours and days) and convert them into seconds before adding them together.\n seconds=sum(x * int(t) for x, t in zip([1, 60, 3600, 86400], reversed(time_tmp.split(\":\"))))\n return seconds",
"def toSegundos(self):\r\n return self.__hh * 3600 + self.__mm * 60 + self.__ss",
"def convert(seconds):\n min, sec = divmod(seconds, 60)\n hour, min = divmod(min, 60)\n return \"%dh: %02dm: %02ds\" % (hour, min, sec)",
"def format_time(self, seconds):\n return str(int(seconds+1))+'s'",
"def sbetime_to_mattime(self, dt):\n dtnum = dt / 24 / 3600 + 719529\n return dtnum",
"def format_time(ms):\n if ms == 0:\n return \"?:??\"\n else:\n return \"%d:%02d\" % (ms / 60000, (ms / 1000) % 60)",
"def seconds2time(my_seconds):\n return (datetime(1970,1,1) + timedelta(seconds=my_seconds)).time()",
"def tstamp_to_milisseconds(self, timestamp):\n \n ftr = [3600, 60, 1] # lista de segundos para transformar hrs, min, seg\n\n # separa horas, minutos e segundos, convert todos para segundos e soma\n return 1000*sum([a*b for a, b in\n zip (ftr, [int(i) for i in\n timestamp.split(\":\")])])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the expected tqisa filename based on the qisa filename. | def infer_tqisa_filename(qisa_fn: str):
return qisa_fn[:-4]+'tqisa' | [
"def test_source_ttf_font_filename_equals_familystyle(self):\n ttfont = Font.get_ttfont(self.operator.path)\n\n style_name = ttfont.stylename\n if style_name == 'Normal' or style_name == 'Roman':\n style_name = 'Regular'\n\n expectedname = '{0}-{1}'.format(ttfont.familyname.replace(' ', ''),\n style_name.replace(' ', ''))\n actualname, extension = os.path.splitext(self.operator.path)\n\n self.expectedfilename = '{0}{1}'.format(expectedname, extension)\n self.assertEqual(os.path.basename(actualname), expectedname)",
"def GetTestNameAndISAFromFileName(filename):\n # Strip the \".json\" extension\n stripped_basename = os.path.splitext(os.path.basename(filename))[0]\n # The ISA is the last element in the filename, seperated with \"-\".\n if stripped_basename.endswith(('-a32', '-t32')):\n isa = [stripped_basename[-3:]]\n test_name = stripped_basename[:-4]\n else:\n # If the ISA is ommitted, support both.\n isa = [\"a32\", \"t32\"]\n test_name = stripped_basename\n\n return (test_name, isa)",
"def test1_inputfilename(self):\n postdata = {'language':'fr','encoding':'utf-8'}\n validmeta, metadata, parameters = self.inputtemplate.generate(None, None, postdata)\n self.assertTrue(validmeta)\n self.assertTrue(isinstance(metadata,clam.common.data.CLAMMetaData))\n filename = clam.common.data.resolveinputfilename(self.inputtemplate.filename, parameters, self.inputtemplate, 0)\n self.assertEqual(filename,'test.utf-8.fr.txt')",
"def test_get_valid_filename() -> None:\n assert (\n fileup.get_valid_filename(\"john's portrait in 2004.jpg\")\n == \"johns_portrait_in_2004.jpg\"\n )",
"def test_case_name(expected_output_path):\n basename = os.path.basename(expected_output_path)\n basename = basename.replace('-out-', '-')\n basename = basename.replace('-in-', '-')\n basename = basename.replace('.txt', '')\n return basename",
"def get_download_filename(self):\n title = self.contentnode.title\n filename = \"{} ({}).{}\".format(title, self.get_preset(), self.get_extension())\n valid_filename = get_valid_filename(filename)\n return valid_filename",
"def test2_outputfilename(self):\n postdata = {'language':'fr','encoding':'utf-8'}\n validmeta, metadata, parameters = self.inputtemplate.generate(None, None, postdata)\n self.assertTrue(validmeta)\n self.assertTrue(isinstance(metadata,clam.common.data.CLAMMetaData))\n filename = clam.common.data.resolveinputfilename(self.inputtemplate.filename, parameters, self.inputtemplate, 0)\n self.assertEqual(filename,'test.utf-8.fr.txt')",
"def query_log_filename(self, idx):\n if not isinstance(idx, baseinteger):\n raise TypeError(\"idx can only be an instance of type baseinteger\")\n filename = self._call(\"queryLogFilename\",\n in_p=[idx])\n return filename",
"def filename_core (apath):\n if (apath is None): # sanity check\n return ''\n return os.path.basename(os.path.splitext(apath)[0])",
"def construct_file_path(options):\n\n # if path already contains file name return \n if options.with_fn:\n return options.src_path\n \n fn = \"e%d-r%04d-s%02d-c%02d.xtc\" % (options.expid, options.runnr,\n options.stream, options.chunk)\n return os.path.join(options.src_path, fn)",
"def get_file_name(state, year) :\r\n \r\n global_data_path = os.path.dirname(__file__) + \"/../raw/\"\r\n state_year_data_path = global_data_path + str(year) + \"/\" + state + \"/\"\r\n\r\n if state == \"AK\":\r\n if year < 2000:\r\n raise FileNotFoundError(\"No raw data for \" + str(year) + \"elections in \" + state)\r\n elif year < 2014 :\r\n return state_year_data_path + \"results.html\"\r\n elif year >= 2014 :\r\n return state_year_data_path + \"results.txt\"\r\n else :\r\n raise ValueError(\"State \" + state + \" is not yet supported\")",
"def qafile_from_framefile(frame_file, qaprod_dir=None, output_dir=None):\n frame_file = checkgzip(frame_file)\n frame_meta = read_meta_frame(frame_file)\n night = frame_meta['NIGHT'].strip()\n camera = frame_meta['CAMERA'].strip()\n expid = int(frame_meta['EXPID'])\n if frame_meta['FLAVOR'] in ['flat', 'arc']:\n qatype = 'qa_calib'\n else:\n qatype = 'qa_data'\n # Name\n qafile = findfile(qatype, night=night, camera=camera, expid=expid,\n outdir=output_dir, qaprod_dir=qaprod_dir)\n # Return\n return qafile, qatype",
"def _get_input_fname(self):\n fnames = self._get_fnames_from_related_checks()\n if len(fnames) > 1:\n msg = (\"referencing more than one file per check system \"\n \"is not yet supported by this script.\")\n raise SSGError(msg)\n return fnames.pop() if fnames else None",
"def get_pdffilename(self):\n project_dir = os.path.dirname(self.template_file)\n #print yaml.load(open(os.path.join(project_dir, 'index.yaml')))\n\n pdfkeys = yaml.load(open(os.path.join(project_dir, 'index.yaml')))['pdffilename']\n filename = os.path.join(project_dir, 'reports',\n ''.join(['_'.join([self.vals[key] for key in pdfkeys]), '_', self.uniq_id, '.pdf']))\n\n #TODO: uniq_id is still not really unique and there is small theoretical possiblity\n # that filename may reflect older patient. However this will happen only if the\n # older record is deleted, so should not matter much.\n return filename",
"def test_hisat_fq(expected_fixture, tmp_dir, sample, file_name):\n tmp_dir_name = os.path.basename(os.path.normpath(tmp_dir))\n fastq.equal_fastq(os.path.join(expected_fixture, tmp_dir_name, sample,\n file_name),\n os.path.join(tmp_dir, sample, file_name))",
"def check_seq_format(self, in_file):\n\t\tin_file = str(object=in_file)\n\t\tself._check_file(in_file)\n\t\tout = ''\n\t\tif in_file.lower().endswith('.fa') or in_file.lower().endswith('.fasta') or in_file.lower().endswith('.fas'):\n\t\t\tout = os.path.splitext(in_file)[0] + '.fq'\n\t\t\tif not self._check_file(out):\n\t\t\t\tcmd = 'fasta_to_fastq' + ' ' + in_file + ' > ' + out\n\t\t\t\tlog.debug(str(cmd))\n\t\t\t\tself.cmd.append(cmd)\n\t\t\treturn out\n\t\telse:\n\t\t\tlog.debug('Format seems to be fastq.')\n\t\t\treturn in_file",
"def fileName(self, QAbstractFileEngine_FileName_file=None): # real signature unknown; restored from __doc__\r\n return QString",
"def real_name(self, read_artifact):\n\n if self.configured_name is None:\n return self.filename\n\n parts = []\n for part in self.configured_name:\n if isinstance(part, Artifact):\n part = read_artifact(part)\n if part is None:\n return None\n parts.append(part)\n return \"\".join(parts)",
"def getFilename(self, frameNum):\n\t\treturn self.format % (self.dirname, self.frameName, frameNum)",
"def _subject_name(path):\n subject = os.path.basename(path)\n subject = subject.replace('-test', '')\n subject = subject.replace('-spec', '')\n subject = subject.replace('-unit', '')\n subject = subject.replace('.test', '')\n subject = subject.replace('.spec', '')\n subject = subject.replace('.unit', '')\n subject = subject.replace('.acceptance', '')\n subject = subject.split('.')[0]\n\n if subject == \"index\":\n # use the parent directory's name\n subject = _directory(path)\n\n return subject"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in a line of a tqisa file and returns the starting time. This corrects for the timing in the "bs" instruction. Time is in units of clocks. | def get_start_time(line: str):
start_time = int(line.split(':')[0])
if 'bs' in line:
# Takes the second character after "bs"
pre_interval = int(line.split('bs')[1][1])
start_time += pre_interval
return start_time | [
"def get_start_time(self):\n with open(self.fp_file, 'r') as f:\n lines = f.readlines()\n starttime = 999999999999\n for x in lines:\n if 'TRACK_TIME' in x:\n ttemp = float(x[11:])\n starttime = min(starttime, ttemp)\n\n self.starttime = starttime\n\n return",
"def get_monitor_start_time():\n \n # read the 8th of December data as a list of strings\n# f = open('../data_p_beam/2_second/20171208.csv')\n# lines = f.readlines()\n# f.close()\n \n # !!! temporarily changing this to a run closer to the start of where\n # proper data was first collected\n filename = 'T071217_0001.txt'\n f = open('../data_ucn/monitor_detector/' + filename)\n lines = f.readlines()\n f.close()\n \n date_time = filename[1:3].zfill(2) + \\\n '.12.2017 ' + \\\n lines[26][15:23]\n\n pattern = '%d.%m.%Y %H:%M:%S'\n start_time = int(time.mktime(time.strptime(date_time, pattern)))\n \n return start_time",
"def first_get_times(self, header, seq=True):\n times = header[:, -3] / np.float(self.nperpacket) \\\n + header[:, -2].astype(np.float)\n \n\n if seq is True:\n seq = header[:, -1] \n times = (seq - seq[0]) / 625.0**2 + times[0]\n times = seq / 625.0**2\n\n times = header[:, -1] * 2.56e-6\n #liam edit\n return times#self.J2000_to_unix(times)",
"def _get_recording_start_time(self) -> float:\n recording_start_time = 0.0\n if self.sync_message_file is not None:\n with open(self.sync_message_file, \"r\") as f:\n sync_strs = f.read()\n sync_lines = sync_strs.split(\"\\n\")\n for line in sync_lines:\n if \"Start Time\" in line:\n tokens = line.split(\":\")\n start_time = int(tokens[-1])\n sample_rate = int(tokens[0].split(\"@\")[-1].strip().split()[0])\n recording_start_time = start_time / float(sample_rate)\n return recording_start_time",
"def compute_marci_time(self, line):\n if not hasattr(self, \"_num_framelets\"):\n self._num_bands = self.label[\"IsisCube\"][\"Core\"][\"Dimensions\"][\"Bands\"]\n # is the detector line summing/line scale factor\n sum_mode = self.label[\"IsisCube\"][\"Instrument\"][\"SummingMode\"]\n\n framelet_offset_factor = self.label[\"IsisCube\"][\"Instrument\"][\"ColorOffset\"]\n if self.flipped_framelets:\n framelet_offset_factor *= -1\n\n self._framelet_offset_lookup = {\n \"NIR\" : 0 * framelet_offset_factor,\n \"RED\" : 1 * framelet_offset_factor,\n \"ORANGE\" : 2 * framelet_offset_factor,\n \"GREEN\" : 3 * framelet_offset_factor,\n \"BLUE\" : 4 * framelet_offset_factor,\n \"LONG_UV\" : 5 * framelet_offset_factor,\n \"SHORT_UV\" : 6 * framelet_offset_factor,\n }\n self._filters = self.label[\"IsisCube\"][\"BandBin\"][\"FilterName\"]\n\n self._framelet_rate = self.label[\"IsisCube\"][\"Instrument\"][\"InterframeDelay\"].value\n framelet_height = 16\n\n self._actual_framelet_height = framelet_height / sum_mode\n\n num_lines = self.label[\"IsisCube\"][\"Core\"][\"Dimensions\"][\"Lines\"]\n self._num_framelets = num_lines / (16 / sum_mode)\n\n times = []\n for band in range(self._num_bands):\n framelet = ((line - 0.5) / self._actual_framelet_height) + 1\n framelet_offset = self._framelet_offset_lookup[self._filters[band]]\n adjusted_framelet = framelet - framelet_offset\n\n time = self.start_time\n # Keeping in line with ISIS\n if not self.flipped_framelets:\n time += (adjusted_framelet - 1) * self._framelet_rate\n else:\n time += (self._num_framelets - adjusted_framelet) * self._framelet_rate\n times.append(time)\n return times",
"def get_elapsed_time(line):\n\tsp= line.strip().split()\n\t#print sp\n\tsec=float(sp[3])\n\thr = sec/(60.0*60.0)\n\treturn hr",
"def process_line(self, line: SccLine) -> SmpteTimeCode:\n\n debug = str(line.time_code) + \"\\t\"\n\n for scc_word in line.scc_words:\n\n if self.previous_code == scc_word.value:\n continue\n\n line.time_code.add_frames()\n\n if scc_word.value == 0x0000:\n continue\n\n if scc_word.byte_1 < 0x20:\n\n control_code = SccControlCode.find(scc_word.value)\n if control_code is not None \\\n and control_code is SccControlCode.find(self.previous_code):\n # Skip duplicated control code from 'Field 2'\n line.time_code.add_frames(-1)\n continue\n\n attribute_code = SccAttributeCode.find(scc_word.value)\n mid_row_code = SccMidRowCode.find(scc_word.value)\n pac = SccPreambleAddressCode.find(scc_word.byte_1, scc_word.byte_2)\n spec_char = SccSpecialCharacter.find(scc_word.value)\n extended_char = SccExtendedCharacter.find(scc_word.value)\n\n if pac is not None:\n debug += \"[PAC|\" + str(pac.get_row()) + \"|\" + str(pac.get_indent())\n if pac.get_color() is not None:\n debug += \"|\" + str(pac.get_color())\n if pac.get_font_style() is not None:\n debug += \"|I\"\n if pac.get_text_decoration() is not None:\n debug += \"|U\"\n debug += \"/\" + hex(scc_word.value) + \"]\"\n self.process_preamble_address_code(pac, line.time_code)\n self.previous_code_type = type(pac)\n\n elif attribute_code is not None:\n debug += \"[ATC/\" + hex(scc_word.value) + \"]\"\n self.process_attribute_code(attribute_code)\n self.previous_code_type = type(attribute_code)\n\n elif mid_row_code is not None:\n debug += \"[MRC|\" + mid_row_code.get_name() + \"/\" + hex(scc_word.value) + \"]\"\n self.process_mid_row_code(mid_row_code, line.time_code)\n self.previous_code_type = type(mid_row_code)\n\n elif control_code is not None:\n debug += \"[CC|\" + control_code.get_name() + \"/\" + hex(scc_word.value) + \"]\"\n self.process_control_code(control_code, line.time_code)\n self.previous_code_type = type(control_code)\n\n\n elif spec_char is not None:\n word = spec_char.get_unicode_value()\n debug += word\n self.process_text(word, line.time_code)\n self.previous_code_type = type(spec_char)\n\n elif extended_char is not None:\n if self.current_style in (SccCaptionStyle.PaintOn, SccCaptionStyle.RollUp):\n self.active_caption.get_current_text().backspace()\n else:\n self.buffered_caption.get_current_text().backspace()\n\n word = extended_char.get_unicode_value()\n debug += word\n self.process_text(word, line.time_code)\n self.previous_code_type = type(extended_char)\n\n else:\n debug += \"[??/\" + hex(scc_word.value) + \"]\"\n LOGGER.warning(\"Unsupported SCC word: %s\", hex(scc_word.value))\n self.previous_code_type = None\n\n else:\n word = scc_word.to_text()\n debug += word\n self.process_text(word, line.time_code)\n self.previous_code_type = str\n\n self.previous_code = scc_word.value\n\n LOGGER.debug(debug)\n\n return line.time_code",
"def observation_time_start(self):\n return self.time_ref + u.Quantity(self.table.meta[\"TSTART\"], \"second\")",
"def ephemeris_start_time(self):\n if not hasattr(self, \"_ephemeris_start_time\"):\n if not self.flipped_framelets:\n line = 0.5\n else:\n line = self.label[\"IsisCube\"][\"Core\"][\"Dimensions\"][\"Lines\"] + 0.5\n self._ephemeris_start_time = min(self.compute_marci_time(line))\n return self._ephemeris_start_time",
"def time(line, xmlFile):\n time = re.match(\"(.*?)(\\d+:\\d\\d[pm|am]+)\", line)\n if time is None:\n pass\n else:\n other, timeFinal = time.groups()\n print(\"\\t<qTime>\" + timeFinal + \"</qTime>\", file = xmlFile)",
"def get_start_time():\n return read_file_value(\"start_file.json\", \"build_start_time\")",
"def ephemeris_start_time(self):\n if not hasattr(self, \"_ephemeris_start_time\"):\n tdi_mode = self.label[\"IsisCube\"][\"Instrument\"][\"Tdi\"]\n bin_mode = self.label[\"IsisCube\"][\"Instrument\"][\"Summing\"]\n # Code replicated from the ISIS HiRise Camera Model\n\n # The -74999 is the code to select the transformation from\n # high-precision MRO SCLK to ET\n start_time = spice.scs2e(-74999, self.spacecraft_clock_start_count)\n # Adjust the start time so that it is the effective time for\n # the first line in the image file. Note that on 2006-03-29, this\n # time is now subtracted as opposed to adding it. The computed start\n # time in the EDR is at the first serial line.\n start_time -= self.un_binned_rate * ((tdi_mode / 2.0) - 0.5);\n # Effective observation\n # time for all the TDI lines used for the\n # first line before doing binning\n start_time += self.un_binned_rate * ((bin_mode / 2.0) - 0.5);\n self._ephemeris_start_time = start_time\n return self._ephemeris_start_time",
"def get_time_of_first_state_vector(file_path):\n time_of_first_state_vector = ''\n with open(file_path) as f:\n for line in f:\n if line.startswith('time_of_first_state_vector'):\n time_of_first_state_vector = re.split(r'\\s+', line)[1]\n return time_of_first_state_vector",
"def get_time(filetime):\r\n winticks = struct.unpack('<Q', filetime)[0]\r\n microsecs = (winticks - WINDOWS_TICKS_TO_POSIX_EPOCH) / WINDOWS_TICKS\r\n return datetime.fromtimestamp(microsecs)",
"def get_starttime(self):\n starttime = self.header_data[\"Extended Header\"][\n \"32-byte Extended Header Block #1\"\n ][\"remote_unit_epoch_start_time\"][\"value\"] * (10 ** -6)\n return UTCDateTime(starttime)",
"def TimeFromFileName(file_path):\n label = TimeLabel(file_path)\n if label == \"\":\n return 0.0\n else:\n return float(label)",
"def ephemeris_start_time(self):\n return spice.scs2e(-74999, self.spacecraft_clock_start_count)",
"def time_start(self, section):\r\n if (section == 0):\r\n return self.t0\r\n else:\r\n time_start_index = range(-self.number_of_section - 1, 0)\r\n return self.p[time_start_index[section]] * self.unit_time",
"def _test_serving_size_preparation_time(line):\n match = _sspt_re.match(line.rstrip())\n if match:\n return True, match.group(1), match.group(2)\n else:\n return False, '', ''",
"def _parseTimeLine(lineno, line):\n m = re.match(RE_ALL, line)\n if m is None:\n return InvalidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n errorMessage=\"Bad format\")\n else:\n trigram = m.group('trigram')\n\n dt = m.group('date')\n try:\n date = datetime.datetime.strptime(dt,'%d/%m/%Y')\n except:\n return InvalidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n errorMessage=\"Error with the date '%s'. Format is 03/12/2017\" % dt)\n\n duration = minutes(m.group('duration'))\n if duration is None:\n return InvalidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n errorMessage=\"Invalid duration '%d'. Format is 10h or 1h30 or 30m\" % duration)\n\n issue = m.group('issue')\n if issue[0] != '#':\n return InvalidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n errorMessage= \"Issue must start with #. Found '%s'\" % issue)\n else:\n try:\n issueNb = int(issue[1:])\n except:\n return InvalidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n errorMessage=\"Issue specification is incorrect.\"\n \" Should be like #23. Found '%s'\" \\\n % issue )\n return ValidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n trigram=trigram,\n date=date,\n duration=duration,\n issueNb = issueNb\n )\n\n\n\n # timeTrackingEntries = []\n # nKOTimeLines = []\n # for (no, line) in nTimeLines:\n # m = re.match(RE_ALL, line)\n # if m:\n # te = ValidTimeTrackingEntry(\n # lineno=no,\n # line=line,\n # trigram = m.group('trigram'),\n # date = m.group('date'),\n # duration = m.group('duration'),\n # issueNb = m.group('issue')\n # )\n # timeTrackingEntries += [te]\n # else:\n # nKOTimeLines += [(no, line)]\n # return (timeTrackingEntries, nKOTimeLines)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extracts the map for the smis and smit qubit registers from a qisa file | def get_register_map(qisa_fn: str):
reg_map = {}
with open(qisa_fn, 'r') as q_file:
linenum = 0
for line in q_file:
if 'start' in line:
break
if 'smis' in line or 'smit' in line:
reg_key = line[5:line.find(',')]
start_reg_idx = line.find('{')
reg_val = (line[start_reg_idx:].strip())
reg_map[reg_key] = eval(reg_val)
return reg_map | [
"def make_rms_map():\n\tpath = '/nfs/slac/g/ki/ki19/deuce/AEGIS/unzip/'\n\tfile_name = path+'seg_ids.txt'\n\tall_seg_ids = np.loadtxt(file_name, delimiter=\" \",dtype='S2')\n\t#all_seg_ids=['01']\n\tfilters = ['f606w', 'f814w']\n\tfor f in filters:\n\t\tfor fl in glob.glob(path+f+'/*_rms.fits'):\n\t\t\tos.remove(fl)\n\t\tfor id in all_seg_ids:\n\t\t\tfile_name = path + f +'/EGS_10134_'+ id +'_acs_wfc_'+f+'_30mas_unrot_wht.fits'\n\t\t\thdu = pyfits.open(file_name)\n\t\t\tdat = hdu[0].data\n\t\t\tnew_dat = 1/(np.array(dat)**0.5)\n\t\t\tnew_header = hdu[0].header\n\t\t\thdu.close()\n\t\t\tnew_name = path + f +'/EGS_10134_'+ id +'_acs_wfc_'+f+'_30mas_unrot_rms.fits'\n\t\t\tpyfits.writeto(new_name, new_dat, new_header)",
"def parse_fastq(file_name):\t\n\tfastq_dict = {}\n\tfile = open(file_name)\n\tfile_content = file.readlines()\n\ti = 0\n\twhile i < len(file_content):\n\t\tif i % 4 == 0:\n\t\t\tfastq_dict[file_content[i].strip('\\n')] = file_content[i+1].strip('\\n')\n\t\t\ti += 1\n\t\telse:\n\t\t\ti += 1\n\treturn fastq_dict",
"def writeQASM(coefficients,ham_terms,nqubits,filepath,mapping='JW'):\n f1 = open(filepath+'.qasm','w')\n f2 = open(filepath+'-b.out','w')\n n=-1\n f1.write('--- initialization ---\\n')\n for n in range(8):\n f1.write('\\tqubit\\tq'+str(n)+',0\\n')\n f1.write('--- MPS preparation ---\\n')\n f1.write('\\tRy\\tq0\\ta5\\n')\n f1.write('\\tcnot\\tq0,q7\\n')\n\n f1.write('\\tRy\\tq6\\ta4\\n')\n f1.write('\\tcnot\\tq0,q6\\n')\n f1.write('\\tRy\\tq6\\t-a4\\n') \n f1.write('\\tcnot\\tq0,q6\\n')\n \n f1.write('\\tX\\tq0\\n') \n f1.write('\\tcnot\\tq0,q5\\n') \n f1.write('\\tX\\tq0\\n') \n\n f1.write('\\tRy\\tq4\\ta1\\n')\n f1.write('\\tcnot\\tq0,q4\\n')\n f1.write('\\tRy\\tq4\\t-a1\\n')\n f1.write('\\tcnot\\tq0,q4\\n')\n\n f1.write('\\tRy\\tq4\\ta2\\n')\n f1.write('\\tcnot\\tq0,q4\\n')\n f1.write('\\tRy\\tq4\\t-a2\\n')\n f1.write('\\tcnot\\tq0,q4\\n')\n\n f1.write('\\tRy\\tq0\\ta0\\n')\n f1.write('\\tcnot\\tq0,q3\\n')\n\n f1.write('\\tcnot\\tq0,q2\\n')\n\n f1.write('\\tX\\tq0\\n') \n f1.write('\\tcnot\\tq0,q1\\n') \n\n writeUCC(coefficients,ham_terms, f1, nqubits, mapping)\n\n f1.close()",
"def _read_maps(self):\n try:\n f = open(self._procpath('%d/maps' % self.pid))\n except IOError as e:\n # ignore killed process\n if e.errno != errno.ENOENT:\n raise\n return\n for line in f:\n try:\n so = line.split()[5].strip()\n self.mapped.append(so)\n except IndexError:\n pass",
"def struct_import_pymatgen(filename,ubin=\"\"):\n import re\n from monty.io import zopen\n from monty.re import regrep \n from pymatgen.util.io_utils import clean_lines\n from pymatgen.io.pwscf import PWInput\n\n print(\"# Reading atomic coordinates from: \", filename)\n with zopen(filename, \"rt\") as f:\n string = f.read()\n\n lines = list(clean_lines(string.splitlines()))\n\n print(lines)\n def input_mode(line):\n if line[0] == \"&\":\n return (\"sections\", line[1:].lower())\n elif \"ATOMIC_SPECIES\" in line:\n return (\"pseudo\", )\n elif \"K_POINTS\" in line:\n return (\"kpoints\", line.split(\"{\")[1][:-1])\n elif \"CELL_PARAMETERS\" in line or \"ATOMIC_POSITIONS\" in line:\n return (\"structure\", line.split(\"{\")[1][:-1])\n elif line == \"/\":\n return None\n else:\n return mode\n\n sections = {\"control\": {}, \"system\": {}, \"electrons\": {}, \n \"ions\": {}, \"cell\":{}}\n pseudo = {}\n pseudo_index = 0\n lattice = []\n species = []\n coords = []\n structure = None\n site_properties = {\"pseudo\":[]}\n mode = None\n for line in lines:\n mode = input_mode(line)\n if mode == None:\n pass\n elif mode[0] == \"sections\":\n section = mode[1]\n m = re.match(r'(\\w+)\\(?(\\d*?)\\)?\\s*=\\s*(.*)', line)\n if m:\n key = m.group(1).strip()\n key_ = m.group(2).strip()\n val = m.group(3).strip()\n if key_ != \"\":\n if sections[section].get(key, None) == None:\n val_ = [0.0]*20 # MAX NTYP DEFINITION\n val_[int(key_)-1] = PWInput.proc_val(key, val)\n sections[section][key] = val_\n\n site_properties[key] = []\n else:\n sections[section][key][int(key_)-1] = PWInput.proc_val(key, val) \n else:\n sections[section][key] = PWInput.proc_val(key, val)\n\n# elif mode[0] == \"pseudo\":\n# m = re.match(r'(\\w+)\\s+(\\d*.\\d*)\\s+(.*)', line)\n# if m:\n# pseudo[m.group(1).strip()] = {}\n# pseudo[m.group(1).strip()][\"index\"] = pseudo_index\n# pseudo[m.group(1).strip()][\"pseudopot\"] = m.group(3).strip()\n# pseudo_index += 1\n# elif mode[0] == \"kpoints\":\n# m = re.match(r'(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)\\s+(\\d+)', line)\n# if m:\n# kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))\n# kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))\n# else:\n# kpoints_mode = mode[1]\n elif mode[0] == \"structure\":\n m_l = re.match(r'(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)', line)\n m_p = re.match(r'(\\w+)\\s+(-?\\d+\\.\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)', line)\n if m_l:\n lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]\n elif m_p:\n site_properties[\"pseudo\"].append(pseudo[m_p.group(1)][\"pseudopot\"])\n species += [pseudo[m_p.group(1)][\"pseudopot\"].split(\".\")[0]]\n coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]\n\n for k, v in site_properties.items():\n if k != \"pseudo\":\n site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)][\"index\"]])\n if mode[1] == \"angstrom\":\n coords_are_cartesian = True\n elif mode[1] == \"crystal\":\n coords_are_cartesian = False\n\n structure = Structure(Lattice(lattice), species, coords, \n coords_are_cartesian=coords_are_cartesian,\n site_properties=site_properties)\n return structure",
"def _build_mapping_file(self, samples):\n with TRN:\n all_sample_ids = set()\n sql = \"\"\"SELECT filepath_id, filepath\n FROM qiita.filepath\n JOIN qiita.prep_template_filepath USING (filepath_id)\n JOIN qiita.prep_template_preprocessed_data\n USING (prep_template_id)\n JOIN qiita.preprocessed_processed_data\n USING (preprocessed_data_id)\n JOIN qiita.filepath_type USING (filepath_type_id)\n WHERE processed_data_id = %s\n AND filepath_type = 'qiime_map'\n ORDER BY filepath_id DESC\"\"\"\n _id, fp = get_mountpoint('templates')[0]\n to_concat = []\n\n for pid, samples in viewitems(samples):\n if len(samples) != len(set(samples)):\n duplicates = find_duplicates(samples)\n raise QiitaDBError(\"Duplicate sample ids found: %s\"\n % ', '.join(duplicates))\n # Get the QIIME mapping file\n TRN.add(sql, [pid])\n qiime_map_fp = TRN.execute_fetchindex()[0][1]\n # Parse the mapping file\n qiime_map = pd.read_csv(\n join(fp, qiime_map_fp), sep='\\t', keep_default_na=False,\n na_values=['unknown'], index_col=False,\n converters=defaultdict(lambda: str))\n qiime_map.set_index('#SampleID', inplace=True, drop=True)\n qiime_map = qiime_map.loc[samples]\n\n duplicates = all_sample_ids.intersection(qiime_map.index)\n if duplicates or len(samples) != len(set(samples)):\n # Duplicate samples so raise error\n raise QiitaDBError(\"Duplicate sample ids found: %s\"\n % ', '.join(duplicates))\n all_sample_ids.update(qiime_map.index)\n to_concat.append(qiime_map)\n\n merged_map = pd.concat(to_concat)\n\n cols = merged_map.columns.values.tolist()\n cols.remove('BarcodeSequence')\n cols.remove('LinkerPrimerSequence')\n cols.remove('Description')\n new_cols = ['BarcodeSequence', 'LinkerPrimerSequence']\n new_cols.extend(cols)\n new_cols.append('Description')\n merged_map = merged_map[new_cols]\n\n # Save the mapping file\n _, base_fp = get_mountpoint(self._table)[0]\n mapping_fp = join(base_fp, \"%d_analysis_mapping.txt\" % self._id)\n merged_map.to_csv(mapping_fp, index_label='#SampleID',\n na_rep='unknown', sep='\\t')\n\n self._add_file(\"%d_analysis_mapping.txt\" % self._id, \"plain_text\")",
"def get_resinum_to_resi_map(resiname_file, offset = 0, indexing = 1, aa_code = 3):\n resi_map = {}\n\n if resiname_file == None:\n print('Warning: No prmtop or PDB file given.\\n' + \\\n ' No residue number information will be presented.')\n for i in range(10000):\n resi_map[i] = str(i)\n return resi_map\n\n try:\n f = file(resiname_file)\n except IOError:\n print('Warning: Could not open ' + resiname_file + '.\\n' + \\\n ' No residue number information will be presented.')\n for i in range(10000):\n resi_map[i] = str(i)\n return resi_map\n\n # If the file is a prmtop file...\n\n if not resiname_file.endswith('.pdb'):\n resi_num = 1\n \n residue_section = False\n for line in f:\n if line.startswith('%FLAG RESIDUE_POINTER'):\n break\n if line.startswith('%FLAG RESIDUE_LABEL'):\n residue_section = True\n if not residue_section or line.startswith('%F'):\n continue\n else:\n residue_names = line.split()\n for resi_name in residue_names:\n if aa_code == 1:\n resi_name = ThrLett_to_OneLett(resi_name)\n resi_name = resi_name.capitalize() + str(resi_num + offset)\n resi_map[resi_num + indexing - 1] = resi_name\n resi_num += 1\n\n # If the file is a PDB file...\n\n else:\n for line in f:\n if not (line.startswith('ATOM') or line.startswith('HETATM')):\n continue\n resi_name = line[17:21].strip()\n resi_num = int(line[22:26].strip())\n if aa_code == 1:\n resi_name = ThrLett_to_OneLett(resi_name)\n resi_name = resi_name.capitalize() + str(resi_num + offset)\n resi_map[resi_num + indexing - 1] = resi_name\n \n f.close()\n\n if not resi_map:\n print(\"Warning: Could not extract residue information from prmtop or PDB file.\\n\")\n print(\" No residue number information will be presented.\")\n for i in range(10000):\n resi_map[i] = str(i)\n return resi_map\n \n return resi_map",
"def readms1_iter(filename,mzi_min = 10000):\n fo = open(filename)\n line = fo.readline()\n while line != \"\":\n if line[0] == \"H\":\n line = fo.readline()\n else:\n break\n lsmzs = {}\n lsmzi = {}\n scan_num = None\n rt = None\n scan_info = []\n while line:\n if 'S' in line:\n if scan_num is not None:\n lsmzs = np.array(lsmzs)\n lsmzi = np.array(lsmzi)\n yield scan_num, rt, lsmzs, lsmzi\n scan_num = int(line.split()[1])\n line = fo.readline()\n rt = float(line.split()[-1])\n scan_info.append((scan_num, rt))\n fo.readline()\n fo.readline()\n fo.readline()\n lsmzs = []\n lsmzi = []\n line = fo.readline()\n while line:\n if 'S' not in line:\n if len(line.split()) == 2:\n mzv, mzi = line.split()\n else:\n print(line)\n print('something wrong with this line')\n line = ''\n break\n mzv = float(mzv)\n mzi = float(mzi)\n if mzi >= mzi_min:\n lsmzs.append(mzv)\n lsmzi.append(mzi)\n line = fo.readline()\n else:\n break\n fo.close()\n lsmzs = np.array(lsmzs)\n lsmzi = np.array(lsmzi)\n yield scan_num, rt, lsmzs, lsmzi",
"def map_sg_ses_enclosure_slot_to_sas_wwn():\n\n Debug(\"def map_sg_ses_enclosure_slot_to_sas_wwn() entry\")\n\n map = {}\n\n map_enc = map_enclosures()\n map_wwn = map_sata_wwn_to_hba_wwn()\n\n for enc in map_enc.keys():\n\n sg_dev = map_enc[enc][\"sg_dev\"]\n\n map[sg_dev] = {}\n\n slot = \"\"\n sas_wwn = \"\"\n\n Debug(\"map_sg_ses_enclosure_slot_to_sas_wwn:: sg_dev = \" + str(sg_dev))\n\n for line in SysExec(\"sg_ses -p aes \" + str(sg_dev)).splitlines():\n\n if re.search(\"Element type: SAS expander\", line):\n break\n\n if not re.search(\"(Element index:|SAS address|target port for:)\", line):\n continue\n\n if re.search(\"attached SAS address\", line):\n continue\n\n if re.search(\"target port for:\", line):\n\n line = line.split(\":\")[1].strip()\n\n if line == \"SSP\":\n protocol = \"SAS\"\n elif line == \"SATA_device\":\n protocol = \"SATA\"\n else:\n protocol = \"UNKNOWN_PROTO\"\n\n if re.search(\"Element index:\", line):\n slot = ' '.join(line.split())\n slot = slot.split(\":\")[1]\n slot = ' '.join(slot.split())\n slot = slot.split()[0]\n\n if re.search(\"SAS address\", line):\n\n sas_wwn = line.split(\":\")[1].strip()\n\n if protocol == \"SATA\":\n\n if sas_wwn in map_wwn:\n sas_wwn = map_wwn[sas_wwn]\n\n if sas_wwn == \"0x0\":\n sas_wwn = \"EMPTY\"\n\n if slot and not slot in map[sg_dev]:\n map[sg_dev][slot] = {}\n\n if sas_wwn:\n map[sg_dev][slot][\"wwn\"] = sas_wwn\n\n if slot and sas_wwn:\n\n slot = \"\"\n sas_wwn = \"\"\n\n#\tDebug(\"map_sg_ses_enclosure_slot_to_sas_wwn:: map = \" + str(map))\n Debug(\"def map_sg_ses_enclosure_slot_to_sas_wwn() exit\")\n\n return(map)",
"def raw6_to_stims(raw6file,band,eclipse,margin=90.001):\n print \"Extracting stim data from \",raw6file,\" ...\"\n print \" Using a search box with sides of \",margin,\" arcseconds.\"\n # This is unscoped for some reason... so I'm just coding it.\n xclk, yclk, xcen, ycen, xscl, yscl, xslp, yslp = clk_cen_scl_slp(band,eclipse)\n\n chunksz = 1000000\n print \"Loading raw6 file...\"\n raw6hdulist = pyfits.open(raw6file,memmap=1)\n raw6htab = raw6hdulist[1].header\n nphots = raw6htab['NAXIS2']\n stim1={'t':np.array([]),'q':np.array([]),'xb':np.array([]),'xamc':np.array([]),'yamc':np.array([]),'xa':np.array([]),'ya':np.array([]),'x':np.array([]),'y':np.array([]),'yb':np.array([]),'yap':np.array([])}\n stim2={'t':np.array([]),'q':np.array([]),'xb':np.array([]),'xamc':np.array([]),'yamc':np.array([]),'xa':np.array([]),'ya':np.array([]),'x':np.array([]),'y':np.array([]),'yb':np.array([]),'yap':np.array([])}\n stim3={'t':np.array([]),'q':np.array([]),'xb':np.array([]),'xamc':np.array([]),'yamc':np.array([]),'xa':np.array([]),'ya':np.array([]),'x':np.array([]),'y':np.array([]),'yb':np.array([]),'yap':np.array([])}\n stim4={'t':np.array([]),'q':np.array([]),'xb':np.array([]),'xamc':np.array([]),'yamc':np.array([]),'xa':np.array([]),'ya':np.array([]),'x':np.array([]),'y':np.array([]),'yb':np.array([]),'yap':np.array([])}\n print \"\"\n for i in xrange(int(nphots/chunksz)+1):\n csvrows = []\n chunkbeg, chunkend = i*chunksz, (i+1)*chunksz-1\n if chunkend > nphots:\n chunkend = nphots-1\n chunkid = \" \"+str(i+1)+\" of \"+str(int(nphots/chunksz)+1)+\": \"\n print_inline(chunkid+\"Unpacking raw6 data...\")\n #print chunkbeg,chunkend\n t = np.array(raw6hdulist[1].data.field('t')[chunkbeg:chunkend])\n phb1 = np.array(raw6hdulist[1].data.field('phb1')[chunkbeg:chunkend],dtype='int64')\n phb2 = np.array(raw6hdulist[1].data.field('phb2')[chunkbeg:chunkend],dtype='int64')\n phb3 = np.array(raw6hdulist[1].data.field('phb3')[chunkbeg:chunkend],dtype='int64')\n phb4 = np.array(raw6hdulist[1].data.field('phb4')[chunkbeg:chunkend],dtype='int64')\n phb5 = np.array(raw6hdulist[1].data.field('phb5')[chunkbeg:chunkend],dtype='int64')\n\n q = ((phb4 & 3) << 3) + ((phb5 & 224) >> 5)\n xb = phb1 >> 5\n xamc = np.array( ((phb1 & 31) << 7), dtype='int16' ) + np.array( ((phb2 & 254) >> 1), dtype='int16') - np.array( ((phb1 & 16) << 8), dtype='int16')\n yb = ((phb2 & 1) << 2) + ((phb3 & 192) >> 6)\n yamc = np.array( ((phb3 & 63) << 6), dtype='int16') + np.array( ((phb4 & 252) >> 2), dtype='int16') - np.array( ((phb3 & 32) << 7), dtype='int16')\n xa = ((phb5 & 16) >> 4) + ((phb5 & 3) << 3) + ((phb5 & 12) >> 1)\n xraw0 = xb*xclk + xamc\n yraw0 = yb*yclk + yamc\n ya = np.array( ((((yraw0/(2*yclk) - xraw0/(2*xclk)) + 10)*32) + xa), dtype='int64') % 32\n xraw = xraw0 + np.array((((xa+7) % 32) - 16), dtype='int64') * xslp\n yraw = yraw0 + np.array((((ya+7) % 32) - 16), dtype='int64') * yslp\n x = (xraw - xcen)*xscl\n y = (yraw - ycen)*yscl\n\n index1,index2,index3,index4=find_stims_index(x,y,band,eclipse,margin)\n #print (len(index1)+len(index2)+len(index3)+len(index4))/4.\n\n # There may well be a better way to do this\n stim1['t'] = np.append(stim1['t'],t[index1])\n stim1['x'] = np.append(stim1['x'],x[index1])\n stim1['y'] = np.append(stim1['y'],y[index1])\n stim1['q'] = np.append(stim1['q'],q[index1])\n stim1['xa'] = np.append(stim1['xa'],xa[index1])\n stim1['xb'] = np.append(stim1['xb'],ya[index1])\n stim1['ya'] = np.append(stim1['ya'],ya[index1])\n stim1['yb'] = np.append(stim1['yb'],yb[index1])\n stim1['xamc'] = 
np.append(stim1['xamc'],xamc[index1])\n stim1['yamc'] = np.append(stim1['yamc'],yamc[index1])\n stim1['yap'] = np.append(stim1['yap'],rtaph_yap(ya[index1],yb[index1],yamc[index1]))\n stim2['t'] = np.append(stim2['t'],t[index2])\n stim2['x'] = np.append(stim2['x'],x[index2])\n stim2['y'] = np.append(stim2['y'],y[index2])\n stim2['q'] = np.append(stim2['q'],q[index2])\n stim2['xa'] = np.append(stim2['xa'],xa[index2])\n stim2['xb'] = np.append(stim2['xb'],ya[index2])\n stim2['ya'] = np.append(stim2['ya'],ya[index2])\n stim2['yb'] = np.append(stim2['yb'],yb[index2])\n stim2['xamc'] = np.append(stim2['xamc'],xamc[index2])\n stim2['yamc'] = np.append(stim2['yamc'],yamc[index2])\n stim2['yap'] = np.append(stim2['yap'],rtaph_yap(ya[index2],yb[index2],yamc[index2]))\n stim3['t'] = np.append(stim3['t'],t[index3])\n stim3['x'] = np.append(stim3['x'],x[index3])\n stim3['y'] = np.append(stim3['y'],y[index3])\n stim3['q'] = np.append(stim3['q'],q[index3])\n stim3['xa'] = np.append(stim3['xa'],xa[index3])\n stim3['xb'] = np.append(stim3['xb'],ya[index3])\n stim3['ya'] = np.append(stim3['ya'],ya[index3])\n stim3['yb'] = np.append(stim3['yb'],yb[index3])\n stim3['xamc'] = np.append(stim3['xamc'],xamc[index3])\n stim3['yamc'] = np.append(stim3['yamc'],yamc[index3])\n stim3['yap'] = np.append(stim3['yap'],rtaph_yap(ya[index3],yb[index3],yamc[index3]))\n stim4['t'] = np.append(stim4['t'],t[index4])\n stim4['x'] = np.append(stim4['x'],x[index4])\n stim4['y'] = np.append(stim4['y'],y[index4])\n stim4['q'] = np.append(stim4['q'],q[index4])\n stim4['xa'] = np.append(stim4['xa'],xa[index4])\n stim4['xb'] = np.append(stim4['xb'],ya[index4])\n stim4['ya'] = np.append(stim4['ya'],ya[index4])\n stim4['yb'] = np.append(stim4['yb'],yb[index4])\n stim4['xamc'] = np.append(stim4['xamc'],xamc[index4])\n stim4['yamc'] = np.append(stim4['yamc'],yamc[index4])\n stim4['yap'] = np.append(stim4['yap'],rtaph_yap(ya[index4],yb[index4],yamc[index4]))\n\n print_inline(\" Done.\")\n\n return stim1,stim2,stim3,stim4",
"def read(self):\n \n reader = Scan\n \n nanonis_data = reader(self._input_file_path)\n\n header_dict = nanonis_data.header\n signal_dict = nanonis_data.signals\n\n parm_dict, data_dict = self._parse_sxm_parms(header_dict,\n signal_dict)\n \n self.parm_dict = parm_dict\n self.data_dict = data_dict\n\n #Specify dimensions\n x_dim = self.data_dict['Dimensions'][0]\n y_dim = self.data_dict['Dimensions'][1]\n\n dataset_list = []\n channel_parms = self.parm_dict['channel_parms']\n\n for dataset_name in list(self.data_dict.keys())[:-1]:\n \n data_mat = self.data_dict[dataset_name]\n \n #Make a sidpy dataset\n data_set = sid.Dataset.from_array(data_mat, name = dataset_name)\n\n #Set the data type\n data_set.data_type = sid.DataType.IMAGE\n \n metadata = channel_parms[dataset_name]\n\n # Add quantity and units\n data_set.units = metadata['Unit']\n data_set.quantity = metadata['Name']\n\n # Add dimension info\n data_set.set_dimension(0, x_dim)\n data_set.set_dimension(1, y_dim)\n \n # append metadata \n def merge_dict(dict1, dict2):\n res = {**dict1, **dict2}\n return res\n \n chan_metadata = self.parm_dict['channel_parms'][dataset_name]\n orig_metadata = self.parm_dict['meas_parms']\n \n data_set.original_metadata = merge_dict(chan_metadata,orig_metadata)\n dataset_list.append(data_set)\n \n return dataset_list",
"def sarread(file):\n\n filename, EXT = os.path.splitext(file)\n\n if EXT == '.pkl':\n f = open(file, 'rb')\n # for python2\n if sys.version_info < (3, 1):\n sardata = pkl.load(f)\n sarplat = pkl.load(f)\n # for python3\n else:\n sardata = pkl.load(f, encoding='latin1')\n sarplat = pkl.load(f, encoding='latin1')\n f.close()\n elif EXT == '.mat':\n data = scio.loadmat(file, struct_as_record=True)\n sardata = SarData()\n sarplat = SarPlat()\n\n temp = data['sardata']\n sardata.name = str(temp['name'][0][0][0])\n sardata.image = temp['image'][0][0]\n sardata.rawdata = temp['rawdata'][0][0]\n sardata.description = temp['description'][0][0]\n\n if 'sarplat' in data.keys():\n temp = data['sarplat']\n sarplat.name = str(temp['name'][0][0][0])\n sensor = temp['sensor'][0][0][0][0]\n acquis = temp['acquisition'][0][0][0][0]\n params = temp['params'][0][0][0][0]\n selection = temp['selection'][0][0][0][0]\n\n sensordict = {}\n sensordict['Fc'] = sensor['Fc'][0][0]\n sensordict['H'] = sensor['H'][0][0]\n sensordict['V'] = sensor['V'][0][0]\n sensordict['Tp'] = sensor['Tp'][0][0]\n sensordict['Kr'] = sensor['Kr'][0][0]\n sensordict['Lr'] = sensor['Lr'][0][0]\n sensordict['La'] = sensor['La'][0][0]\n sensordict['PRF'] = sensor['PRF'][0][0]\n sensordict['Fs'] = sensor['Fs'][0][0]\n sensordict['Name'] = str(sensor['Name'][0])\n sensordict['Br'] = sensor['Br'][0][0]\n sensordict['Wl'] = sensor['Wl'][0][0]\n sensordict['Rs'] = sensor['Rs'][0][0]\n sensordict['To'] = sensor['To'][0][0]\n sensordict['Ws'] = sensor['Ws'][0][0]\n sensordict['Vs'] = sensor['Vs'][0][0]\n sensordict['Vr'] = sensor['Vr'][0][0]\n sensordict['Vg'] = sensor['Vg'][0][0]\n sarplat.sensor = sensordict\n # print(sensordict)\n\n acqdict = {}\n acqdict['StateVector'] = acquis['StateVector']\n acqdict['PlatCenter'] = acquis['PlatCenter'][0]\n acqdict['SceneCenter'] = acquis['SceneCenter'][0]\n acqdict['BeamCenter'] = acquis['BeamCenter'][0]\n acqdict['SceneArea'] = acquis['SceneArea'][0]\n acqdict['BeamArea'] = acquis['BeamArea'][0]\n acqdict['EchoSize'] = acquis['EchoSize'][0]\n acqdict['As'] = acquis['As'][0][0]\n acqdict['Ar'] = acquis['Ar'][0][0]\n acqdict['Ad'] = acquis['Ad'][0][0]\n acqdict['Aon'] = acquis['Aon'][0][0]\n acqdict['Aba'] = acquis['Aba'][0][0]\n acqdict['Abr'] = acquis['Abr'][0][0]\n acqdict['Ai'] = acquis['Ai'][0][0]\n acqdict['Rbc'] = acquis['Rbc'][0][0]\n acqdict['Rb0'] = acquis['Rb0'][0][0]\n acqdict['Rsc'] = acquis['Rsc'][0][0]\n acqdict['Rs0'] = acquis['Rs0'][0][0]\n\n # print(acquis['SceneArea'], acquis['PlatCenter'], acquis['EchoSize'])\n sarplat.acquisition = acqdict\n\n # sarplat.params = None\n pardict = {}\n pardict['GeometryMode'] = str(params['GeometryMode'][0]) # geometry mode: beam/scene\n pardict['Rb0'] = params['Rb0'][0][0] # beam center min distance\n pardict['Rbc'] = params['Rbc'][0][0] # beam center distance\n pardict['Rs0'] = params['Rs0'][0][0] # scene center min distance\n pardict['Rsc'] = params['Rsc'][0][0] # scene center distance\n pardict['Tp'] = params['Tp'][0][0] # Pulse repetition period\n pardict['PRT'] = params['PRT'][0][0] # azimuth\n pardict['PRF'] = params['PRF'][0][0] # azimuth\n pardict['Fsa'] = params['Fsa'][0][0] # azimuth, Fsa = PRF\n pardict['Fs'] = params['Fs'][0][0] # range\n pardict['Fsr'] = params['Fsr'][0][0] # range, Fsr = Fs\n pardict['Na'] = params['Na'][0][0] # azimuth\n pardict['Nr'] = params['Nr'][0][0] # range\n pardict['Ka'] = params['Ka'][0][0] # azimuth\n pardict['Kr'] = params['Kr'][0][0] # range\n pardict['xmin'] = params['xmin'][0][0]\n pardict['xmax'] = 
params['xmax'][0][0]\n pardict['ymin'] = params['ymin'][0][0]\n pardict['ymax'] = params['ymax'][0][0]\n pardict['Rnear'] = params['Rnear'][0][0]\n pardict['Rfar'] = params['Rfar'][0][0]\n pardict['tnear'] = params['tnear'][0][0]\n pardict['tfar'] = params['tfar'][0][0]\n pardict['tstart'] = params['tstart'][0][0]\n pardict['tend'] = params['tend'][0][0]\n pardict['ta'] = params['ta'][0] # time array in azimuth\n pardict['fa'] = params['fa'][0] # freq array in azimuth\n pardict['tr'] = params['tr'][0] # time array in range\n pardict['fr'] = params['fr'][0] # freq array in range\n pardict['Tsa'] = params['Tsa'][0][0] # resolution in azimuth\n pardict['Tsr'] = params['Tsr'][0][0] # resolution in range\n pardict['Xc'] = params['Xc'][0][0] # X center\n pardict['Yc'] = params['Yc'][0][0] # Y center\n pardict['DA'] = params['DA'][0][0]\n pardict['DR'] = params['DR'][0][0]\n pardict['DY'] = params['DY'][0][0]\n pardict['DX'] = params['DX'][0][0]\n pardict['FPa'] = params['FPa'][0][0]\n pardict['FPr'] = params['FPr'][0][0]\n pardict['BWa'] = params['BWa'][0][0]\n pardict['Ta'] = params['Ta'][0][0]\n pardict['Tr'] = params['Tr'][0][0]\n pardict['t0'] = params['t0'][0][0]\n pardict['tac'] = params['tac'][0][0]\n pardict['fadc'] = params['fadc'][0][0]\n pardict['Bdop'] = params['Bdop'][0][0]\n pardict['Nsar'] = params['Nsar'][0][0]\n pardict['Tsar'] = params['Tsar'][0][0]\n pardict['Lsar'] = params['Lsar'][0][0]\n\n pardict['As'] = params['As'][0][0] # squint angle\n pardict['Ar'] = params['Ar'][0][0] # squint angle\n pardict['Ad'] = params['Ad'][0][0] # depression angle\n pardict['Aon'] = params['Aon'][0][0] # off-nadir angle\n pardict['Aba'] = params['Aba'][0][0] # antenna azimuth beamwidth\n pardict['Abr'] = params['Abr'][0][0] # antenna range beamwidth\n pardict['Ai'] = params['Ai'][0][0] # incidence angle\n\n pardict['taSub'] = params['taSub'][0]\n pardict['faSub'] = params['faSub'][0]\n pardict['trSub'] = params['trSub'][0]\n pardict['frSub'] = params['frSub'][0]\n pardict['SubNa'] = params['SubNa'][0][0]\n pardict['SubNr'] = params['SubNr'][0][0]\n pardict['SubTa'] = params['SubTa'][0][0]\n pardict['SubTr'] = params['SubTr'][0][0]\n pardict['tnearSub'] = params['tnearSub'][0][0]\n pardict['tfarSub'] = params['tfarSub'][0][0]\n pardict['tstartSub'] = params['tstartSub'][0][0]\n pardict['tendSub'] = params['tendSub'][0][0]\n pardict['SubRnear'] = params['SubRnear'][0][0]\n pardict['SubRfar'] = params['SubRfar'][0][0]\n pardict['SubSceneArea'] = params['SubSceneArea'][0]\n pardict['SubBeamArea'] = params['SubBeamArea'][0]\n pardict['SubSceneCenter'] = params['SubSceneCenter'][0]\n pardict['SubBeamCenter'] = params['SubBeamCenter'][0]\n pardict['SubEchoAnchor'] = params['SubEchoAnchor'][0]\n pardict['SubEchoSize'] = params['SubEchoSize'][0]\n pardict['SubFPa'] = params['SubFPa'][0][0]\n pardict['SubFPr'] = params['SubFPr'][0][0]\n pardict['SubRsc'] = params['SubRsc'][0][0]\n pardict['SubRs0'] = params['SubRs0'][0][0]\n pardict['SubRbc'] = params['SubRbc'][0][0]\n pardict['SubRb0'] = params['SubRb0'][0][0]\n pardict['xminSub'] = params['xminSub'][0][0]\n pardict['xmaxSub'] = params['xmaxSub'][0][0]\n pardict['yminSub'] = params['yminSub'][0][0]\n pardict['ymaxSub'] = params['ymaxSub'][0][0]\n pardict['SubAbr'] = params['SubAbr'][0][0]\n pardict['SubAd'] = params['SubAd'][0][0]\n pardict['SubAon'] = params['SubAon'][0][0]\n pardict['SubKa'] = params['SubKa'][0][0]\n pardict['SubBa'] = params['SubBa'][0][0]\n\n sarplat.params = pardict\n\n seldict = {}\n seldict['taSub'] = selection['taSub'][0]\n 
seldict['faSub'] = selection['faSub'][0]\n seldict['trSub'] = selection['trSub'][0]\n seldict['frSub'] = selection['frSub'][0]\n seldict['SubNa'] = selection['SubNa'][0][0]\n seldict['SubNr'] = selection['SubNr'][0][0]\n seldict['SubTa'] = selection['SubTa'][0][0]\n seldict['SubTr'] = selection['SubTr'][0][0]\n seldict['tnearSub'] = selection['tnearSub'][0][0]\n seldict['tfarSub'] = selection['tfarSub'][0][0]\n seldict['tstartSub'] = selection['tstartSub'][0][0]\n seldict['tendSub'] = selection['tendSub'][0][0]\n seldict['SubRnear'] = selection['SubRnear'][0][0]\n seldict['SubRfar'] = selection['SubRfar'][0][0]\n seldict['SubSceneArea'] = selection['SubSceneArea'][0]\n seldict['SubBeamArea'] = selection['SubBeamArea'][0]\n seldict['SubSceneCenter'] = selection['SubSceneCenter'][0]\n seldict['SubBeamCenter'] = selection['SubBeamCenter'][0]\n seldict['SubEchoAnchor'] = selection['SubEchoAnchor'][0]\n seldict['SubEchoSize'] = selection['SubEchoSize'][0]\n seldict['SubFPa'] = selection['SubFPa'][0][0]\n seldict['SubFPr'] = selection['SubFPr'][0][0]\n seldict['SubRsc'] = selection['SubRsc'][0][0]\n seldict['SubRs0'] = selection['SubRs0'][0][0]\n seldict['SubRbc'] = selection['SubRbc'][0][0]\n seldict['SubRb0'] = selection['SubRb0'][0][0]\n seldict['xminSub'] = selection['xminSub'][0][0]\n seldict['xmaxSub'] = selection['xmaxSub'][0][0]\n seldict['yminSub'] = selection['yminSub'][0][0]\n seldict['ymaxSub'] = selection['ymaxSub'][0][0]\n seldict['SubAbr'] = selection['SubAbr'][0][0]\n seldict['SubAd'] = selection['SubAd'][0][0]\n seldict['SubAon'] = selection['SubAon'][0][0]\n seldict['SubKa'] = selection['SubKa'][0][0]\n seldict['SubBa'] = selection['SubBa'][0][0]\n sarplat.selection = seldict\n else:\n sarplat = None\n\n elif EXT in ['.hdf5', '.h5']:\n f = h5py.File(file, \"r\")\n sardata = SarData()\n sarplat = SarPlat()\n\n ds_sardata = f[\"sardata\"]\n ds_sarpalt = f[\"sarplat\"]\n\n sardata.name = ds_sardata['name'].value\n sardata.image = ds_sardata['image'].value\n sardata.rawdata = ds_sardata['rawdata'].value\n\n ds_sarpalt_sensor = ds_sarpalt[\"sensor\"]\n ds_sarpalt_params = ds_sarpalt[\"params\"]\n ds_sarpalt_acquis = ds_sarpalt[\"acquisition\"]\n ds_sarpalt_select = ds_sarpalt[\"selection\"]\n sensor = dict()\n for key in ds_sarpalt_sensor.keys():\n sensor[key] = ds_sarpalt_sensor[key].value\n\n params = dict()\n for key in ds_sarpalt_params.keys():\n params[key] = ds_sarpalt_params[key].value\n\n acquis = dict()\n for key in ds_sarpalt_acquis.keys():\n acquis[key] = ds_sarpalt_acquis[key].value\n\n selection = dict()\n for key in ds_sarpalt_select.keys():\n selection[key] = ds_sarpalt_select[key].value\n\n sarplat.name = ds_sarpalt['name'].value\n sarplat.sensor = sensor\n sarplat.params = params\n sarplat.acquisition = acquis\n sarplat.selection = selection\n\n f.close()\n\n else:\n raise(TypeError(\"Not supported! Only support: (pkl, mat, hdf5)!\"))\n\n return sardata, sarplat",
"def get_mapped_isoseqid(self):\n st = set()\n for read in self.bam:\n isoseqid = read.qname\n st.add(isoseqid)\n return st",
"def read5c(fn='obscode.dat'):\n with open(fn) as f:\n sites = {}\n for line in f:\n s = parseLine(line)\n if s:\n sites[line[:3]] = s\n return sites",
"def parse(path):\n print \"Parsing file: %s\" % path\n acc2taxa = {}\n acc2ncbi = {}\n f = open(path)\n line = f.readline()\n tax = []\n while line:\n if line[0:2] == 'ID':\n ID = line.split(' ')[3].split('_')[1]\n if line[0:2] == 'OC':\n [tax.append(i.strip()) for i in line.strip().split(' ')[1].split(';')[:-1]]\n if line[0:2] == 'OX':\n ncbi = line.strip().split('NCBI_TaxID=')[1].split(';')[0]\n if line[0:2] == 'OS':\n name = line.split(' ')[1].strip()\n if line[0:2] == '//':\n # print \"Adding %s : %s\" % (ID, tax)\n tax.append(name)\n acc2taxa[ID] = tax\n acc2ncbi[ID] = ncbi\n tax = []\n line = f.readline()\n return acc2taxa, acc2ncbi",
"def getValidTickerMap(args):\n dividend_file = os.path.join(args.outDir, args.inFile)\n with open(dividend_file) as divfh:\n csvdiv = csv.DictReader(divfh)\n tick_mqaid_map = dict()\n for lines in csvdiv:\n tick = lines[\"tick\"]\n if tick == \"Null\":\n continue\n else:\n tick_mqaid_map[tick] = lines[\"mqaid\"]\n return tick_mqaid_map",
"def get_next_map_token(scanner):\n for line in scanner.fh:\n # look for section header\n m = re.search('^([0-9_A-Z]+)' + \\\n '(\\s+(0x[0-9a-fA-F]+)\\s+(0x[0-9a-fA-F]+))?\\s*$',\n line)\n if m:\n if m.group(2) != None:\n section = MapParser.Section(m.group(1), \n int(m.group(3),0), \n int(m.group(4),0))\n scanner.curr_section = section\n #return (section, None) \n else:\n scanner.curr_section_name = m.group(1)\n scanner.split_line_section = True\n continue\n\n # handle split line header\n if scanner.split_line_section:\n m = re.search('^\\s+(0x[0-9a-fA-F]*)\\s+(0x[0-9a-fA-F]+)\\s*$', line)\n scanner.split_line_section = False\n if m:\n section = MapParser.Section(scanner.curr_section_name, \n int(m.group(1),0), \n int(m.group(2),0))\n scanner.curr_section = section\n #return (section, None)\n continue\n\n # look for symbol\n m = re.search('^ ([\\.a-zA-Z0-9_]+)(\\s+(0x[0-9a-fA-F]+)\\s+(0x' + \\\n '[0-9a-fA-F]+)\\s+.*?([^\\\\\\\\/]+\\\\.lib)\\\\((.*)\\\\))?\\s*$', \n line)\n if m and scanner.curr_section != None:\n scanner.curr_symbol = m.group(1)\n if m.group(2) != None:\n symbol = MapParser.Symbol(int(m.group(3),0), \n int(m.group(4),0), \n m.group(6), \n m.group(5), \n extract_segment(m.group(1)),\n m.group(1))\n #return (scanner.curr_section, symbol)\n return symbol\n else:\n scanner.split_line_symbol = True\n continue\n\n # handle split line symbol\n if scanner.split_line_symbol:\n m = re.search('^\\s+(0x[0-9a-fA-F]+)\\s+(0x[0-9a-fA-F]+)\\s+.*?' + \\\n '([^\\\\\\\\/]+\\\\.lib)\\\\((.*)\\\\)\\s*$', \n line)\n scanner.split_line_symbol = False\n if m:\n symbol = MapParser.Symbol(int(m.group(1),0), \n int(m.group(2),0), \n m.group(4), \n m.group(3),\n extract_segment(scanner.curr_symbol),\n scanner.curr_symbol)\n #return (scanner.curr_section, symbol)\n return symbol\n continue\n\n # end section on empty line\n m = re.search('^$', line)\n if m:\n scanner.split_line_section = False\n scanner.split_line_symbol = False\n scanner.curr_section = None\n scanner.curr_section_name = ''\n scanner.curr_symbol = None\n \n\n # clear split line flags if no matches\n scanner.split_line_section = False\n scanner.split_line_symbol = False\n\n # indicate done scanning\n #return (None, None)\n return None",
"def read_mf6(tape, mat, mt):\n mf = 6\n df = tape._get_section_df(mat, mf, mt)\n out = {\"MAT\": mat, \"MF\": mf, \"MT\": mt}\n i = 0\n C, i = sandy.read_cont(df, i)\n out.update({\n \"ZA\": C.C1,\n \"AWR\": C.C2,\n \"LCT\": C.L2, # Reference system for secondary energy and angle\n })\n subsections = {}\n # Each subsection describes one reaction product. There can be more than\n # one subsection for a given particle, but the combination of the product\n # identifier with its final isomeric state create a unique identifier for\n # each reaction.\n for a in range(C.N1):\n T, i = sandy.read_tab1(df, i)\n LAW = T.L2 # Distintion between different distribution function\n ZAP = T.C1 # Product identifier\n LIP = T.L1 # Product isomeric state identifier\n ZAM = zam.za2zam(ZAP, meta=LIP, method=False)\n add = {\n \"AWP\": T.C2, # Product mass in neutron units\n \"LAW\": T.L2,\n \"NR\": T.NBT,\n \"NP\": T.INT,\n \"E\": T.x, # Neutron incident energy\n \"Y\": T.y, # The product multiplicity\n }\n # LAW dependent structures:\n if LAW == 1: # Continuum Energy-Angle Distributions\n L, i = sandy.read_tab2(df, i)\n NE = L.NBT[0] # How many NE incident energies\n add.update({\n \"LANG\": L.L1, # Angular representation identificator\n \"LEP\": L.L2, # Interpolation for secondary energy\n \"ENR\": L.NR, # I put here ENR insted of NR to do not overwrite NR of the tab1 section.\n \"ENE\": L.NBT, # Number of different product energy\n \"EINT\": L.INT, # product energy diferent values\n })\n add_e = {}\n # To repeat for all the NE incident energies\n for j in range(NE):\n T, i = sandy.read_list(df, i)\n E = T.C2 # Incident energy\n if int(T.L2) == 0:\n Ep = T.B[::2]\n b = T.B[1::2]\n else:\n Ep = T.B[::T.L2+2]\n b = T.B[::1]\n del b[::T.L2+2] # To delete from b the energy values\n add_2 = {\n \"ND\": T.L1, # Number of discrete energies\n \"NA\": T.L2, # Number of angular parameters\n \"NW\": T.NPL, # Total number of words\n \"NEP\": T.N2, # Secondary energy points in distribution\n \"Ep\": Ep, # The energy of the product emitted\n \"b\": b, # Coefficients for the angular representation\n # the contents of the b depend on LANG\n }\n add_e[E] = add_2\n add[\"EGROUPS\"] = add_e\n\n elif LAW == 2: # Discrete Two-Body Scattering\n L, i = sandy.read_tab2(df, i)\n NE = L.NBT[0]\n add.update({\n \"ENR\": L.NZ,\n \"ENE\": L.NBT, # Number of different product energy\n \"EINT\": L.INT, # Product energy values\n })\n add_e = {}\n # To repeat list records for all the incident energies\n for j in range(NE):\n T, i = sandy.read_list(df, i)\n E = T.C2 # Incident energy\n add_2 = {\n \"LANG\": T.L1, # Angular representation identificator\n \"NW\": T.NPL, # Number of parameters\n \"NL\": T.N2, # Highest coefficient identificator\n \"Al\": T.B, # Coeficcient of angular representation\n }\n add_e[E] = add_2\n add[\"EGROUPS\"] = add_e\n\n elif LAW == 5: # Charged-Particle Elastic Scattering\n logging.warning(f\"\"\"'(LAW) = ({LAW})' is not validated.\n Please report any posible error/bug.\"\"\")\n L, i = sandy.read_tab2(df, i)\n NE = L.NBT[0] # How many NE incident energies\n LIDP = L.L1\n add.update({\n \"SPI\": L.C1,\n \"LIDP\": LIDP,\n \"ENR\": L.NR,\n \"ENE\": L.NBT,\n \"EINT\": L.INT,\n })\n add_e = {}\n for j in range(NE): # To repeat the LIST records for all the NE\n T, i = sandy.read_list(df, i)\n E = T.C2\n LTP = T.L1\n add_2 = {\n \"LTP\": T.L1,\n \"NW\": T.NPL,\n \"NL\": T.N2,\n }\n # We have to do the distintion between the different Ai organizations.\n if LTP == 1 and LIDP == 0 or LIDP == 1:\n mark = T.N2+1\n b = 
T.B[0:mark]\n Ra = T.B[mark::2]\n Ia = T.B[mark+1::2]\n add_2[\"A\"] = {\n \"B\": b,\n \"Ra\": Ra,\n \"Ia\": Ia,\n }\n elif LTP == 2:\n add_2[\"A\"] = {\n \"C\": T.B,\n }\n elif LTP > 2:\n nu = T.B[::2]\n p = T.B[1::2]\n add_2[\"A\"] = {\n \"nu\": nu,\n \"p\": p,\n }\n add_e[E] = add_2\n add[\"EGROUPS\"] = add_e\n\n elif LAW == 6: # N-Body Phase-Space Distributions\n T, i = sandy.read_cont(df, i)\n add.update({\n \"APSX\": T.C1, # Total mass(neutron uni) of N particles\n \"NPSX\": T.N2, # Number of particles distributed\n })\n elif LAW == 7: # Laboratory Angle-Energy Law\n L, i = sandy.read_tab2(df, i)\n NE = L.NBT[0] # How many interpolation range we have\n # Interpolation parameters for incident energy E\n add.update({\n \"ENR\": L.NZ,\n \"ENE\": L.NBT, # The incident energies number\n \"EINT\": L.INT, # Incident energy values\n })\n add_e = {}\n # To repeat for all NE incident energies\n for j in range(NE):\n T, i = sandy.read_tab2(df, i)\n # Interpolation parameters for emission cosine\n E = T.C2 # Incident energy\n NMU = T.NBT[0] # Number of possible emission cosines\n add_2 = {\n \"NRM\": T.NZ,\n \"NMU\": T.NBT, # Number of different cosines\n \"NU_INT\": T.INT, # Value of emission cosines\n }\n add_e[E] = add_2\n # To repeat for all the NMU emission cosines\n add_nu = {}\n for hz in range(NMU):\n Z, i = sandy.read_tab1(df, i)\n # Interpolation parameters for secondary energy E′\n nu = Z.C2 # Value for emission cosine\n E_p = Z.y[::2]\n E_distr = Z.y[1::2]\n add_3 = {\n \"NRP\": Z.NBT,\n \"NEP\": Z.INT,\n \"EP_INT\": Z.x, # Energies for E'\n \"E_p\": E_p, # Secondary energy value\n \"E_distr\": E_distr, # Distribution according to nu, E and E'\n }\n add_nu[nu] = add_3\n add_e[E][\"COSGROUPS\"] = add_nu\n add[\"EGROUPS\"] = add_e\n subsections[ZAM] = add\n out[\"NK\"] = subsections\n return out",
"def getreadinfo(samfname):\n f = open(samfname)\n firstbases=[]\n seqs = []\n quals = []\n for line in f:\n if line[0] != '@':\n v = line.split()\n seq = v[9]\n seqlen = len(seq)\n noindelCIGARstr = str(seqlen) + \"M\"\n if v[5] == noindelCIGARstr: ## check to see if CIGAR string indicates no indels\n firstbases.append(int(v[3]))\n seqs.append(seq)\n quals.append(v[10])\n return firstbases,seqs,quals",
"def read():\n print(\"Read Medlars medical abstracts data set\")\n dir = join(dirname(dirname(abspath(__file__))), \"datasets\", \"Medlars\", \"med.all\")\n doc = open(dir)\n V = sp.lil_matrix((16017, 1033))\n term2idx = {}\n idx2term = {}\n n_free = 0\n line = doc.readline()\n for abstract in range(1033):\n ii = int(line.split()[1])\n # omit .W char\n doc.readline()\n line = doc.readline()\n while line != \".I \" + str(ii + 1) and line != \"\":\n for term in line.split():\n term = term.strip().replace(',', '').replace('.', '')\n if term not in term2idx:\n term2idx[term] = n_free\n idx2term[n_free] = term\n n_free += 1\n V[term2idx[term], ii - 1] += 1\n line = doc.readline().strip()\n return V, term2idx, idx2term"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes part of an instruction and splits it into a tuple of (codeword, target) | def split_instr_to_op_targ(instr: str, reg_map: dict):
cw, sreg = instr.split(' ')
target_qubits = reg_map[sreg]
return (cw, target_qubits) | [
"def parse_instruction(self, line):\n instruction, *args = line.strip().replace(',', '').split()\n return instruction, args",
"def get_instructions ():\n try:\n instruction = sys.argv[1]\n try:\n target_id = int (sys.argv[2])\n return (instruction, target_id)\n except:\n return (instruction, None)\n except:\n print ('ERROR: I need instructions!')\n sys.exit()",
"def instructions(self) -> Sequence[Instruction]:",
"def split_instructions(instructions):\n instructions = list(reversed(instructions))\n split_instructions_ = []\n while len(instructions) > 0:\n current = \"\"\n while current not in DIRECTION_DICT:\n current += instructions.pop()\n split_instructions_.append(current)\n return split_instructions_",
"def get_instruction_tokens(self, _ea):\n\t\tif (_ea != BADADDR):\n\t\t\treturn filter(None, GetDisasm(_ea).split(\" \"))",
"def disassemble(line):\n if type(line) != type(''):\n return ('***UNTRANSLATABLE INSTRUCTION!***', '***UNTRANSLATABLE***', \\\n [])\n hex = binary_to_num(reduce(lambda x, y: x + y, line.strip().split(' ')))\n for tuple in opcodes:\n proto = binary_to_num(reduce(lambda x, y: x + y, tuple[0].split(' ')))\n mask = binary_to_num(reduce(lambda x, y: x + y, tuple[1].split(' ')))\n if hex & mask == proto:\n # We have found the proper instruction. Decode the arguments.\n opcode = tuple[2]\n translation = opcode\n hex <<= 4\n args = []\n separator = ' '\n for arg in arguments[opcode]:\n # r s u n z\n if arg == 'r':\n val = (hex & 0xf000) >> 12\n translation += separator + 'r' + str(val)\n separator = ', '\n hex <<= 4\n args += [val]\n elif arg == 'z':\n hex <<= 4\n elif arg == 's' or arg == 'u':\n val = (hex & 0xff00) >> 8\n if arg == 's' and (val & 0x80) != 0:\n val -= 256\n translation += separator + str(val)\n separator = ', '\n hex <<= 8\n args += [val]\n elif arg == 'u':\n val = (hex & 0xff00) >> 8\n translation += separator + str(val)\n separator = ', '\n hex <<= 8\n args += [val]\n elif arg == 'n':\n # In the absence of other information, always unsigned\n val = hex & 0xffff\n translation += separator + str(val)\n separator = ', '\n hex <<= 16\n args += [val]\n return (translation, opcode, args)\n return ('***UNTRANSLATABLE INSTRUCTION!***', '***UNTRANSLATABLE***', [])",
"def for_line_opcode_parse(full_text: str):\n for line_index, stripped in for_line_stripped_comments(full_text):\n yield get_label(stripped) if has_label(stripped) else None, get_opcode(stripped), strip_opcode(stripped)",
"def parse(instruction_str):\n match = re.search(\"(nop|acc|jmp) (.*)$\", instruction_str)\n return {\"operation\": match[1], \"argument\": int(match[2])}",
"def parse_code(code: List[str]) -> List[Tuple[str, int]]:\n return [parse_line(line) for line in code]",
"def parse_instruction(instruction_string):\n op, arg = instruction_string.split()\n\n instruction = Instruction(op, int(arg))\n\n return instruction",
"def parse_byte_and_args(self) -> Tuple[str, Any, int]: # TODO: code in the middle of that\n f = self.frame\n opoffset = f.f_lasti\n byteCode = f.f_code.co_code[opoffset] # type: int\n assert type(byteCode) == int\n\n f.f_lasti += 1\n byteName = dis.opname[byteCode]\n arg = None # type: Optional[bytes]\n arguments = []\n\n if byteCode >= dis.HAVE_ARGUMENT:\n arg, f.f_lasti = f.f_code.co_code[f.f_lasti:f.f_lasti + 2], f.f_lasti + 2\n assert type(arg) == bytes, type(arg)\n\n intArg = arg[0] + (arg[1] << 8)\n if byteCode in dis.hasconst:\n arg = f.f_code.co_consts[intArg]\n elif byteCode in dis.hasname:\n arg = f.f_code.co_names[intArg]\n elif byteCode in dis.hasjrel:\n arg = f.f_lasti + intArg\n elif byteCode in dis.hasjabs:\n arg = intArg\n elif byteCode in dis.haslocal:\n arg = f.f_code.co_varnames[intArg]\n else:\n arg = intArg\n arguments = [arg]\n\n assert type(byteName) == str, (byteName, type(byteName))\n # assert False, (arguments, type(arguments)) #TODO:object triples\n assert type(opoffset) == int, (opoffset, type(opoffset))\n\n return byteName, arguments, opoffset",
"def input_and_target(data):\r\n input_seq=[]\r\n target_seq=[]\r\n for i in range(len(data)):\r\n input_seq.append(data[i][:-1])\r\n target_seq.append(data[i][1:])\r\n return input_seq, target_seq",
"def input_and_target(data):\n input_seq=[]\n target_seq=[]\n for i in range(len(data)):\n input_seq.append(data[i][:-1])\n target_seq.append(data[i][1:])\n return input_seq, target_seq",
"def load_input_data(input_file):\n instruction_list = []\n\n with open(input_file, 'r') as f:\n for line in f:\n instruction_list += [line.strip()]\n\n instruction_tuple = tuple(instruction_list)\n\n return instruction_tuple",
"def _parse_edges(instructions: list[str]) -> list[list[str]]:\n return [STEP_RE.findall(line) for line in instructions]",
"def disassemble(section):\n\n def instruction(address, bytecode, mnemonic, op_str):\n return (address, bytecode, mnemonic, op_str)\n \n name, base, _, executable = section\n bytecode = section_code[name]\n\n if (executable):\n # Note: The properties of the instruction objects returned by\n # Capstone are unicode strings. For consistency, they\n # should be converted to byte strings.\n return (instruction(i.address, i.bytes, \n i.mnemonic.encode('latin-1'), \n i.op_str.encode('latin-1'))\n for i in mode.disasm(bytecode, base))\n \n return (instruction(base+i, [ord(bytecode[i:i+1])], b'.byte', \n b'0x%02x' % ord(bytecode[i:i+1])) \n for i in range(len(bytecode)))",
"def getProgramTargets(program):\n\n def get_item_from_inst_ptr(inst_ptr, RDK):\n \"\"\"Get an item from an instruction pointer, typically a TargetPtr, FramePtr, ToolPtr, etc.\"\"\"\n if inst_ptr == '0' or inst_ptr == 0:\n return None\n\n item = robolink.Item(RDK, str(inst_ptr))\n if not item.Valid(True) or str(item.item) != str(inst_ptr):\n return None\n\n return item\n\n def get_target_from_inst(inst_target_name, RDK):\n \"\"\"Get a target item from an instruction, typically a MoveL or MoveJ\"\"\"\n r = RE_TARGET.search(inst_target_name)\n if not r:\n return None\n\n target_name = r.group(1)\n target = RDK.Item(target_name, robolink.ITEM_TYPE_TARGET)\n if not target.Valid(True) or target.Name() != target_name:\n return None\n\n return target\n\n def get_targets_from_inst(inst_targets_name, RDK):\n \"\"\"Get targets items from an instruction, typically a MoveC.\"\"\"\n r = RE_TARGET.search(inst_targets_name)\n if not r:\n return None, None\n\n target_names = r.group(1).split(', ')\n target1 = RDK.Item(target_names[0], robolink.ITEM_TYPE_TARGET)\n if not target1.Valid(True) or target1.Name() != target_names[0]:\n target1 = None\n\n target2 = RDK.Item(target_names[1], robolink.ITEM_TYPE_TARGET)\n if not target2.Valid(True) or target2.Name() != target_names[1]:\n target2 = None\n\n return target1, target2\n\n targets = []\n\n RDK = program.RDK()\n inst_count = program.InstructionCount()\n for i in range(inst_count):\n instruction_dict = program.setParam(i)\n\n if 'TargetPtr' in instruction_dict:\n target = get_item_from_inst_ptr(instruction_dict['TargetPtr'], RDK)\n if target:\n targets.append(target)\n continue\n\n if instruction_dict['Type'] in [robolink.INS_TYPE_MOVE]:\n target = get_target_from_inst(instruction_dict['Name'], RDK)\n if target:\n targets.append(target)\n continue\n\n if instruction_dict['Type'] in [robolink.INS_TYPE_MOVEC]:\n target1, target2 = get_targets_from_inst(instruction_dict['Name'], RDK)\n if target1:\n targets.append(target1)\n if target2:\n targets.append(target2)\n\n # At this point there is not target in this instruction, or we fail to retrieve it\n\n return targets",
"def extract_generated_target(output_tokens, encoder, target):\n # Filter out first instance of start token\n assert output_tokens.ndim == 1\n\n start_tokens = output_tokens == encoder.__dict__[f'begin_{target}']\n if np.any(start_tokens):\n start_ind = np.argmax(start_tokens) + 1\n else:\n start_ind = 0\n\n end_tokens = output_tokens == encoder.__dict__[f'end_{target}']\n if np.any(end_tokens):\n end_ind = np.argmax(end_tokens)\n else:\n end_ind = output_tokens.shape[0]\n\n return {\n 'extraction': encoder.decode(output_tokens[start_ind:end_ind]),\n 'start_ind': start_ind,\n 'end_ind': end_ind,\n }",
"def _parse_one_instruction(cls, instr):\n opcode = Opcode(instr % 100)\n instr //= 100 # get rid of the opcode\n num_param = cls.NUM_PARAMS_OF_OPCODE[opcode]\n parameter_modes = []\n for i in range(num_param):\n parameter_modes.append(ParamMode(instr % 10))\n instr //= 10\n return opcode, parameter_modes",
"def create_splitword_target(word_part):\n\n split_word = sentence.sem.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create <target>\n target = chapter_input.new_tag('target')\n last_frame.insert(0, target)\n\n # Create target <fenode>\n target_fenode = chapter_input.new_tag('fenode')\n target_fenode['idref'] = wordpart_idref.get('id')\n target_fenode['is_split'] = 'yes'\n target.insert(0, target_fenode)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns time tuples of the form (start_time, operation, target_qubits, line_nr) | def get_timetuples(qisa_fn: str):
reg_map = get_register_map(qisa_fn)
tqisa_fn = infer_tqisa_filename(qisa_fn)
time_tuples = []
with open(tqisa_fn, 'r') as tq_file:
for i, line in enumerate(tq_file):
# Get instruction line
if re.search(r"bs", line):
# Get the timing number
start_time = get_start_time(line)
# Get the instr
instr = re.split(r'bs ', line)[1][1:]
# We now parse whether there is a | character
if '|' in line:
multi_instr = re.split(r'\s\|\s', instr)
else:
multi_instr = [instr]
for instr in multi_instr:
instr = instr.strip()
op, targ = split_instr_to_op_targ(instr, reg_map)
result = (start_time, op, targ, i)
time_tuples.append(result)
return time_tuples | [
"def _get_time_info(self, logs):\n hours = timedelta(0)\n tasks = {} # task: timedelta\n\n for entry in logs:\n delta = entry['stop'] - entry['start']\n hours += delta\n if len(entry['task']):\n if entry['task'] in tasks:\n tasks[entry['task']] += delta\n else:\n tasks[entry['task']] = delta\n\n tasks = [(task, delta) for task, delta in tasks.items()]\n tasks = sorted(tasks, key=lambda x: x[1])\n tasks = ', '.join([\n '{} ({})'.format(task, prettify_delta(delta))\n for task, delta in tasks\n ])\n\n if not tasks:\n tasks = '-'\n\n return '\\n'.join([\n 'tasks: {}'.format(tasks),\n 'total: {}'.format(prettify_delta(hours))\n ])",
"def get_times(self, header, seq=True):\n times_cpu = header[:, -3] / np.float(self.nperpacket) \\\n + header[:, -2].astype(np.float)\n\n # May 2 edits\n if seq is False:\n\n return times_cpu, []\n\n if seq is True:\n seq = header[:, -1] \n times = seq / 625.0**2\n\n return times, self.J2000_to_unix(times - times[0] + times_cpu[0])",
"def get_timings(self):\r\n return self.times",
"def compute_marci_time(self, line):\n if not hasattr(self, \"_num_framelets\"):\n self._num_bands = self.label[\"IsisCube\"][\"Core\"][\"Dimensions\"][\"Bands\"]\n # is the detector line summing/line scale factor\n sum_mode = self.label[\"IsisCube\"][\"Instrument\"][\"SummingMode\"]\n\n framelet_offset_factor = self.label[\"IsisCube\"][\"Instrument\"][\"ColorOffset\"]\n if self.flipped_framelets:\n framelet_offset_factor *= -1\n\n self._framelet_offset_lookup = {\n \"NIR\" : 0 * framelet_offset_factor,\n \"RED\" : 1 * framelet_offset_factor,\n \"ORANGE\" : 2 * framelet_offset_factor,\n \"GREEN\" : 3 * framelet_offset_factor,\n \"BLUE\" : 4 * framelet_offset_factor,\n \"LONG_UV\" : 5 * framelet_offset_factor,\n \"SHORT_UV\" : 6 * framelet_offset_factor,\n }\n self._filters = self.label[\"IsisCube\"][\"BandBin\"][\"FilterName\"]\n\n self._framelet_rate = self.label[\"IsisCube\"][\"Instrument\"][\"InterframeDelay\"].value\n framelet_height = 16\n\n self._actual_framelet_height = framelet_height / sum_mode\n\n num_lines = self.label[\"IsisCube\"][\"Core\"][\"Dimensions\"][\"Lines\"]\n self._num_framelets = num_lines / (16 / sum_mode)\n\n times = []\n for band in range(self._num_bands):\n framelet = ((line - 0.5) / self._actual_framelet_height) + 1\n framelet_offset = self._framelet_offset_lookup[self._filters[band]]\n adjusted_framelet = framelet - framelet_offset\n\n time = self.start_time\n # Keeping in line with ISIS\n if not self.flipped_framelets:\n time += (adjusted_framelet - 1) * self._framelet_rate\n else:\n time += (self._num_framelets - adjusted_framelet) * self._framelet_rate\n times.append(time)\n return times",
"def get_expanded_times(self):\n if self.reps is not None:\n times = []\n for _, time, nreps in zip(self.oli,self.time,self.reps):\n nreps = _round_int_repcnt(nreps)\n times.extend( [time]*nreps )\n return _np.array(times, dtype=self.dataset.timeType)\n else: return self.time.copy()",
"def construct_time_array():\n global time_diffs\n time_diffs = np.zeros((no_epochs, no_epochs))\n for m in range(0, no_epochs):\n t11 = time.Time(str(fields[m].header['STARTMJD']), format='mjd')\n t12 = time.Time(str(fields[m].header['ENDMJD']), format='mjd')\n t1 = t11+(t12-t11)/2\n for n in range(m+1, no_epochs):\n t21 = time.Time(str(fields[n].header['STARTMJD']), format='mjd')\n t22 = time.Time(str(fields[n].header['ENDMJD']), format='mjd')\n t2 = t21 + (t22 - t21) / 2\n time_delta = (t2-t1).to_value('sec')\n time_diffs[m, n] = time_delta\n time_diffs[n, m] = -1*time_delta",
"def get_timing(pool):\n sim.send_data(pool)\n\n start_raw = time.time()\n pool.imap(sim.get_forces_raw, range(sim.PARTICLE_COUNT))\n sim.get_results(pool)\n end_raw = time.time()\n\n start_tree = time.time()\n pool.imap(sim.get_forces_tree, range(sim.PARTICLE_COUNT))\n sim.get_results(pool)\n end_tree = time.time()\n\n return [end_raw - start_raw, end_tree - start_tree]",
"def times(self):\n return time_points(self.n, self.dt)",
"def time_update(self):\r\n self.time = []\r\n t = [0] + self.time_final_all_section()\r\n for i in range(self.number_of_section):\r\n self.time.append((t[i+1] - t[i]) / 2.0 * self.tau[i]\r\n + (t[i+1] + t[i]) / 2.0)\r\n return np.concatenate([i for i in self.time])",
"def test_time(self):\n numeric_times = self.output.variables[\"time\"][:]\n\n self.assertGreater(numeric_times.size, 0)\n\n self.assertAlmostEqual(np.mean(np.diff(numeric_times)), 0.1, delta=0.002)\n self.assertAlmostEqual(np.min(np.diff(numeric_times)), 0.1, delta=0.002)\n self.assertAlmostEqual(np.max(np.diff(numeric_times)), 0.1, delta=0.002)\n\n datetimes = nc.num2date(numeric_times, self.output.variables[\"time\"].units)\n\n self.assertGreaterEqual(datetimes[0], self.start_time)\n self.assertLessEqual(datetimes[-1], self.end_time)\n\n self.assertLess(abs((datetimes[0] - self.start_time).total_seconds()), 0.1)\n self.assertLess(abs((datetimes[-1] - self.end_time).total_seconds()), 0.1)",
"def observation_time_start(self):\n return self.time_ref + u.Quantity(self.table.meta[\"TSTART\"], \"second\")",
"def get_changing_times2(recfile):\n times = recfile[0][1]\n startings = [t[0] for t in times]\n endings = [t[1] for t in times]\n return startings, endings",
"def get_all_timesteps(self):\n for h in self.real_handles:\n h.seek(0, SEEK_SET)\n nx = self.para['nx']\n ny = self.para['ny']\n\n ms = []\n ret_lst = []\n while True:\n try:\n m, data = self.get_next_timestep()\n except ff.NoMoreRecords:\n break\n ms.append(m)\n ret_lst.append(data)\n\n ret = np.stack(ret_lst, axis=0)\n return np.array(ms), ret",
"def timestamp():\n debug(0,'Time elapsed since start: ', time_string(elapsed_time()) )",
"def time(state):",
"def test_timestep_creation(self):\n start_time = datetime(2015, 1, 1, 0, 0, 0) # 01/01/2015 00:00\n offset = -480\n time_list_every_5min = [0, 5, 10]\n expected_5min_output = [1420099200, 1420099500, 1420099800] #generated using http://www.epochconverter.com/\n\n time_list_every_hour = [0, 60, 120]\n expected_hourly_output = [1420099200, 1420102800, 1420106400] #generated using http://www.epochconverter.com/\n\n self.assertEqual(expected_5min_output, tools.make_timesteps(start_time, offset, time_list_every_5min))\n self.assertEqual(expected_hourly_output, tools.make_timesteps(start_time, offset, time_list_every_hour))",
"def get_rhines_times(target_athlete=TARGET_ATHLETE):\r\n races = get_data()\r\n return [row[3:14].strip() for row in races.splitlines() if target_athlete in row]",
"def timeFlow(self):",
"def _parse_table_time(line):\n # Time tuples for leave and join times.\n t_l, t_a = (time.strptime(e, \"%H:%M\") for e in line.strip().split())\n # datetime.timedelta objects.\n t_l = datetime.timedelta(hours=t_l[3], minutes=t_l[4])\n t_a = datetime.timedelta(hours=t_a[3], minutes=t_a[4])\n return (Event(t_l, EV_TRAIN_LEAVE), Event(t_a, EV_TRAIN_ARRIV))",
"def pget_cpu_times (self):\n\n stats = self.stats\n u_t = float(stats[12]) / self.TICKS # uptime\n u_st = float(stats[13]) / self.TICKS # start\n c_utime = float(stats[14]) / self.TICKS\n c_stime = float(stats[15]) / self.TICKS\n return self.CPUTIMES(u_t, u_st, c_utime, c_stime)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of tuples that perform a specific operation | def get_operation_tuples(time_tuples: list, target_op: str):
op_indices = find_operation_idx_in_time_tuples(time_tuples,
target_op=target_op)
time_tuples_op = []
for op_idx in op_indices:
time_tuples_op.append(time_tuples[op_idx])
return time_tuples_op | [
"def x_ops(ops,L,t):\n result = []\n for j in range(len(ops)):\n op = ops[j]\n result.append('Result of %s'%op)\n result.append(execute_op(op,L,t))\n return result",
"def makeops(op, lists):\n return tuple(l[0] if len(l) == 1 else build(op, l) for l in lists)",
"def _get_ops(self):\n\n arglist = []\n arg = self.arg\n\n if arg is None:\n op = [(self.opcode, 0)]\n else:\n while arg > 0xff:\n arg = arg >> (8 * len(arglist))\n arglist.append((self.EXTENDED_ARG, arg & 0xff))\n\n arglist = arglist[::-1]\n if len(arglist) > 3:\n # No more than 3 EXTENDED_ARG opcodes can precede\n # an opcode\n raise RuntimeError(\n f'argument {arg} for {dis.opname[opcode]} too large')\n\n if arglist:\n # The argument associated with the actual instruction\n # is the last one in the arglist\n arg = arglist.pop()[1]\n\n op = [(self.opcode, arg)]\n\n return arglist + op",
"def test_search(tup, fun):\n res=()\n for t in tup:\n for f in fun:\n if f(t[0], t[1]) == t[2]:\n res += (f, 'ok')\n else:\n res += (f, t[1], t[2], 'problemas')\n return res",
"def get_operation(x, y, operation):\r\n # return argrelextrema(y, operation)\r\n index = argrelextrema(y, operation)\r\n print('getting index')\r\n print(index)\r\n _x = x[index]\r\n _y = y[index]\r\n return _x, _y, index",
"def list_operations():\n con = create_connection(DATABASE)\n try:\n cur = con.cursor()\n cur.execute(\"select operation from calculations order by operation_id desc limit 10\")\n rows = cur.fetchall()\n result=[]\n for row in rows:\n result.append({\n 'operation':row[0]\n } )\n return result\n except Error as e:\n logger.error(\"Unable to retrive operations from Sqlite. Error is: \"+str(e))",
"def get_operation(self,element_1,element_2):\r\n res = []\r\n for name_f,f in self.get_morphisms():\r\n try:\r\n if element_2 in f(element_1):\r\n res.append(name_f)\r\n except:\r\n pass\r\n return res",
"def pair_to_args(self, *args, **kwargs) -> Tuple:\n return [*args, *kwargs.values()]",
"def add_tuples(a, b):\n return tuple(map(operator.add, a, b))",
"def get_operands(self):\n return [self.get_next_rand(), self.get_next_rand()]",
"def _get_ops_details(self):\n return [\n self._get_op_details(idx) for idx in range(self._interpreter.NumNodes())\n ]",
"def ops(self):\n return self._all_ops",
"def sub_tuples(a, b):\n return tuple(map(operator.sub, a, b))",
"def get_args(expr):\n if isinstance(expr, Call):\n return expr.args\n if isinstance(expr, TupleGetItem):\n return get_args(expr.tuple_value)\n if isinstance(expr, relay.Tuple):\n return [arg for args in map(get_args, expr.fields) for arg in args]\n return []",
"def to_list_of_tuple(self, list_of_tuple, query, n):\n value = self.query(query)\n value = value[0]\n rank = list(value)\n rank.append(n)\n value = tuple(rank)\n list_of_tuple.append(value)\n\n return list_of_tuple",
"def points_to_tuple(*args):\n return tuple(args[i][j] for i in range(len(args)) for j in range(len(args[i])))",
"def tmap( *args ):\n return tuple( map( *args ) )",
"def extract_ops_from_tk(tk_circ, str_map):\n op_list, params_list, wires_list = [], [], []\n\n for op in tk_circ.__iter__():\n if op.op.type != OpType.Measure:\n op, params, wires = tk_op_to_pennylane(op, str_map)\n op_list.append(op)\n params_list.append([np.pi * p for p in params])\n wires_list.append(wires)\n\n return op_list, params_list, wires_list",
"def mymap(funcs, args):\n if isinstance(funcs, Iterable):\n return [tuple(f(arg) for arg in args) for f in funcs]\n else:\n return [funcs(arg) for arg in args]",
"def generate_operations(self):\n assert self.limit <= 100\n for i in range(1, self.limit + 1):\n yield (i,)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if compilation of a file is needed based on its timestamp and an optional recompile option. The behaviour of this function depends on the recompile argument. | def check_recompilation_needed(program_fn: str,
recompile=True):
if recompile == True:
return True
elif recompile == 'as needed':
try:
if is_more_rencent(program_fn, platf_cfg):
return False
else:
return True # compilation is required
except FileNotFoundError:
# File doesn't exist means compilation is required
return True
elif recompile == False: # if False
if is_more_rencent(program_fn, platf_cfg):
return False
else:
raise ValueError('OpenQL config has changed more recently '
'than program.')
else:
raise NotImplementedError(
'recompile should be True, False or "as needed"') | [
"def check_recompilation_needed(\n program_fn: str,\n platf_cfg: str,\n recompile=True\n) -> bool:\n if recompile is True:\n return True # compilation is enforced\n elif recompile == 'as needed':\n # In case you ever think of a hash-based check mind that this\n # function is called in parallel multiprocessing sometime!!!\n if isfile(program_fn) and is_more_recent(program_fn, platf_cfg):\n return False # program file is good for using\n else:\n return True # compilation is required\n elif recompile is False:\n if isfile(program_fn):\n if is_more_recent(platf_cfg, program_fn):\n log.warning(\"File {}\\n is more recent\"\n \"than program, use `recompile='as needed'` if you\"\n \" don't know what this means!\".format(platf_cfg))\n return False\n else:\n raise ValueError('No file:\\n{}'.format(platf_cfg))\n else:\n raise NotImplementedError(\n 'recompile should be True, False or \"as needed\"')",
"def has_flag(compiler, flagname):\r\n import tempfile\r\n with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\r\n f.write('int main (int argc, char **argv) { return 0; }')\r\n try:\r\n compiler.compile([f.name], extra_postargs=[flagname])\r\n except setuptools.distutils.errors.CompileError:\r\n return False\r\n return True",
"def check_recompilation_needed_hash_based(\n self,\n clifford_rb_oql: str,\n recompile: bool = True,\n ) -> dict:\n\n hashes_ext = \".hashes\"\n tmp_ext = \".tmp\"\n rb_system_hashes_fn = self.filename + hashes_ext\n tmp_fn = rb_system_hashes_fn + tmp_ext\n\n platf_cfg_hash = get_file_sha256_hash(self._platf_cfg, return_hexdigest=True)\n this_file_hash = get_file_sha256_hash(clifford_rb_oql, return_hexdigest=True)\n file_hashes = {self._platf_cfg: platf_cfg_hash, clifford_rb_oql: this_file_hash}\n\n _recompile = False\n if not isfile(self.filename):\n if recompile is False:\n raise ValueError('No file:\\n{}'.format(self.filename))\n else:\n # Force recompile, there is no program file\n _recompile |= True # FIXME: why \"|=\"?\n\n # Determine if compilation is needed based on the hashed files\n if not isfile(rb_system_hashes_fn):\n # There is no file with the hashes, we must compile to be safe\n _recompile |= True\n else:\n # Hashes exist, we use them to determine if recompilations is needed\n with open(rb_system_hashes_fn) as json_file:\n hashes_dict = json.load(json_file)\n # Remove file to signal a compilation in progress\n remove(rb_system_hashes_fn)\n\n for fn in file_hashes.keys():\n # Recompile becomes true if any of the hashed files has a different\n # hash now\n _recompile |= hashes_dict.get(fn, \"\") != file_hashes[fn]\n\n # Write the updated hashes\n # We use a temporary file such that for parallel compilations, if the\n # process is interrupted before the end there will be no hash and\n # recompilation will be forced\n pathlib.Path(tmp_fn).parent.mkdir(parents=True, exist_ok=True)\n pathlib.Path(tmp_fn).write_text(json.dumps(file_hashes))\n\n res_dict = {\n \"file\": rb_system_hashes_fn,\n \"tmp_file\": tmp_fn\n }\n\n if recompile is False:\n if _recompile is True:\n log.warning(\n \"`{}` or\\n`{}`\\n might have been modified! Are you sure you didn't\"\n \" want to compile?\".format(self._platf_cfg, clifford_rb_oql)\n )\n res_dict[\"recompile\"] = False\n elif recompile is True:\n # Enforce recompilation\n res_dict[\"recompile\"] = True\n elif recompile == \"as needed\":\n res_dict[\"recompile\"] = _recompile\n\n return res_dict",
"def can_compile():\n logger = logging.getLogger(\"oa-logger\")\n if \"pypy\" in platform.python_implementation().lower():\n logger.warning(\"Compiler is not available on PyPy\")\n return False\n major, minor, patch = platform.python_version_tuple()\n if int(major) >= 3 and int(minor) < 5:\n logger.warning(\"Compiler is not available on 3.4 or lower.\")\n return False\n # There's not going to be a Python 2.8 so this is safe.\n if int(major) <= 2 and (int(minor) < 7 or int(patch) < 11):\n logger.warning(\"Compiler is not available on 2.7.10 or lower.\")\n return False\n return True",
"def compile(self):\n\n # Check to see if there's a makefile in the program root directory\n if self._compile_make():\n print('Compilation with makefile successful')\n return True\n\n elif self.src_bin_present and self._compile_make():\n print('Compilation with makefile successful')\n return True\n\n print('Compilation with makefile failed, falling back to internal compilers')\n\n if self.language == 'java':\n return self._compile_java()\n\n elif self.language in ['c', 'cpp', 'c++']:\n return self._compile_c()\n\n else:\n return self._compile_scripts()",
"def _check_compilation(self, handler, resource, language, compiled_file,\r\n mode=Mode.DEFAULT):\r\n if isinstance(mode, str):\r\n if mode == 'REVIEWED':\r\n mode = Mode.REVIEWED\r\n elif mode == 'TRANSLATED':\r\n mode = Mode.TRANSLATED\r\n else:\r\n mode = Mode.DEFAULT\r\n\r\n handler.bind_resource(resource)\r\n handler.set_language(language)\r\n compiled_template = handler.compile(mode=mode)\r\n f = open(compiled_file, 'r')\r\n expected_compiled_template = f.read()\r\n f.close()\r\n self.assertEqual(compiled_template, expected_compiled_template)",
"def compiler_exists(self):\n\n print(self.os)\n if(self.os == \"Ubuntu\"):\n compiler_info = open(COMPILER_FILENAME, \"w\")\n p1 = subprocess.Popen([\"dpkg\", \"--list\"],\n stdout=subprocess.PIPE)\n p2 = subprocess.Popen([\"grep\", \"compiler\"],\n stdin=p1.stdout, stdout=subprocess.PIPE)\n p1.stdout.close()\n output, err = p2.communicate()\n compiler_info.write(output)\n compiler_info.close()\n\n compiler_info = open(COMPILER_FILENAME, \"r\")\n for line in compiler_info:\n if line is not None:\n compiler_info.close()\n call([\"rm\", COMPILER_FILENAME])\n return True\n\n compiler_info.close()\n call([\"rm\", COMPILER_FILENAME])\n return False\n\n elif(self.os == \"RedHatLinux\"): # Todo\n pass",
"def check_compile(self, args, options):\n # modify args in order to be DIR \n # mode being either standalone or madevent\n \n if options['force']:\n self.force = True\n \n if not args:\n args.append('MC')\n return\n \n if len(args) > 1:\n self.help_compile()\n raise self.InvalidCmd, 'Invalid Syntax: Too many argument'\n\n elif len(args) == 1:\n if not args[0] in ['MC', 'FO']:\n raise self.InvalidCmd, '%s is not a valid mode, please use \"FO\" or \"MC\"' % args[0]\n mode = args[0]\n \n # check for incompatible options/modes",
"def monitor_file(source_data, compiler_config, check_interval):\n try:\n last_hash = None\n while True:\n current_hash = FileSystem.get_file_hash(source_data.file_path)\n if current_hash != last_hash:\n print(\"Compiling file [{0}]\".format(source_data.file_path))\n Build.remove_object_file(source_data.object_file_path)\n Build.create_object_file_dir(source_data.object_file_path)\n return_code, stdout, stderr = Build.compile_object(source_data, compiler_config)\n\n if len(stdout) > 0:\n print(\"stdout for [{0}]: {1}\".format(source_data.file_path, stdout))\n\n if len(stderr) > 0:\n print(\"stderr for [{0}]: {1}\".format(source_data.file_path, stderr))\n\n if return_code is 0:\n print(\"Compilation completed successfully for file [{0}]\".format(source_data.file_path))\n else:\n print(\n \"*** Compilation failed with return code [{0}] for file [{1}]\".format(\n return_code,\n source_data.file_path\n )\n )\n last_hash = current_hash\n\n time.sleep(check_interval)\n except KeyboardInterrupt:\n print(\"Stopping 'autocompile' for [{0}] ...\".format(source_data.file_path))",
"def check_compile(compiler, src):\n\n obj = artefact('out.o')\n targets, sources = [obj], [src]\n compiler.compile(targets, sources)",
"def is_compiled(self):\n return (super(self.__class__, self).is_compiled and\n os.access(self.languagemodel_file, os.R_OK) and\n os.access(self.dictionary_file, os.R_OK))",
"def compile(filepath):\n retcode = subprocess.call(\"/usr/bin/g++ \" + filepath, shell=True)\n return retcode == 0",
"def compile(self, timeout=30*60, verification_string=None):\n binary_path = self.get_file_path('binary')\n binary_existed = False\n pre_hash = None\n if not self.changed:\n if os.path.isfile(binary_path):\n binary_existed = True\n pre_hash = self.get_hash(binary_path)\n super(MorphologicalParser, self).compile(timeout, verification_string)\n if not self.changed:\n if binary_existed:\n post_hash = self.get_hash(binary_path)\n self.changed = pre_hash == post_hash\n else:\n if os.path.isfile(binary_path):\n self.changed = True",
"def _should_load(self) -> bool:\n if os.path.exists(self.file_path) and os.path.isfile(self.file_path):\n if self.last_loaded is None:\n return True\n return (\n datetime.datetime.fromtimestamp(\n getmtime(self.file_path),\n )\n > self.last_loaded\n )\n return False",
"def alwaysUseSourceFiles() -> bool:\n ...",
"def make(a, b, clobber=1):\n try:\n am = os.path.getmtime(a)\n except os.error:\n return 1\n if not clobber: return 0\n bm = os.path.getmtime(b)\n return am <= bm",
"def test_is_source_need_build_return_true(self, mock_load, mock_isfile):\n mock_load.return_value = None, _CC_NAME_TO_MODULE_INFO\n mod_info = native_module_info.NativeModuleInfo()\n mock_isfile.return_value = False\n self.assertTrue(mod_info._is_source_need_build(\n _CC_NAME_TO_MODULE_INFO['multiarch']))",
"def pdf_needs_regenerate(self):\n cachefile = self.pdf_get_cachefile()\n\n if not os.path.isfile(cachefile):\n return True\n\n # Check if cache file is older than last modification of wiki snip.\n modifytime = datetime.fromtimestamp(os.path.getmtime(cachefile))\n if modifytime < self.changed:\n return True\n return False",
"def is_gcc(self, args):\n # On Windows, GCC is run via `sh.exe windows-gcc-32` instead of `gcc`.\n if on_windows():\n return len(args[0]) > 1 and args[0][1] == 'windows-gcc-32.sh'\n return args[0][0] == 'gcc'",
"def compiler_check(cc_name, code_string, link_option=None):\n\n cc_check_src = \"check_test_prog.cpp\"\n cc_check_exec = \"check_test.out\"\n\n with open(cc_check_src, \"w\") as write_src:\n try:\n write_src.write(code_string)\n except IOError:\n print \"Couldn't create test program source file\"\n raise\n\n cc_cmd = [cc_name, cc_check_src, \"-o\", cc_check_exec]\n if link_option is not None:\n cc_cmd.append(link_option)\n\n result = False\n retcode = 1\n\n with open(os.devnull, 'w') as write_null:\n retcode = subprocess.call(cc_cmd,\n stdout = write_null, stderr = write_null)\n\n if retcode == 0:\n print \"Compilation successful, executing\"\n retcode = subprocess.call([\"./\" + cc_check_exec])\n if retcode == 0:\n result = True\n else:\n print \"Program compiled but terminated with an error\"\n else:\n print \"Compilation check failed\"\n\n try:\n if os.path.isfile(cc_check_src):\n os.remove(cc_check_src)\n if os.path.isfile(cc_check_exec):\n os.remove(cc_check_exec)\n except OSError:\n print \"Error deleting temporary program files\"\n pass\n\n return result"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is a helper function for running an experiment that is spread over multiple OpenQL programs of varying length, such as GST. Every time the detector is called, it will also modify the number of sweep points in the detector. | def load_range_of_oql_programs_varying_nr_shots(programs, counter_param, CC,
detector):
program = programs[counter_param()]
counter_param((counter_param()+1) % len(programs))
CC.eqasm_program(program.filename)
detector.nr_shots = len(program.sweep_points) | [
"def load_range_of_oql_programs_varying_nr_shots(\n programs,\n counter_param,\n CC,\n detector\n) -> None:\n program = programs[counter_param()]\n counter_param((counter_param() + 1) % len(programs))\n CC.eqasm_program(program.filename)\n\n detector.nr_shots = len(program.sweep_points)",
"def run_experiment():\n \n print_instructions(instructions)\n print_instructions(instructions2)\n run_blocks(PRACTICE_BLOCKS, f, True) \n print_instructions(instructions3)\n run_blocks(BLOCKS, f)\n print_instructions(exit_message)\n save_and_quit(f)",
"def run_experiment_single(iter_count, exp_string, n_points=None, n_features=None, n=None,\n data=None, n_est=100, contam_rate=0.1, r_mode='t', max_samples=None):\n\n #Initialize the result dictionary\n single_result_dict = {'index': iter_count}\n \n #Use iter_count to initialize subprocess RNG instance\n rng = np.random.RandomState(iter_count)\n\n #Print status updates \n if exp_string == 'syn1':\n print(f\"[INFO] Iteration {iter_count+1}: Dataset Size - {n_points}...\")\n elif exp_string == 'syn2':\n print(f\"[INFO] Iteration {iter_count+1}: Dataset Dimensionality - {n_features} features...\")\n elif exp_string == 'syn3':\n print(f\"[INFO] Iteration {iter_count+1}: Upto {n} features anomalised for dataset of {n_points:,} samples with {n_features} features...\")\n elif exp_string[:5] == 'real1':\n print(f\"[INFO] Iteration {iter_count+1}: {exp_string[6:]} dataset ({data.shape[0]} samples and {data.shape[1]} features)...\")\n else:\n print(f\"[INFO] Iteration {iter_count+1}: Upto {n} features anomalised for {exp_string[6:]} dataset ({data.shape[0]} samples and {data.shape[1]} features)...\")\n\n #Generate synthetic dataset for synthetic experiments\n if isinstance(data, type(None)):\n n_clusters= n_features // 2\n max_box = 20 * (n_clusters//4 + 1)\n data = generate_normal_points(points=n_points, dimensionality=n_features, \n clusters=n_clusters, max=max_box, random_state=rng)\n single_result_dict['orig_dataset'] = data\n max_samples = min(256, data.shape[0])\n n = data.shape[1]\n data = pd.DataFrame(data)\n else:\n single_result_dict['orig_dataset'] = []\n\n #Pick n_ano_points instances at random from the dataset\n n_ano_points = int(np.ceil(contam_rate * data.shape[0]))\n rand_indices = rng.randint(low=0, high=int(data.shape[0]), size=n_ano_points)\n single_result_dict['rand_indices'] = rand_indices\n \n #Fit the model to the dataset\n clf_orig = IsolationForest(n_estimators=n_est, max_samples=max_samples, random_state=rng)\n clf_orig.fit(data.values)\n\n #Generate original explanation matrices\n orig_aws_l, _, orig_aws_exec_time_l = generate_aws_explanations(clf_orig, data.values[rand_indices])\n orig_aws_clem_l, _, orig_aws_clem_exec_time_l = generate_aws_explanations(clf_orig, data.values[rand_indices], mode='clement')\n orig_aws_dif_l, _, orig_aws_dif_exec_time_l = generate_aws_explanations(clf_orig, data.values[rand_indices], mode='diffi')\n orig_shap_l, _, orig_shap_exec_time_l = generate_shap_explanations(clf_orig, data.values[rand_indices])\n orig_diffi_l, _, orig_diffi_exec_time_l = generate_diffi_explanations(clf_orig, data.values[rand_indices])\n\n #Convert the list to numpy array \n orig_aws_exp_matrix = np.array(orig_aws_l)\n orig_aws_clem_exp_matrix = np.array(orig_aws_clem_l)\n orig_aws_dif_exp_matrix = np.array(orig_aws_dif_l)\n orig_shap_exp_matrix = np.array(orig_shap_l)\n orig_diffi_exp_matrix = np.array(orig_diffi_l)\n\n single_result_dict['orig_aws_exp_matrix'] = orig_aws_exp_matrix\n single_result_dict['orig_aws_clem_exp_matrix'] = orig_aws_clem_exp_matrix\n single_result_dict['orig_aws_dif_exp_matrix'] = orig_aws_dif_exp_matrix\n single_result_dict['orig_shap_exp_matrix'] = orig_shap_exp_matrix\n single_result_dict['orig_diffi_exp_matrix'] = orig_diffi_exp_matrix\n\n #Anomalise the dataset\n new_dataset, settings_l, features_l = anomaliser(data.values, rand_indices, n, r_mode)\n single_result_dict['new_dataset']= new_dataset\n single_result_dict['settings_l']= settings_l\n single_result_dict['features_l']= features_l\n\n #Fit the model to the dataset\n clf_new = 
IsolationForest(n_estimators=n_est, max_samples=max_samples, contamination=contam_rate, random_state=rng)\n clf_new.fit(new_dataset)\n\n #Generate new explanation matrices\n new_aws_l, _, new_aws_exec_time_l = generate_aws_explanations(clf_new, new_dataset[rand_indices])\n new_aws_clem_l, _, new_aws_clem_exec_time_l = generate_aws_explanations(clf_new, new_dataset[rand_indices], mode='clement')\n new_aws_dif_l, _, new_aws_dif_exec_time_l = generate_aws_explanations(clf_new, new_dataset[rand_indices], mode='diffi')\n new_shap_l, _, new_shap_exec_time_l = generate_shap_explanations(clf_new, new_dataset[rand_indices])\n new_diffi_l, _, new_diffi_exec_time_l = generate_diffi_explanations(clf_new, new_dataset[rand_indices])\n\n new_aws_exp_matrix = np.array(new_aws_l)\n new_aws_clem_exp_matrix = np.array(new_aws_clem_l)\n new_aws_dif_exp_matrix = np.array(new_aws_dif_l)\n new_shap_exp_matrix = np.array(new_shap_l)\n new_diffi_exp_matrix = np.array(new_diffi_l)\n\n single_result_dict['new_aws_exp_matrix'] = new_aws_exp_matrix\n single_result_dict['new_aws_clem_exp_matrix'] = new_aws_clem_exp_matrix\n single_result_dict['new_aws_dif_exp_matrix'] = new_aws_dif_exp_matrix\n single_result_dict['new_shap_exp_matrix'] = new_shap_exp_matrix\n single_result_dict['new_diffi_exp_matrix'] = new_diffi_exp_matrix\n\n #Get the difference of the 2 numpy matrices\n _, _, aws_norm_diff = generate_normed_diff(m1=orig_aws_exp_matrix, \n m2=new_aws_exp_matrix)\n _, _, aws_clem_norm_diff = generate_normed_diff(m1=orig_aws_clem_exp_matrix, \n m2=new_aws_clem_exp_matrix)\n _, _, aws_dif_norm_diff = generate_normed_diff(m1=orig_aws_dif_exp_matrix, \n m2=new_aws_dif_exp_matrix)\n _, _, shap_norm_diff = generate_normed_diff(m1=orig_shap_exp_matrix, \n m2=new_shap_exp_matrix)\n _, _, diffi_norm_diff = generate_normed_diff(m1=orig_diffi_exp_matrix, \n m2=new_diffi_exp_matrix)\n random_exp_matrix = random_explainer(rand_indices, data.shape[1])\n\n single_result_dict['aws_norm_diff'] = aws_norm_diff\n single_result_dict['aws_clem_norm_diff'] = aws_clem_norm_diff\n single_result_dict['aws_dif_norm_diff'] = aws_dif_norm_diff\n single_result_dict['shap_norm_diff'] = shap_norm_diff\n single_result_dict['diffi_norm_diff'] = diffi_norm_diff\n single_result_dict['random_exp_matrix'] = random_exp_matrix\n\n #Generate ground truth\n ground_truth = generate_ground_truth(rand_indices, features_l, data.shape[1])\n single_result_dict['ground_truth'] = ground_truth\n\n #Compute the RMSE loss\n aws_loss = evaluate_loss(aws_norm_diff, ground_truth, n_ano_points)\n aws_clem_loss = evaluate_loss(aws_clem_norm_diff, ground_truth, n_ano_points)\n aws_dif_loss = evaluate_loss(aws_dif_norm_diff, ground_truth, n_ano_points)\n shap_loss = evaluate_loss(shap_norm_diff, ground_truth, n_ano_points)\n diffi_loss = evaluate_loss(diffi_norm_diff, ground_truth, n_ano_points)\n random_loss = evaluate_loss(random_exp_matrix, ground_truth, n_ano_points)\n\n single_result_dict['aws_rmse_loss'] = aws_loss\n single_result_dict['aws_clem_rmse_loss'] = aws_clem_loss\n single_result_dict['aws_dif_rmse_loss'] = aws_dif_loss\n single_result_dict['shap_rmse_loss'] = shap_loss\n single_result_dict['diffi_rmse_loss'] = diffi_loss\n single_result_dict['random_rmse_loss'] = random_loss\n \n #Compute the Execution Time\n aws_exec_time = mean(orig_aws_exec_time_l + new_aws_exec_time_l)\n aws_clem_exec_time = mean(orig_aws_clem_exec_time_l + new_aws_clem_exec_time_l)\n aws_dif_exec_time = mean(orig_aws_dif_exec_time_l + new_aws_dif_exec_time_l)\n shap_exec_time = 
mean(orig_shap_exec_time_l + new_shap_exec_time_l)\n diffi_exec_time = mean(orig_diffi_exec_time_l + new_diffi_exec_time_l)\n\n single_result_dict['aws_exec_time'] = aws_exec_time\n single_result_dict['aws_clem_exec_time'] = aws_clem_exec_time\n single_result_dict['aws_dif_exec_time'] = aws_dif_exec_time\n single_result_dict['shap_exec_time'] = shap_exec_time\n single_result_dict['diffi_exec_time'] = diffi_exec_time\n\n return single_result_dict",
"def run_algo(self):\n\n # dots\n self.update_alphadots()\n\n # canvas\n self.update_canvas()\n\n # run the program\n #self.wait_proc(self.run_proc(['do_everything.sh']))\n self.run_comparison()\n return",
"def main():\n\n\tname = \"SS_pyNN_closedLoop_webots\"\n\teesAmplitudes = [\"1\",\"240\"]\n\teesFrequency = \"40\"\n\tdelay = \"2\"\n\tweights_1 = np.linspace(0.05,0.1,5)\n\tweights_2 = np.linspace(0.01,0.05,5)\n\tweights_3 = np.linspace(0.01,0.1,10)\n\n\tw4 = -0.00145\n\tw5 = -0.0045\n\n\tsimTime = \"3000\"\n\tnSim = len(weights_1)*len(weights_2)*len(weights_3)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\n\tfor w1 in weights_1:\n\t\tfor w2 in weights_2:\n\t\t\tfor w3 in weights_3:\n\t\t\t\tfor eesAmplitude in eesAmplitudes:\n\t\t\t\t\tresultName = name+\"_eesAmp_%d_w1_%f_w2_%f_w3_%f_w4_%f_w5_%f\" % (int(eesAmplitude),w1,w2,w3,w4,w5)\n\t\t\t\t\tresultFile = gt.find(\"*\"+resultName+\"*.p\",pathToResults)\n\t\t\t\t\tif not resultFile:\n\t\t\t\t\t\tinputFile = \"generatedStructures/ss_cl_w1_%f_w2_%f_w3_%f_w4_%f_w5_%f.txt\" % (w1,w2,w3,w4,w5)\n\t\t\t\t\t\ttls.modify_network_structure(\"templateClosedLoop2Dof.txt\",inputFile,delay,[w1,w2,w3,w4,w5])\n\t\t\t\t\t\tprogram = ['python','./scripts/runClosedLoopSim.py',eesFrequency,eesAmplitude,\"hanging\",\"mouse\",simTime,resultName,inputFile]\n\t\t\t\t\t\tgt.run_subprocess(program)\n\n\t\t\t\t\tcount+=1\n\t\t\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"",
"def experiment_main():\n\n\tprint ('---------------------')\n\tprint (\"Beginning LIME COMPAS Experiments....\")\n\tprint (\"(These take some time to run because we have to generate explanations for every point in the test set) \")\n\tprint ('---------------------')\n\n\t# Dictionaries that will store adversarial models and explanation methods\n\tadv_models = dict()\n\tadv_explainers = dict()\n\n\t# Generator specifications\n\tgenerator_specs = {\"original_dim\": original_dim, \"intermediate_dim\": 8, \"latent_dim\": latent_dim, \"epochs\": 100,\\\n\t\t\t\t\t\"dropout\": 0.3, \"experiment\": \"Compas\"}\n\n\t# Train the adversarial models for LIME with f and psi (fill te dictionary)\n\tadv_models[\"Perturbation\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi()).train(xtrain, ytrain,\\\n\t\t\t\t\t\t\t\tcategorical_features=categorical_feature_indcs, feature_names=features, perturbation_multiplier=1)\n\tadv_models[\"DropoutVAE\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi(),\n\t\t\t\t\t\t\t\tgenerator = \"DropoutVAE\", generator_specs = generator_specs).train(xtrain, ytrain,\\\n\t\t\t\t\t\t\t\tcategorical_features=categorical_feature_indcs, integer_attributes = integer_attributes, feature_names=features, perturbation_multiplier=1)\n\tadv_models[\"RBF\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi(),\\\n\t\t\t\t\t\t\t\tgenerator = \"RBF\", generator_specs = generator_specs).train(xtrain,\\\n\t\t\t\t\t\t\t\tytrain, feature_names=features, categorical_features=categorical_feature_indcs)\n\tadv_models[\"Forest\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi(),\\\n\t\t\t\t\t\t\t\tgenerator = \"Forest\", generator_specs = generator_specs).train(xtrain,\\\n\t\t\t\t\t\t\t\tytrain, feature_names=features, categorical_features=categorical_feature_indcs)\n\n\t# Fill the dictionary with explanation methods\n\tfor generator in [\"Perturbation\", \"DropoutVAE\", \"RBF\", \"Forest\"]:\n\t\tadv_explainers[generator] = lime.lime_tabular.LimeTabularExplainer(xtrain, feature_names=adv_models[generator].get_column_names(),\\\n\t\t\t\t\t\t\t\tdiscretize_continuous=False,categorical_features=categorical_feature_indcs, generator=generator,\\\n\t\t\t\t\t\t\t\tgenerator_specs=generator_specs, dummies=dummy_indcs, integer_attributes=integer_attributes)\n\n\t# We check every combination of adversarial model/explanation method\n\tfor explainer in adv_explainers:\n\t\tadv_explainer = adv_explainers[explainer]\n\t\tfor model in adv_models:\n\t\t\tadv_lime = adv_models[model]\n\t\t\texplanations = []\n\t\t\tfor i in range(xtest.shape[0]):\n\t\t\t\texplanations.append(adv_explainer.explain_instance(xtest[i], adv_lime.predict_proba).as_list())\n\n\t\t\t# Display Results\n\t\t\tprint (f\"LIME Ranks and Pct Occurances (1 corresponds to most important feature) for one unrelated feature\\\n\t\t\tadversarial model: {model}, explainer: {explainer}:\")\n\t\t\tsummary = experiment_summary(explanations, features)\n\t\t\tprint (summary)\n\t\t\tprint (\"Fidelity:\", round(adv_lime.fidelity(xtest),2))\n\n\t\t\t# Save Resutls\n\t\t\tfile_name = f\"../Results/CompasLime/compasLimeSummary_adversarial_{model}_explainer_{explainer}.csv\"\n\t\t\twith open(file_name, \"w\") as output:\n\t\t\t\tw = csv.writer(output)\n\t\t\t\tfor key, val in summary.items():\n\t\t\t\t\tw.writerow([key] + [pair for pair in val])\n\t\n\t# Repeat the same thing for two features (innocuous_model_psi_two is used)\n\tadv_models = dict()\n\tadv_explainers = dict()\n\n\t# Generator 
specifications\n\tgenerator_specs = {\"original_dim\": original_dim, \"intermediate_dim\": 8, \"latent_dim\": latent_dim, \"epochs\": 100, \"dropout\": 0.3, \"experiment\": \"Compas\"}\n\n\t# Train the adversarial models for LIME with f and psi (fill te dictionary)\n\tadv_models[\"Perturbation\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi_two()).train(xtrain, ytrain,\\\n\t\t\t\t\t\t\t\tcategorical_features=categorical_feature_indcs, feature_names=features, perturbation_multiplier=1)\n\tadv_models[\"DropoutVAE\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi_two(),\n\t\t\t\t\t\t\t\tgenerator = \"DropoutVAE\", generator_specs = generator_specs).train(xtrain, ytrain,\\\n\t\t\t\t\t\t\t\tcategorical_features=categorical_feature_indcs, integer_attributes = integer_attributes, feature_names=features, perturbation_multiplier=1)\n\tadv_models[\"RBF\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi_two(),\\\n\t\t\t\t\t\t\t\tgenerator = \"RBF\", generator_specs = generator_specs).train(xtrain,\\\n\t\t\t\t\t\t\t\tytrain, feature_names=features, categorical_features=categorical_feature_indcs)\n\tadv_models[\"Forest\"] = Adversarial_Lime_Model(racist_model_f(), innocuous_model_psi_two(),\\\n\t\t\t\t\t\t\t\tgenerator = \"Forest\", generator_specs = generator_specs).train(xtrain,\\\n\t\t\t\t\t\t\t\tytrain, feature_names=features, categorical_features=categorical_feature_indcs)\n\n\t# Fill the dictionary with explanation methods\n\tfor generator in [\"Perturbation\", \"DropoutVAE\", \"RBF\", \"Forest\"]:\n\t\tadv_explainers[generator] = lime.lime_tabular.LimeTabularExplainer(xtrain, feature_names=adv_models[generator].get_column_names(),\n\t\t\t\t\t\t\t\t\t\tdiscretize_continuous=False, categorical_features=categorical_feature_indcs, generator=generator,\\\n\t\t\t\t\t\t\t\t\t\tgenerator_specs=generator_specs, dummies=dummy_indcs, integer_attributes=integer_attributes)\n\n\t# We check every combination of adversarial model/explanation method\n\tfor explainer in adv_explainers:\n\t\tadv_explainer = adv_explainers[explainer]\n\t\tfor model in adv_models:\n\t\t\tadv_lime = adv_models[model]\n\t\t\texplanations = []\n\t\t\tfor i in range(xtest.shape[0]):\n\t\t\t\texplanations.append(adv_explainer.explain_instance(xtest[i], adv_lime.predict_proba).as_list())\n\n\t\t\t# Display Results\n\t\t\tprint (f\"LIME Ranks and Pct Occurances (1 corresponds to most important feature) for two unrelated features\\\n\t\t\tadversarial model: {model}, explainer: {explainer}:\")\n\t\t\tsummary = experiment_summary(explanations, features)\n\t\t\tprint (summary)\n\t\t\tprint (\"Fidelity:\", round(adv_lime.fidelity(xtest),2))\n\n\t\t\t# Save Resutls\n\t\t\tfile_name = f\"../Results/CompasLime/compasLimeSummary2_adversarial_{model}_explainer_{explainer}.csv\"\n\t\t\twith open(file_name, \"w\") as output:\n\t\t\t\tw = csv.writer(output)\n\t\t\t\tfor key, val in summary.items():\n\t\t\t\t\tw.writerow([key] + [pair for pair in val])",
"def run_experiments():\n\n results = synthetic_experiment()\n results2 = unbalanced_synthetic_experiment()\n # results3, n_bank = bank_experiment('data/bank_raw.csv')\n # results4, n_pokec = pokec_experiment('data/soc-pokec-profiles.txt', 'data/soc-pokec-relationships.txt')\n\n with open('results/results_synthetic1.pickle', 'wb') as f:\n pickle.dump(results, f)\n\n with open('results/results_synthetic2.pickle', 'wb') as f:\n pickle.dump(results2, f)\n\n # with open('results/results_bank.pickle', 'wb') as f:\n # pickle.dump(results3, f)\n\n # with open('results/results_bank_args.pickle', 'wb') as f:\n # pickle.dump(n_bank, f)\n\n # with open('results/results_pokec.pickle', 'wb') as f:\n # pickle.dump(results4, f)\n\n # with open('results/results_pokec_args.pickle', 'wb') as f:\n # pickle.dump(n_pokec, f)",
"def run_experiment(nets, pars, equations, verbose=True, **kwargs):\n for subj in nets.keys():\n if verbose: print('Runing: subject %s' % (str(subj)))\n nets[subj].run_trials(pars, equations)",
"def conduct_experiment_4(self):\n self.experiment_4.conduct_experiment()",
"def package(expt_list=[\"train\", \"rec\", \"relax\", \"cr\"], mech_list=[\"3\"], rate=\"alpha\", power=[1], range = 1):\n expt_call_table = { \n \"train\" : re.TrainExpt, \n \"rec\" : re.RecoveryExpt, \n \"relax\" : re.RelaxExpt, \n \"cr\" : re.CrExpt, \n \"jumpfamily\": re.JFExpt,\n }\n ###paircomp is not going to work in this context (no run method)\n for mech in mech_list:\n \n for expt_type in expt_list:\n \n param = re.Parameters() # set defaults and then modify below\n \n ## 'sim_name' = \"trial\"\n \n ## 'rate_to_change' = 'd2op_plus'\n ## 'N_trials' = 10 \n ## 'hi_exp' = 1.5\n ## 'lo_exp' = -1.5\n \n ## 'MR_rate' = [(1,7), (0,5), (7,8), (5,6)]\n ## 'MR_avoid' = [(0,2)]\n ## 'zero_conc' = 0 \n ## 'high_conc' = 1e-2\n \n #edit simulation name to include mechanism used\n param.sim_name = expt_type + \"_m\" + mech\n \n #range of powers of ten to scan around initial rate\n param.hi_exp = range\n param.lo_exp = -range\n param.var_power = power\n param.N_trials = 10\n \n #edit rate to use specified //doesn't work for a series of names\n param.rate_to_change = rate\n \n param.MR_rate = [(0, 5), (0, 1)]\n print (\"\\n\\nExperiment name: \" + param.sim_name)\n \n experiment = expt_call_table[expt_type]\n e = experiment(mech, param)\n e.run()",
"def run_experiment_batch(exp_string, data=None, n_est=100, contam_rate=0.1,\n max_points=5000, point_step=500, max_features=25,\n feature_step=2, max_exps=20, r_mode='t', normalize=False, max_samples=None):\n\n #Set random seed for random. Note numpy random seed behaves differently\n #Each child inherits the same random state as parent when forking\n random.seed(seed_val)\n \n #To Store Main Experiment Results\n random_loss_l = []\n aws_loss_l = []\n aws_clem_loss_l = []\n aws_dif_loss_l = []\n diffi_loss_l = []\n shap_loss_l = []\n\n aws_exec_time_l = []\n aws_clem_exec_time_l = []\n aws_dif_exec_time_l = []\n diffi_exec_time_l = []\n shap_exec_time_l = []\n\n #To Store Other Experiment Results\n orig_dataset_l = []\n new_dataset_l = []\n rand_indices_l = []\n settings_ll = []\n features_ll = []\n ground_truth_l = []\n\n orig_aws_exp_matrix_l = []\n orig_aws_clem_exp_matrix_l = []\n orig_aws_dif_exp_matrix_l = []\n orig_shap_exp_matrix_l = []\n orig_diffi_exp_matrix_l = []\n\n new_aws_exp_matrix_l = []\n new_aws_clem_exp_matrix_l = []\n new_aws_dif_exp_matrix_l = []\n new_shap_exp_matrix_l = []\n new_diffi_exp_matrix_l = []\n\n aws_norm_diff_l = []\n aws_clem_norm_diff_l = []\n aws_dif_norm_diff_l = []\n diffi_norm_diff_l = []\n shap_norm_diff_l = []\n random_norm_diff_l = []\n \n #For saving purposes - \n save_string = exp_string + \"_\" + str(max_features) + \"features_\" + str(max_points) + \"maxPoints_rmode=\" + r_mode\n \n #Initialize the multiprocessing pool iterable that needs to be iterated over\n #The itertable is dependent on the type of experiment we are executing\n if exp_string == 'syn1':\n iterable = range(1000, max_points, point_step)\n pool_iterable = [(i, exp_string, val, max_features, None, None, n_est, contam_rate, r_mode, max_samples) \n for i, val in enumerate(iterable)]\n label = 'Dataset Size'\n\n elif exp_string == 'syn2':\n iterable = range(2, max_features, feature_step)\n pool_iterable = [(i, exp_string, max_points, val, None, None, n_est, contam_rate, r_mode, max_samples) \n for i, val in enumerate(iterable)]\n label = 'Number Of Attributes In The Dataset'\n\n elif exp_string == 'syn3':\n iterable = range(1, max_features)\n pool_iterable = [(i, exp_string, max_points, max_features, val, None, n_est, contam_rate, r_mode, max_samples)\n for i, val in enumerate(iterable)]\n #label = 'Max Number Of Candidate Attributes For Anomalisation' if r_mode == 't' else 'Number Of Anomalised Attributes'\n label = 'Number Of Anomalised Attributes'\n \n elif exp_string[:5] == 'real1':\n iterable = range(0, max_exps)\n pool_iterable = [(i, exp_string, None, None, None, data, n_est, contam_rate, r_mode, max_samples)\n for i in iterable]\n save_string = exp_string + \"_\" + r_mode #Change the string only if real dataset\n label = 'Iteration Count'\n normalize = True\n\n elif exp_string[:5] == 'real2':\n iterable = range(1, data.shape[1])\n pool_iterable = [(i, exp_string, None, None, val, data, n_est, contam_rate, r_mode, max_samples)\n for i, val in enumerate(iterable)]\n \n save_string = exp_string + \"_\" + r_mode #Change the string only if real\n #label = 'Max Number Of Candidate Attributes For Anomalisation' if r_mode == 't' else 'Number Of Anomalised Attributes'\n label = 'Number Of Anomalised Attributes'\n normalize = True\n\n else:\n raise ValueError(\"Invalid experiment details passed!\")\n \n #Use the multiprocessing library to parallelize and run experiments\n with Pool(cpu_count()) as p:\n result_map = p.starmap(run_experiment_single, pool_iterable)\n\n #Convert the 
map object FIRST into a list of dictionaries, and then \n #the list of dictionaries into a dictionary of lists\n result = defaultdict(list)\n result_list = list(result_map)\n result_list_sorted = sorted(result_list, key=lambda d: d['index'])\n {result[key].append(single_iter_dict[key]) for single_iter_dict in result_list_sorted for key in single_iter_dict.keys()}\n\n #For saving\n orig_dataset_l = result['orig_dataset']\n rand_indices_l = result['rand_indices']\n orig_aws_exp_matrix_l = result['orig_aws_exp_matrix']\n orig_aws_clem_exp_matrix_l = result['orig_aws_clem_exp_matrix']\n orig_aws_dif_exp_matrix_l = result['orig_aws_dif_exp_matrix']\n orig_shap_exp_matrix_l = result['orig_shap_exp_matrix']\n orig_diffi_exp_matrix_l = result['orig_diffi_exp_matrix']\n\n new_dataset_l = result['new_dataset']\n settings_ll = result['settings_l']\n features_ll = result['features_l']\n\n new_aws_exp_matrix_l = result['new_aws_exp_matrix']\n new_aws_clem_exp_matrix_l = result['new_aws_clem_exp_matrix']\n new_aws_dif_exp_matrix_l = result['new_aws_dif_exp_matrix']\n new_shap_exp_matrix_l = result['new_shap_exp_matrix']\n new_diffi_exp_matrix_l = result['new_diffi_exp_matrix']\n\n aws_norm_diff_l = result['aws_norm_diff']\n aws_clem_norm_diff_l = result['aws_clem_norm_diff']\n aws_dif_norm_diff_l = result['aws_dif_norm_diff']\n shap_norm_diff_l = result['shap_norm_diff']\n diffi_norm_diff_l = result['diffi_norm_diff']\n random_norm_diff_l = result['random_exp_matrix']\n\n ground_truth_l = result['ground_truth']\n\n random_loss_l = result['random_rmse_loss']\n aws_loss_l = result['aws_rmse_loss']\n aws_clem_loss_l = result['aws_clem_rmse_loss']\n aws_dif_loss_l = result['aws_dif_rmse_loss']\n diffi_loss_l = result['diffi_rmse_loss']\n shap_loss_l = result['shap_rmse_loss']\n\n aws_exec_time_l = result['aws_exec_time']\n aws_clem_exec_time_l = result['aws_clem_exec_time']\n aws_dif_exec_time_l = result['aws_dif_exec_time']\n shap_exec_time_l = result['shap_exec_time']\n diffi_exec_time_l = result['diffi_exec_time']\n\n total_random_rmse_loss = np.sum(random_loss_l)\n total_aws_rmse_loss = np.sum(aws_loss_l)\n total_aws_clem_rmse_loss = np.sum(aws_clem_loss_l)\n total_aws_dif_rmse_loss = np.sum(aws_dif_loss_l) \n total_diffi_rmse_loss = np.sum(diffi_loss_l)\n total_shap_rmse_loss = np.sum(shap_loss_l)\n\n avg_aws_exec_time = mean(aws_exec_time_l)\n avg_aws_clem_exec_time = mean(aws_clem_exec_time_l)\n avg_aws_dif_exec_time = mean(aws_dif_exec_time_l)\n avg_shap_exec_time = mean(shap_exec_time_l)\n avg_diffi_exec_time = mean(diffi_exec_time_l)\n\n #Printing final status update\n print(\"\\n[INFO] Final Loss Analysis...\")\n print(f\"Our method [AWS] resulted in an RMSE loss of {total_aws_clem_rmse_loss}\")\n print(f\"SHAP method resulted in an RMSE loss of {total_shap_rmse_loss}\")\n print(f\"DIFFI method resulted in an RMSE loss of {total_diffi_rmse_loss}\")\n print(f\"Random explainer resulted in an RMSE loss of {total_random_rmse_loss}\")\n\n print(\"\\n[INFO] Final Time Analysis...\")\n print(f\"Our method [AWS]: {avg_aws_clem_exec_time}\")\n print(f\"SHAP method: {avg_shap_exec_time}\")\n print(f\"DIFFI method: {avg_diffi_exec_time}\\n\")\n\n #Saving the files\n list_of_main_files = ['random_loss_l','aws_loss_l','aws_clem_loss_l','aws_dif_loss_l','diffi_loss_l',\n 'shap_loss_l','aws_exec_time_l','aws_clem_exec_time_l','aws_dif_exec_time_l',\n 'diffi_exec_time_l','shap_exec_time_l']\n \n list_of_support_files = ['orig_dataset_l','new_dataset_l', 'rand_indices_l','settings_ll','features_ll',\n 
'ground_truth_l','orig_aws_exp_matrix_l','orig_aws_clem_exp_matrix_l','orig_aws_dif_exp_matrix_l',\n 'orig_shap_exp_matrix_l','orig_diffi_exp_matrix_l','new_aws_exp_matrix_l','new_aws_clem_exp_matrix_l',\n 'new_aws_dif_exp_matrix_l','new_shap_exp_matrix_l','new_diffi_exp_matrix_l','aws_norm_diff_l',\n 'aws_clem_norm_diff_l','aws_dif_norm_diff_l','diffi_norm_diff_l','shap_norm_diff_l', 'random_norm_diff_l']\n\n (batch_results_dict_main, batch_results_dict_support) = save_files(list_of_main_files, list_of_support_files, save_string, locals())\n \n #Plotting the results\n result_plotter(lists_of_files=[[aws_clem_loss_l, diffi_loss_l, shap_loss_l, random_loss_l],\n [aws_clem_exec_time_l, shap_exec_time_l, diffi_exec_time_l]], \n plot_mode=['loss', 'time'], \n fig_save_names=[exp_string + '_Comparing Loss Across Methods', exp_string + '_Comparing Execution Time Across Methods'],\n plot_titles = ['Comparing Loss Across Methods','Comparing Execution Time Across Methods'],\n x_label=label, x_range=iterable, normalize=normalize)\n\n return batch_results_dict_main, batch_results_dict_support",
"def run(self):\r\n for _ in range(self.trials):\r\n self.makeSweep()\r\n time.sleep(self.unitConverter(self.sweepDelay))\r\n print(\"Done!\")",
"def step_given_11(context):\n \n # create test aggregate\n agg = qr.TestAggregate(\"dimer-2-env\")\n agg.build()\n \n # get the associated time axis and the relaxation tensor and Hamiltonian\n time = agg.get_SystemBathInteraction().TimeAxis\n RR, HH = agg.get_RelaxationTensor(time, relaxation_theory=\"stR\")\n \n context.H = HH\n \n # define and calculate evolution superoperator\n U = qr.qm.EvolutionSuperOperator(ham=HH, relt=RR)\n #U.calculate()\n \n context.U = U",
"def main():\n\n # TODO: define:\n # step+noize\n # log scale instead of uniform\n\n # Define parametter: [min, max]\n dictParams = {\n \"batchSize\": [int, [1, 3]],\n \"learningRate\": [float, [1, 3]]\n }\n\n # Training multiple times with different parametters\n for i in range(10):\n # Generate the command line arguments\n trainingArgs = \"\"\n for keyArg, valueArg in dictParams:\n value = str(random(valueArg[0], max=valueArg[1]))\n trainingArgs += \" --\" + keyArg + \" \" + value\n\n # Launch the program\n os.run(\"main.py\" + trainingArgs)\n\n # TODO: Save params/results ? or already inside training args ?",
"def vqe_experiment(on_chip=False,\n processor_name=\"rainbow\",\n processor_gateset=\"fsim\", #cg.FSIM_GATESET,\n optimize_circuit=\"syc_layers\", #cg.optimized_for_sycamore,\n qubits=None,\n nh=2,\n nv=1,\n t=1.,\n U=2.,\n nocc=2,\n nocc_1=None,\n nocc_2=None,\n opt_algo = \"mod_spsa\",\n num_trials_setting=None, #[100, 1000, 10000],\n grad_evals_setting=None,#[1, 1, 2],\n max_evals_setting=None, #[500, 100, 100],\n split=False,\n opt_args=None,\n d = 1,\n initial_params=None,\n measurement_func=None,\n initial_prog_mapping=None,\n chosen_ansatz=None,\n error_correction={},\n file_desc=None,\n settings_file=None,\n objective_func=\"energy\",\n objective_args=None,\n exact=False,\n benchmark=None,\n save_samples=False,\n save_tflo_samples=False,\n notes=\"\",\n collect_data=False,\n **kwargs\n ):\n # files and time\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n if file_desc is None:\n extra_desc = timestr\n else:\n if len(file_desc) > 40:\n print(\"Please limit the file_desc to less than 40 characters\")\n sys.exit()\n extra_desc = file_desc + \"-\" + time.strftime(\"%m%d-%H%M%S\")\n tick = time.perf_counter()\n\n # data location\n data_struct = generate_experiment_data_structure(nh, nv, VQE_FOLDER,\n extra_desc=extra_desc)\n\n settings = locals()\n experiment_metadata = settings.copy()\n\n logger = start_logger(logfile=data_struct.logger)\n Tee(data_struct.stdout_logger, \"a\")\n\n experiment_metadata[\"tag\"] = VERSION\n experiment_metadata[\"start_time\"] = timestr\n\n error_correction_settings = ERROR_CORRECTION_DICTIONARY\n error_correction_settings.update(error_correction)\n error_correction_metadata = {}\n error_correction_metadata[\"error_correction\"] = error_correction_settings\n\n optimizer = optimizer_def[optimize_circuit][0]\n optimizer_kwargs = optimizer_def[optimize_circuit][1]\n\n ## SETTING THE DEVICE\n if on_chip:\n engine = cirq.google.Engine(project_id=PROJECT_ID)\n processor = engine.get_processor(processor_name)\n experiment_metadata[\"device\"] = processor_name\n proc_gateset = cg.NAMED_GATESETS[processor_gateset]\n device = processor.get_device([proc_gateset])\n spec = processor.get_device_specification()\n experiment_metadata[\"specification\"] = MessageToDict(spec)\n latest_calibration = processor.get_current_calibration()\n experiment_metadata[\"calibration\"] = pd.DataFrame(latest_calibration.__dict__).to_json()\n print(device)\n qc = engine\n else:\n qc = cirq.Simulator()\n experiment_metadata[\"device\"] = \"simulator\"\n device = None\n\n\n\n # SETTING VARIOUS PARAMETERS\n # benchmarks\n benchmark_file = None\n if on_chip:\n with open(data_struct.calibration_filename, 'w') as outfile:\n json.dump(experiment_metadata[\"calibration\"], outfile, cls=NumpyEncoder, indent=4)\n # benchmark chosen qubits\n if benchmark == \"set\":\n total_qs = len(qubits)\n up = qubits[:total_qs//2]\n down = qubits[total_qs//2:]\n energies_qubits = {}\n for (q1, q2), (q3, q4) in zip(zip(up[0:total_qs//2-1], up[1:]),\n zip(down[0:total_qs//2-1], down[1:])):\n try:\n energies_qubits[str(position)] =benchmark_single_qubits(qubits=[[q1, q2, q3, q4]],\n project_id=PROJECT_ID,\n processor_name=processor_name,\n processor_gateset=processor_gateset)\n except:\n break\n experiment_metadata[\"benchmark\"] = energies_qubits\n if benchmark == \"full\":\n energies_qubits = benchmark_all_qubits(project_id=PROJECT_ID,\n processor_name=processor_name,\n processor_gateset=processor_gateset,\n save_file=False)\n experiment_metadata[\"benchmark\"] = energies_qubits\n with 
open(data_struct.benchmark_filename, 'w') as outfile:\n json.dump(experiment_metadata[\"benchmark\"], outfile, cls=NumpyEncoder, indent=4)\n benchmark_file = data_struct.benchmark_filename\n\n # number of parameters in layer\n num_params = 1 + (nh>1) + (nh>2) + (nv>1) + (nv>2)\n experiment_metadata[\"num_params\"] = num_params\n # load standard settings for occupation number, t, U, ideal parameters and\n # energy\n nocc_t, t_t, U_t, _ , sim_params, sim_E = standard_settings[(nh, nv)]\n experiment_metadata[\"expected_params\"] = sim_params\n experiment_metadata[\"expected_energy\"] = sim_E\n # occupation number\n if nocc is None:\n nocc = nocc_t\n experiment_metadata[\"nocc\"] = nocc\n # t\n # saving samples...\n samples_filename = \"\"\n if save_samples:\n samples_filename = data_struct.samples_filename\n if t is None:\n t = t_t\n experiment_metadata[\"t\"] = t\n # U\n if U is None:\n U = U_t\n experiment_metadata[\"U\"] = U\n # occupation number of spin up/spin down\n if nocc_1 is None:\n nocc_1 = nocc//2 + nocc%2\n nocc_2 = nocc//2\n experiment_metadata[\"nocc_1\"] = nocc_1\n experiment_metadata[\"nocc_2\"] = nocc_2\n # number of layers in ansatz\n if d is None:\n d = len(params)\n experiment_metadata[\"d\"] = d\n # qubits on which we're running\n if qubits is None:\n qubits, qubit_map = QUBITS_ASSIGNMENT[\"default\"](nh, nv)\n elif qubits[:4] == \"best\":\n if on_chip:\n all_qubits = device.qubit_set()\n if len(qubits) >= 6:\n metric_name = qubits[5:]\n if metric_name[:16] == \"benchmark_metric\":\n if metric_name[17:21] == \"time\":\n timestr = metric_name[22:]\n benchmark_file = \"experiments/benchmark/{timestr}/benchmark.json\"\n else:\n if benchmark != \"full\":\n print(\"If benchmark is being used without a preset file, full benchmark must be run! 
Set 'benchmark' option in configuration file to 'full'\")\n sys.exit()\n else:\n metric_name = \"two_qubit_parallel_sqrt_iswap_gate_xeb_average_error_per_cycle\"\n quality_function = find_best_rect\n if chosen_ansatz in [\"one_by_n_zigzag\", \"two_by_n_zigzag\"]:\n quality_function = find_best_zigzag\n qubits, qubit_map = find_best_qubit_list(latest_calibration, all_qubits,\n nh, nv, metric_name,\n benchmark=benchmark_file,\n quality_function=quality_function)\n print(f\"Using 'best' qubits {qubits}\")\n else:\n qubits, qubit_map = QUBITS_ASSIGNMENT[\"default\"](nh, nv)\n elif qubits[:6] == \"zigzag\":\n qubits_details = qubits.split()\n row = int(qubits_details[1])\n col = int(qubits_details[2])\n orientation = qubits_details[3]\n print(f\"Start at: ({row}, {col}), with {orientation}\")\n qubits, qubit_map = generate_zigzag_qubits(cirq.GridQubit(row, col), orientation, nh*nv)\n print(f\"Using qubits {qubits}\")\n else:\n # Use a grid of qubits starting at a particular position\n print(f\"Qubits: {qubits}\")\n qubits, qubit_map = generate_grid_qubits(qubits, None)\n print(f\"Using qubits {qubits}\")\n\n print(f\"Using qubits, {qubit_map}:{qubits}\")\n if qubit_map == \"site\":\n qubits = remap_qubits(qubits, map_site_to_JW, [nh, nv])\n print(f\"Qubits, remapped, using:{qubits}\")\n\n # Show the qubits that we're actually using in the experiment.\n if on_chip:\n print(color_qubits_in_grid(str(device), qubits))\n\n experiment_metadata[\"qubits\"] = str(qubits)\n # initial parameters to start optimization at\n if initial_params is None:\n if d > 0:\n initial_params = np.full((d, num_params), 1 / (d * t))\n else:\n initial_params = []\n params = initial_params\n experiment_metadata[\"params\"] = params\n # ansatz that is being used\n experiment_metadata[\"chosen_ansatz\"] = chosen_ansatz\n if chosen_ansatz is None:\n chosen_ansatz = any_h_by_v_explicit(qubits, nh, nv)\n experiment_metadata[\"chosen_ansatz\"] = \"h_by_v_explicit\"\n elif chosen_ansatz in [\"one_by_n_zigzag\", \"two_by_n_zigzag\"]:\n chosen_ansatz = NAMED_ANSATZ[chosen_ansatz](qubits, nh, nv)\n else:\n chosen_ansatz = NAMED_ANSATZ[chosen_ansatz]\n \n # measurements being applied to find energy\n measurement_func = create_all\n \n # initial state mapping\n if initial_prog_mapping == None or initial_prog_mapping == \"JW\":\n initial_prog_mapping = None\n measurements = measurement_func(nh, nv)\n kwargs = {}\n # loading previous parameters for spsa\n if opt_algo == \"spsa\" or opt_algo == \"mod_spsa\":\n if (objective_func in [\"set_parameters\", \"exact_parameters\", \"parameters_list\"]):\n print(\"Loading parameters...\")\n with open(\"parameters.json\") as json_file: ## LOAD PARAMETERS\n loaded_params = json.load(json_file)\n kwargs[\"given_params\"] = loaded_params\n # result analysis functions\n if exact:\n if opt_algo == \"mgd\" or opt_algo == \"bayes_mgd\":\n analysis_fns = analyze_exact_mgd\n else:\n analysis_fns = analyze_exact\n else:\n if opt_algo == \"mgd\" or opt_algo == \"bayes_mgd\":\n analysis_fns = analyze_mgd\n else:\n analysis_fns = analyze\n\n ## SAVE SETTINGS\n with open(data_struct.settings_filename, 'w') as outfile:\n json.dump(settings, outfile, cls=NumpyEncoder, indent=4)\n\n\n\n ## ON CHIP JOB DETAILS AND RUN ARGUMENTS\n job_desc = {}\n temp_run_args = {}\n tflo_run_args = {}\n run_args=None\n if exact:\n retrieve_objective = NAMED_OBJECTIVE[objective_func](run_executables_func=run_executables_exact,\n extract_values_func=extract_values_exact)\n run_args = {}\n run_args[\"qubit_order\"] = qubits\n elif 
opt_algo == \"mgd\" or opt_algo == \"bayes_mgd\":\n retrieve_objective = NAMED_OBJECTIVE[objective_func](extract_values_func=extract_values_mgd)\n else:\n retrieve_objective = NAMED_OBJECTIVE[objective_func]()\n if on_chip:\n job_id = f\"{extra_desc}\"\n program_id = job_id\n# experiment_metadata[\"job_id\"] = job_id\n# experiment_metadata[\"program_id\"] = program_id\n job_labels= {\n \"username\": os.getlogin(),\n \"vqe\": f\"{nh}x{nv}\",\n \"compressed\": \"no\",\n \"opt-alg\":opt_algo,\n \"program\": \"main\"\n }\n run_args = {\n \"job_id\":job_id,\n \"program_id\": program_id,\n \"job_labels\":job_labels,\n \"processor_ids\":[processor.processor_id],\n \"gate_set\":proc_gateset\n }\n kwargs.update({\"run_args\": run_args})\n job_desc = {\n \"job_id\": job_id,\n \"program_id\": program_id,\n \"qubits\": str(qubits),\n }\n # noise run arguments\n temp_run_args = {\n \"job_id\":job_id,\n \"program_id\": program_id,\n \"job_labels\": {\n \"username\": os.getlogin(),\n \"vqe\": f\"{nh}x{nv}\",\n \"program\": \"mitigation\"\n },\n \"processor_ids\":[processor.processor_id],\n \"gate_set\":proc_gateset\n }\n tflo_run_args = dict(temp_run_args)\n tflo_run_args[\"job_labels\"][\"program\"] = \"tflo_calib\"\n tflo_run_args[\"program_id\"] = \"tflo-\" + tflo_run_args[\"program_id\"]\n\n # Get which pairs we want to measure - needed for error correction\n pairs_set = set()\n pairs_dict = {}\n for (measurement_type, measurement) in measurements.items():\n for pair in measurement.pairs:\n pairs_set.add(pair)\n pairs_dict[measurement_type] = measurement.pairs\n pairs_set = list(pairs_set)\n\n print(f\"Got pairs set {pairs_set}\")\n\n ## ERROR CORRECTION SETTINGS\n \n mitigation_kwargs = {}\n \n postselection = error_correction_settings[\"occupation_number\"] or error_correction_settings[\"spin_type\"]\n if postselection:\n mitigation_kwargs[\"sample_error_mitigation\"] = sample_error_mitigation_func(error_correction_settings,\n nocc=nocc,\n nocc_1=nocc_1,\n nocc_2=nocc_2)\n\n if error_correction_settings[\"tflo\"]:\n mitigation_kwargs[\"tflo_points\"] = error_correction_settings.get(\"tflo_points\")\n mitigation_kwargs[\"tflo_exact_energies\"] = error_correction_settings.get(\"tflo_exact_energies\")\n\n\n ## INITIAL STATE SETTINGS\n\n initial_prog_instructions = initial_state_diff_spin_types(nh, nv, t, nocc_1,\n nocc_2, remap=qubits,\n mapping=initial_prog_mapping)\n initial_prog = cirq.Circuit(initial_prog_instructions())\n initial_prog = optimize_in_layers(initial_prog, **{})\n temp_prog = cirq.Circuit(initial_prog_instructions())\n experiment_metadata[\"initial_state_circuit\"] = cirq.to_json(temp_prog.moments)\n experiment_metadata[\"initial_state_circuit_text\"] = moment_diagram(temp_prog)\n\n tock = time.perf_counter()\n print(f\"---Starting to do calibration: delta {tock-tick}\")\n tick = time.perf_counter()\n\n ## MEASUREMENT AND CIRCUIT SETTINGS\n measurement_set = []\n experiment_metadata[\"measurement_circuit\"] = {}\n \n # check onsite is the first element of the dictionary!\n if \"onsite0\" not in measurements:\n print(\"Onsite type has to be in measurements\")\n\n measurement_type = \"onsite0\"\n measurement = measurements[measurement_type]\n ansatz_compiled = ansatz_multilayer_circuit(chosen_ansatz, params, qubits)\n reqs_circ = create_executable(cirq.Circuit(),\n ansatz_compiled,\n params,\n prep = cirq.Circuit(),\n qubits = qubits,\n remap = qubits,\n optimizer = optimizer,\n optimizer_kwargs = optimizer_kwargs)\n reqs_circ2 = create_executable(initial_prog,\n ansatz_compiled,\n 
params,\n prep = cirq.Circuit(),\n qubits = qubits,\n remap = qubits,\n optimizer = optimizer,\n optimizer_kwargs = optimizer_kwargs)\n\n tock = time.perf_counter()\n print(f\"---Starting to set up measurements: delta {tock-tick}\")\n tick = time.perf_counter()\n \n initial_prog = cirq.Circuit(initial_prog_instructions())\n initial_prog = optimize_in_layers(initial_prog, **{})\n \n # In some cases, we need to merge the final measurement transformation\n # with a previous layer of hopping terms.\n if nh == 2 and nv == 1:\n special_measurement_type = \"horiz0\"\n measurement_ansatz = hopping_measurement_ansatz(qubits, nh, nv, 0)\n if nh > 2 and nv == 1:\n special_measurement_type = \"horiz1\"\n measurement_ansatz = hopping_measurement_ansatz(qubits, nh, nv, 1)\n if nv >= 2:\n special_measurement_type = \"vert02\"\n measurement_ansatz = hopping_measurement_ansatz(qubits, nh, nv, 1)\n\n for (measurement_type, measurement) in measurements.items():\n if measurement.prep:\n if measurement_type == special_measurement_type:\n measurement_prog = measurement_ansatz\n measurement_prog_instructions = lambda *args: []\n else:\n measurement_prog_instructions = measurement.prep(measurement.pairs, qubits)\n measurement_prog = cirq.Circuit(measurement_prog_instructions())\n measurement_prog = optimize_in_layers(measurement_prog, **{})\n else:\n measurement_prog = None\n measurement_set.append(Circuits(qc,\n initial_prog,\n measurement_prog,\n chosen_ansatz,\n measurement_type,\n analysis_fns(measurement.analysis,\n measurement.pairs,\n nh, nv,\n t=t, U=U)\n ))\n if measurement.prep:\n temp_prog = cirq.Circuit(measurement_prog_instructions())\n if __debug__:\n module_logger.debug(f\"{measurement_type}:\\n{temp_prog}\")\n experiment_metadata[\"measurement_circuit\"][measurement_type] = cirq.to_json(temp_prog.moments)\n experiment_metadata[\"measurement_circuit_text\"] = moment_diagram(temp_prog)\n else:\n experiment_metadata[\"measurement_circuit\"][measurement_type] = \"\"\n experiment_metadata[\"measurement_circuit_text\"] = \"\"\n if __debug__ and measurement_type != special_measurement_type:\n module_logger.debug(f\"Executables, measurement {measurement_type}:\")\n ansatz_compiled = ansatz_multilayer_circuit(chosen_ansatz, params, qubits)\n circ = create_executable(initial_prog,\n ansatz_compiled,\n params,\n prep = measurement_prog,\n qubits = qubits,\n remap = qubits,\n optimizer = optimizer,\n optimizer_kwargs = optimizer_kwargs,\n print_gates=True)\n module_logger.debug(moment_diagram(circ))\n\n ## EXTRA ARGUMENTS FOR OPTIMIZATION\n # saving samples...\n samples_filename = \"\"\n if save_samples:\n samples_filename = data_struct.samples_filename\n tflo_samples_filename = \"\"\n if save_tflo_samples:\n tflo_samples_filename = data_struct.tflo_samples_filename\n # modified SPSA\n if opt_algo == \"mod_spsa\":\n measurement_sets = [measurement_set]*len(num_trials_setting)\n opt_details = zip(num_trials_setting, grad_evals_setting,\n max_evals_setting, measurement_sets)\n kwargs[\"spsa_details\"] = opt_details\n kwargs[\"spsa_kwargs\"] = dict(split=split, spsa_args=opt_args, repetitions=repetitions,\n save_file=True, save_filename=data_struct.processed_filename,\n num_layers=len(params), num_params=len(params[0]))\n kwargs[\"save_file\"] = True\n kwargs[\"save_filename\"] = data_struct.processed_filename\n args = (qubits, measurement_set, 10000, 1,\n d, num_params, optimizer, optimizer_kwargs,\n mitigation_kwargs, run_args, save_samples,\n samples_filename)\n # standard SPSA\n if opt_algo == \"spsa\":\n 
opt_details = measurement_set\n kwargs[\"grad_evals\"] = grad_evals_setting[0]\n kwargs[\"max_evals\"] = max_evals_setting[0]\n kwargs[\"opt_args\"] = opt_args\n kwargs[\"save_file\"] = True\n kwargs[\"save_filename\"] = data_struct.processed_filename\n kwargs[\"num_layers\"] = len(params)\n kwargs[\"num_params\"] = len(params[0])\n args = (qubits, measurement_set, 10000, 1,\n d, num_params, optimizer, optimizer_kwargs,\n mitigation_kwargs, run_args, save_samples,\n samples_filename)\n # mgd and bayes_mgd\n if opt_algo == \"mgd\" or opt_algo == \"bayes_mgd\":\n kwargs[\"max_evals\"] = max_evals_setting[0]\n kwargs[\"split\"] = split\n kwargs[\"save_file\"] = True\n kwargs[\"save_filename\"] = data_struct.processed_filename\n kwargs[\"opt_args\"] = opt_args\n args = (qubits, measurement_set, num_trials_setting[0], 1,\n d, num_params, optimizer, optimizer_kwargs,\n mitigation_kwargs, run_args, save_samples,\n samples_filename)\n params = [param for subparams in params for param in subparams]\n\n # no optimization\n if opt_algo == \"none\":\n kwargs[\"nmeas\"] = num_trials_setting[0]\n kwargs[\"split\"] = split\n kwargs[\"opt_args\"] = opt_args\n args = (qubits, measurement_set, num_trials_setting[0], 1,\n d, num_params, optimizer, optimizer_kwargs,\n mitigation_kwargs, run_args, save_samples,\n samples_filename)\n kwargs[\"save_file\"] = True\n kwargs[\"save_filename\"] = data_struct.processed_filename\n if opt_algo not in [\"none\", \"mod_spsa\", \"spsa\", \"mgd\", \"bayes_mgd\"]:\n args = ((1, [(1,0)]), qubits, measurement_set, 10000, 1,\n d, num_params, optimizer, optimizer_kwargs,\n mitigation_kwargs, run_args, save_samples,\n samples_filename)\n params = [param for subparams in params for param in subparams]\n kwargs[\"maxiter\"] = 100\n\n # RUN EXPERIMENTS\n with open(data_struct.jobs_filename, 'w') as outfile:\n json.dump(job_desc, outfile, indent=4)\n \n tock = time.perf_counter()\n print(f\"---Starting to run VQE: delta {tock-tick}\")\n tick = time.perf_counter()\n\n start_time = time.strftime(\"%Y%m%d-%H%M%S\")\n experiment_metadata[\"optimization_start_time\"] = start_time\n result = scipy.optimize.minimize(retrieve_objective, params, method=NAMED_OPT[opt_algo],\n options=kwargs, args=args)\n theta = result.x\n min_energy = result.fun\n end_time = time.strftime(\"%Y%m%d-%H%M%S\")\n experiment_metadata[\"optimization_end_time\"] = end_time\n print(f\"***{min_energy} {time.time()}\")\n\n tock = time.perf_counter()\n print(f\"---VQE complete: delta {tock-tick}\")\n tick = time.perf_counter()\n \n # Mitigate errors by training with fermionic linear optics,\n # unless we're running with exact measurements anyway.\n if error_correction_settings[\"tflo\"] and not exact:\n # Prepare exact analysis functions.\n exact_measurement_set = []\n for (measurement_type, measurement) in measurements.items():\n if measurement.prep:\n measurement_prog = measurement.prep(measurement.pairs, qubits)\n else:\n measurement_prog = None\n exact_measurement_set.append(Circuits(qc,\n initial_prog,\n measurement_prog,\n chosen_ansatz,\n measurement_type,\n analyze_exact(measurement.analysis,\n measurement.pairs,\n nh, nv,\n U=U)\n ))\n if opt_args is not None and \"single\" in opt_args:\n given_params = opt_args[\"single\"]\n elif opt_args is not None and \"single_flo\" in opt_args:\n given_params = opt_args[\"single_flo\"]\n else:\n given_params = theta\n \n if opt_algo == \"mgd\" or opt_algo == \"bayes_mgd\":\n given_params = [list(given_params)]\n extract_values_func = extract_values_mgd\n else:\n 
extract_values_func = extract_values\n \n mitigate_by_tflo(qubits, given_params, measurement_set,\n exact_measurement_set,\n num_layers=d,\n num_params=num_params,\n optimizer=optimizer,\n optimizer_kwargs=optimizer_kwargs,\n mitigation_kwargs=mitigation_kwargs,\n run_args=tflo_run_args,\n min_energy=min_energy,\n n=nh*nv,\n U=U,\n filename=data_struct.tflo_filename,\n extract_values_func=extract_values_func,\n save_tflo_samples=save_tflo_samples,\n tflo_samples_filename=tflo_samples_filename)\n\n\n\n with open(data_struct.metadata_filename, 'w') as outfile:\n json.dump(experiment_metadata, outfile, cls=NumpyEncoder, indent=4)\n with open(data_struct.ec_filename, 'w') as outfile:\n json.dump(error_correction_metadata, outfile, cls=NumpyEncoder, indent=4)\n\n print(f\"Data collected can be found at {data_struct.experiment_folder}\")\n return theta, min_energy, data_struct.experiment_folder",
"def test_MultiRun():\n\n # For each optimizer.\n for optimizer in [PSO, MOL, DE, LUS, PS]:\n # For different search-space dimensionalities.\n for dim in [2, 47]:\n # For different display intervals.\n for display_interval in [0, 11, 167]:\n # For different number of fitness evaluations.\n for max_evaluations in [53, 10391]:\n # For different fitness-trace-lengths.\n for trace_len in [0, 101]:\n # For different number of optimization runs.\n for num_runs in [1, 5]:\n # For parallel and non-parallel.\n for parallel in [True, False]:\n # Take a benchmark problem at random.\n problem_class = random.choice(Problem.all_benchmark_problems)\n problem = problem_class(dim=dim)\n\n # Run the test using this configuration.\n yield _do_test_MultiRun, optimizer, problem, dim, max_evaluations, display_interval, trace_len, parallel, num_runs",
"def main():\n\n # Process command line arguments\n parser = argparse.ArgumentParser(description='Integrate planets and selected asteroids in Rebound '\n 'with initial conditions from Horizons (planets) and asteroid orbital elements.')\n parser.add_argument('n0', nargs='?', metavar='n0', type=int, default=0,\n help='the first asteroid number to process')\n parser.add_argument('n_ast', nargs='?', metavar='B', type=int, default=1000,\n help='the number of asteroids to process in this batch'),\n parser.add_argument('mode', nargs='?', metavar='MODE', type=str, default='DB',\n help='Mode of operation. Three valid choices DB, CSV, and INS. '\n 'DB: insert to DB via CSVs.'\n 'CSV: Calculate and save to CSVs.'\n 'INS: Insert CSVs from previous run into DB.'),\n parser.add_argument('--epoch', nargs='?', metavar='EP', type=int, default=59000,\n help='epoch of the base simulation that is integrated forward and backwards, as an MJD')\n parser.add_argument('--mjd0', nargs='?', metavar='t0', type=int, default=48000, # originally 40400\n help='epoch of the first date in the integration, as an MJD.')\n parser.add_argument('--mjd1', nargs='?', metavar='t1', type=int, default=63000, # originally 77600\n help='epoch of the last date in the integration, as an MJD.')\n parser.add_argument('--interval', nargs='?', metavar='SPD', type=int, default=4,\n help='the number of days between frames saved to the database')\n parser.add_argument('--run_all', const=True, default=False, action='store_const',\n help='when true, run ALL asteroids; default is just to integrate missing ones.')\n parser.add_argument('--quiet', const=True, default=False, action='store_const',\n help='run in quiet mode (hide progress bar')\n parser.add_argument('--dry_run', dest='dry_run', action='store_const', const=True, default=False,\n help='Dry run: report parsed inputs then quit.')\n \n # Unpack command line arguments\n args = parser.parse_args()\n \n # Block of asteroids to integrate and epoch\n n0: int = args.n0\n n1: int = n0 + args.n_ast\n epoch: int = args.epoch\n\n # Operation mode\n mode: str = args.mode.upper()\n if mode not in ('DB', 'CSV', 'INS'):\n raise ValueError(\"Mode must be one of 'DB', 'CSV' or 'INS'.\")\n mode_description_tbl = {\n 'DB': 'Insert to database via CSVs.',\n 'CSV': 'Calculate and save to CSVs to disk; must insert them later.',\n 'INS': 'Insert CSVs from previous run into database.',\n }\n mode_description: str = mode_description_tbl[mode]\n\n # Flags\n run_all: bool = args.run_all\n missing: bool = not run_all\n verbose: bool = not args.quiet\n progbar: bool = not args.quiet\n dry_run: bool = args.dry_run\n\n # Date range for integration\n mjd0: int = args.mjd0\n mjd1: int = args.mjd1\n interval: int = args.interval\n # Epoch as a date for reporting\n epoch_dt = mjd_to_date(epoch)\n mjd0_dt = mjd_to_date(mjd0)\n mjd1_dt = mjd_to_date(mjd1)\n width_yrs: float = (mjd1 - mjd0) / 365.25\n times_saved: int = np.int32(np.ceil((mjd1-mjd0) / interval))\n\n # Integrator settings\n integrator: str = 'ias15'\n epsilon: float = 2.0**-32\n\n # Report arguments and integrator settings\n if verbose:\n print_stars()\n print(f'*n0 : {n0:06d}')\n print(f'*n1 : {n1:06d}')\n print(f'*epoch : {epoch} ({epoch_dt})')\n print(f' date range mjd : {mjd0} to {mjd1}')\n print(f'*interval : {interval}')\n print(f' times to save : {times_saved}')\n print(f'*mode : {mode}: {mode_description}')\n print(f'*run_all : {run_all}')\n print(f'*dry_run : {dry_run}')\n\n # Quit early if it was a dry run\n if dry_run:\n print('\\n This was a dry 
run. Bye!')\n sys.exit()\n\n # Set chunk_size for writing out DataFrame to database\n chunk_size: int = 2**19\n\n # Simulation with initial configuration for planets and selected asteroids; only take missing ones\n sim = make_sim_asteroids(epoch=epoch, n0=n0, n1=n1, missing=missing)\n \n # Delegate to appropriate functions depending on the mode\n # We need to integrate the asteroid orbits in either DB or CSV mode, but not in INS mode.\n # We also need to save the DataFrame to CSV in these modes.\n if mode in ('DB', 'CSV'):\n # Integrate the asteroids\n df_vec, df_elt = integrate_ast(sim=sim, mjd0=mjd0, mjd1=mjd1, interval=interval, progbar=progbar)\n # Save the DataFrames to CSV\n fnames_csv_vec, fnames_csv_elt = save_csvs(df_vec=df_vec, df_elt=df_elt, verbose=verbose)\n\n # If we are in insert mode, we don't have a list of file names yet.\n # Generate it by searching for matching files in the designated directory.\n if mode == 'INS':\n fnames_csv_vec, fnames_csv_elt = find_fnames_csv(verbose=verbose)\n\n # If we are in either DB or INS mode, we need to insert the CSV files to the database now\n if mode in ('DB', 'INS'):\n insert_csvs(fnames_csv_vec=fnames_csv_vec, fnames_csv_elt=fnames_csv_elt, progbar=progbar)",
"def experiment(algorithmType, numberOfHouses, numberOfIterations, algorithmName, iterations):\n\n algorithm = globals()[algorithmType]\n\n experimentInfo = []\n\n # Perform the algorithm numberOfIterations amount of times\n for i in range(numberOfIterations):\n\n plan = FloorPlan(numberOfHouses)\n\n if algorithmType == \"hillClimber\":\n\n plan = randomAlgorithm(plan)\n\n plan = algorithm(plan, i + i * 9)\n\n while len(plan.houses) < numberOfHouses:\n\n plan = randomAlgorithm(plan)\n\n plan = algorithm(plan, i + i * 9)\n\n else: \n\n plan = algorithm(plan)\n\n while len(plan.houses) < numberOfHouses:\n\n plan = algorithm(plan)\n\n # Save the plan value\n value = plan.getValue()\n\n plan.saveFloorplan(algorithmType, numberOfHouses)\n\n experimentInfo.append([i + 1, value])\n\n print(\"Iteration: \", i + 1)\n\n # Write all values to csv file to use for visualisation\n with open(\"experiments/\" + algorithmName + \"_\" + str(numberOfIterations) + \"_\" \n + str(numberOfHouses) + \".csv\", \"w\", newline = \"\") as myFile:\n \n writer = csv.writer(myFile)\n\n # Write the changed values\n writer.writerows(experimentInfo)\n\n converter.convert(algorithmName, numberOfIterations, numberOfHouses)",
"def run_experiment(tc, EXP_NAME):\n\n if EXP_NAME == PARAMS.EXPERIMENT_NAME_PROP:\n print \"###### Finding Security Task Parameter using Proposed Heuristic ######\"\n elif EXP_NAME == PARAMS.EXPERIMENT_NAME_BF:\n print \"###### Finding Security Task Parameter using Best-Fit Approach ######\"\n else:\n raise ValueError(\"Invalid Experiment Name!\")\n\n xi_list_per_core = [] # save xi for all cores all utilization vals\n xi_std_list_per_core = [] # save STD(xi) for all cores all utilization vals\n\n etasum_list_per_core = [] # save sum(eta) for all cores all utilization vals\n etasum_std_list_per_core = [] # save STD(sum(eta)) for all cores all utilization vals\n\n se_sched_list_per_core = [] # save SE schedulablity count for all cores all utilization vals\n rt_sched_list_per_core = [] # save RT schedulablity count for all cores all utilization vals\n\n for core in PARAMS.CORE_LIST:\n util_list = TGEN.get_util_list_by_core(core)\n\n xi_per_util_list = [None] * len(util_list)\n xi_std_per_util_list = [None] * len(util_list)\n etasum_per_util_list = [None] * len(util_list)\n etasum_std_per_util_list = [None] * len(util_list)\n\n se_sched_per_util_list = [None] * len(util_list)\n rt_sched_per_util_list = [None] * len(util_list)\n\n for uindx, util in enumerate(util_list):\n\n xi_list = []\n etasum_list = []\n se_sched_count = 0\n\n rt_sched_count = 0\n\n for ntc in range(0, PARAMS.N_TASKSET_EACH_CONF):\n\n print EXP_NAME, \"--> Analyzing Core:\", core, \"System Utilization:\", util, \"Task index\", ntc\n\n tc_conf = tc[core][util][ntc]\n rt_alloc = TALLOC.get_rt_task_assignemnt(tc_conf, PARAMS.EXPERIMENT_NAME_PROP, worst_fit=True)\n if rt_alloc is None:\n continue\n\n rt_sched_count += 1 # count schedulable RT taskset\n\n # pick which experiment to run\n if EXP_NAME == PARAMS.EXPERIMENT_NAME_PROP:\n eta_list, se_period_list, xi = TALLOC.allocate_security_task(tc_conf, rt_alloc)\n elif EXP_NAME == PARAMS.EXPERIMENT_NAME_BF:\n eta_list, se_period_list, xi = TALLOC.find_se_task_param_best_fit(tc_conf, rt_alloc)\n\n if eta_list is not None:\n se_sched_count += 1\n xi_list.append(xi)\n etasum_list.append(sum(eta_list))\n\n if len(xi_list) > 0:\n xi_mean_prop = sum(xi_list) / float(len(xi_list))\n xi_std_prop = np.std(np.array(xi_list))\n\n etasum_mean_prop = sum(etasum_list) / float(len(etasum_list))\n etasum_std_prop = np.std(np.array(etasum_list))\n\n else:\n xi_mean_prop = None\n xi_std_prop = None\n\n etasum_mean_prop = None\n etasum_std_prop = None\n\n xi_per_util_list[uindx] = xi_mean_prop # save xi per utilization\n xi_std_per_util_list[uindx] = xi_std_prop # save xi std per utilization\n\n se_sched_per_util_list[uindx] = se_sched_count # save SE schedulability count\n rt_sched_per_util_list[uindx] = rt_sched_count # save RT schedulability count\n\n etasum_per_util_list[uindx] = etasum_mean_prop # save xi per utilization\n etasum_std_per_util_list[uindx] = etasum_std_prop # save xi std per utilization\n\n xi_list_per_core.append(xi_per_util_list)\n xi_std_list_per_core.append(xi_std_per_util_list)\n\n etasum_list_per_core.append(xi_per_util_list)\n etasum_std_list_per_core.append(xi_std_per_util_list)\n\n se_sched_list_per_core.append(se_sched_per_util_list)\n rt_sched_list_per_core.append(rt_sched_per_util_list)\n\n output = ER.ExportOutput(etasum_list_per_core, etasum_std_list_per_core,\n xi_list_per_core, xi_std_list_per_core,\n rt_sched_list_per_core,\n se_sched_list_per_core)\n return output",
"def runall():\n\t\tfor session in ['tigerp6', 'beckp1', 'beckp4', 'orangep4', 'orangep5']:\n\t\t\tpass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Renders the allbuilds template to display all builds | def all_builds():
return render_template("allbuilds.html", builds=mongo.db.build.find()) | [
"def show_pubbuilds(request):\n builds = BuildsTable.objects.filter(access_r='pub').order_by('-time')\n return render(request, 'build_public.html', {'builds': builds})",
"def show_mybuilds(request):\n builds = BuildsTable.objects.filter(user=request.user).order_by('time')\n return render(request, 'build_mybuilds.html', {'builds': builds})",
"def test_all_builds():\n with utils.mock_systems(systems.app, [sys1, sys2]):\n c = systems.app.test_client()\n rv = c.get('/all-builds')\n assert (b'<a class=\"buildbox build_fail\" title=\"Build failed\"'\n in rv.data)\n assert b'<a class=\"buildbox build_ok\" title=\"Build OK\"' in rv.data",
"def displayStatusLine(self, builder_list, all_builds, revision, debug_info):\n\n details = []\n builds = {}\n\n # Display the boxes by category group.\n for category in builder_list:\n for subcategory in builder_list[category]:\n for category_full in builder_list[category][subcategory]:\n for builder in builder_list[category][subcategory][category_full]:\n builder_name = builder['builderName']\n builds[builder_name] = []\n introduced_in = None\n first_not_in = None\n\n cached_value = self.cache.get(builder_name, revision.revision)\n if cached_value:\n debug_info[\"from_cache\"] += 1\n\n b = {}\n b[\"url\"] = cached_value.url\n b[\"pageTitle\"] = cached_value.pageTitle\n b[\"color\"] = cached_value.color\n b[\"tag\"] = cached_value.tag\n b[\"builderName\"] = cached_value.builderName\n\n builds[builder_name].append(b)\n\n if cached_value.details and cached_value.color == \"failure\":\n details.append(cached_value.details)\n\n continue\n\n # Find the first build that does not include the revision.\n for build in all_builds[builder_name]:\n if self.comparator.isRevisionEarlier(build.revision, revision):\n first_not_in = build\n break\n else:\n introduced_in = build\n\n # Get the results of the first build with the revision, and the\n # first build that does not include the revision.\n results = None\n in_progress_results = None\n previous_results = None\n if introduced_in:\n results = introduced_in.results\n in_progress_results = introduced_in.inProgressResults\n if first_not_in:\n previous_results = first_not_in.results\n\n is_running = False\n if introduced_in and not introduced_in.isFinished:\n is_running = True\n\n url = \"./waterfall\"\n page_title = builder_name\n tag = \"\"\n current_details = {}\n if introduced_in:\n current_details = introduced_in.details or \"\"\n url = \"./buildstatus?builder=%s&number=%s\" % (\n urllib.quote(builder_name), introduced_in.number)\n page_title += \" \"\n page_title += urllib.quote(' '.join(introduced_in.text),\n ' \\n\\\\/:')\n\n builder_strip = builder_name.replace(' ', '')\n builder_strip = builder_strip.replace('(', '')\n builder_strip = builder_strip.replace(')', '')\n builder_strip = builder_strip.replace('.', '')\n tag = \"Tag%s%s\" % (builder_strip, introduced_in.number)\n\n if is_running:\n page_title += ' ETA: %ds' % (introduced_in.eta or 0)\n\n results_class = getResultsClass(results, previous_results,\n is_running, in_progress_results)\n\n b = {}\n b[\"url\"] = url\n b[\"pageTitle\"] = page_title\n b[\"color\"] = results_class\n b[\"tag\"] = tag\n b[\"builderName\"] = builder_name\n\n builds[builder_name].append(b)\n\n # If the box is red, we add the explaination in the details\n # section.\n if current_details and results_class == \"failure\":\n details.append(current_details)\n\n # Add this box to the cache if it's completed so we don't have\n # to compute it again.\n if results_class not in (\"running\", \"running_failure\",\n \"notstarted\"):\n debug_info[\"added_blocks\"] += 1\n self.cache.insert(builder_name, revision.revision, results_class,\n page_title, current_details, url, tag)\n\n return (builds, details)",
"def build_list(request):\n if request.method == 'GET':\n builds = Build.objects.all()\n serializer = BuildSerializer(builds, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = BuildSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def render_all_projects(request) -> HttpResponse:\n projects = Project.objects.filter(user_id=request.user)\n return render(request, \"view_projects.html\", {\"projects\": projects, \"form\": ProjectForm()})",
"def showAllItems():\n\n items = readAllItems()\n return render_template('show_all_items.html', items=items)",
"def buildings_ajax():\n\n buildings = db.session.query(Building).all()\n return ajax.admin.buildings_data(buildings)",
"def build_ids(request):\r\n return {'BUILD_ID_CSS': BUILD_ID_CSS, 'BUILD_ID_JS': BUILD_ID_JS,\r\n 'BUILD_ID_IMG': BUILD_ID_IMG}",
"def getAllBuildsForRevision(self, status, request, last_revision, num_builds,\n categories, builders, debug_info):\n\n all_builds = dict()\n\n # List of all builders in the dictionary.\n builder_list = dict()\n\n debug_info[\"builds_scanned\"] = 0\n # Get all the builders.\n builder_names = status.getBuilderNames()[:]\n for builder_name in builder_names:\n builder = status.getBuilder(builder_name)\n\n # Make sure we are interested in this builder.\n if categories and builder.category not in categories:\n continue\n if builders and builder_name not in builders:\n continue\n if builder_name_schema.IsTrybot(builder_name):\n continue\n\n # We want to display this builder.\n category_full = builder.category or 'default'\n\n category_parts = category_full.split('|')\n category = category_parts[0]\n if len(category_parts) > 1:\n subcategory = category_parts[1]\n else:\n subcategory = 'default'\n if not builder_list.get(category):\n builder_list[category] = {}\n if not builder_list[category].get(subcategory):\n builder_list[category][subcategory] = {}\n if not builder_list[category][subcategory].get(category_full):\n builder_list[category][subcategory][category_full] = []\n\n b = {}\n b[\"color\"] = \"notstarted\"\n b[\"pageTitle\"] = builder_name\n b[\"url\"] = \"./builders/%s\" % urllib.quote(builder_name, safe='() ')\n b[\"builderName\"] = builder_name\n state, _ = status.getBuilder(builder_name).getState()\n # Check if it's offline, if so, the box is purple.\n if state == \"offline\":\n b[\"color\"] = \"offline\"\n else:\n # If not offline, then display the result of the last\n # finished build.\n build = self.getHeadBuild(status.getBuilder(builder_name))\n while build and not build.isFinished():\n build = build.getPreviousBuild()\n\n if build:\n b[\"color\"] = getResultsClass(build.getResults(), None, False)\n\n # Append this builder to the dictionary of builders.\n builder_list[category][subcategory][category_full].append(b)\n # Set the list of builds for this builder.\n all_builds[builder_name] = self.getBuildsForRevision(request,\n builder,\n builder_name,\n last_revision,\n num_builds,\n debug_info)\n\n return (builder_list, all_builds)",
"def show_runs():\n # return render_template(\"runs.html\", runs=data.runs(), type=type)\n return render_template(\"runs.html\", runs=[], type=type)",
"async def get_all_builds(self, name: str) -> List[dict]:\n name = self._normalize_name(name)\n response = await self.jenkins._request(\n 'GET',\n f'/computer/{name}/rssAll',\n )\n return _parse_rss(await response.text())",
"def all_tickets(request):\n tickets = Ticket.objects.all()\n return render(request, \"tickets.html\", {'tickets': tickets})",
"def get_building(request):\n building_id = request.GET.get('building_id')\n organization_id = request.GET.get('organization_id')\n org = Organization.objects.get(pk=organization_id)\n canon = CanonicalBuilding.objects.get(pk=building_id)\n building = canon.canonical_snapshot\n user_orgs = request.user.orgs.all()\n parent_org = user_orgs[0].get_parent()\n\n if (building.super_organization in user_orgs or parent_org in user_orgs):\n exportable_field_names = None # show all\n else:\n # User isn't in the parent org or the building's org,\n # so only show shared fields.\n exportable_fields = parent_org.exportable_fields\n exportable_field_names = exportable_fields.values_list('name',\n flat=True)\n\n building_dict = building.to_dict(exportable_field_names)\n\n ancestors = get_ancestors(building)\n\n # Add child node (in case it hasn't yet been matched with any other\n # buildings). When this happens, ancestors should also be the empty list.\n if building.source_type in [ASSESSED_BS, PORTFOLIO_BS, GREEN_BUTTON_BS]:\n ancestors.append(building)\n imported_buildings_list = []\n for b in ancestors:\n d = b.to_dict(exportable_field_names)\n # get deleted import file names without throwing an error\n imp_file = ImportFile.raw_objects.get(pk=b.import_file_id)\n d['import_file_name'] = imp_file.filename_only\n # do not show deleted import file sources\n if not imp_file.deleted:\n imported_buildings_list.append(d)\n imported_buildings_list.sort(key=lambda x: x['source_type'])\n\n projects = get_projects(building, org)\n ou = request.user.organizationuser_set.filter(\n organization=building.super_organization\n ).first()\n\n return {\n 'status': 'success',\n 'building': building_dict,\n 'imported_buildings': imported_buildings_list,\n 'projects': projects,\n 'user_role': _get_js_role(ou.role_level) if ou else \"\",\n 'user_org_id': ou.organization.pk if ou else \"\",\n }",
"def query_builder():\n return render_template(\"template_query_builder.html\", active_nav=\"query\")",
"def display_all_tickets(self):\n\n self.model.get_all_tickets()\n pass",
"def any_builds_running(self):",
"def build_version(self, request, queryset):\n total = 0\n for version in queryset:\n trigger_build(\n project=version.project,\n version=version,\n )\n total += 1\n messages.add_message(\n request,\n messages.INFO,\n \"Triggered builds for {} version(s).\".format(total),\n )",
"def edit_build(build_id):\n\n build = mongo.db.build.find_one({\"_id\": ObjectId(build_id)})\n motherboards = mongo.db.motherboard.find()\n processors = mongo.db.processor.find()\n processor_coolers = mongo.db.processorcooler.find()\n memory = mongo.db.memory.find()\n graphics_cards = mongo.db.graphicscard.find()\n hard_drives = mongo.db.harddrive.find()\n power_supplies = mongo.db.powersupply.find()\n cases = mongo.db.case.find()\n return render_template(\n 'editbuild.html',\n motherboards=motherboards,\n processors=processors,\n processorcoolers=processor_coolers,\n memory=memory,\n graphicscards=graphics_cards,\n harddrives=hard_drives,\n powersupplies=power_supplies,\n cases=cases, build=build)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the build's Id and populates the forms with that build's values for editing; finds all collection data to populate the form options | def edit_build(build_id):
build = mongo.db.build.find_one({"_id": ObjectId(build_id)})
motherboards = mongo.db.motherboard.find()
processors = mongo.db.processor.find()
processor_coolers = mongo.db.processorcooler.find()
memory = mongo.db.memory.find()
graphics_cards = mongo.db.graphicscard.find()
hard_drives = mongo.db.harddrive.find()
power_supplies = mongo.db.powersupply.find()
cases = mongo.db.case.find()
return render_template(
'editbuild.html',
motherboards=motherboards,
processors=processors,
processorcoolers=processor_coolers,
memory=memory,
graphicscards=graphics_cards,
harddrives=hard_drives,
powersupplies=power_supplies,
cases=cases, build=build) | [
"def _build_forms_from_get(self):\n \n if self.config_id is None:\n # New form\n \n initial_values = []\n if 'data_file' in self.request.GET:\n initial_values = [{'data_runs': self.request.GET.get('data_file', '')}]\n ScanFormSet = formset_factory(ScanForm,extra=0)\n else:\n ScanFormSet = formset_factory(ScanForm,extra=1)\n self.scans_form = ScanFormSet(initial=initial_values, prefix=\"sf\")\n \n initial_config = {}\n if 'experiment' in self.request.GET:\n initial_config['experiment'] = self.request.GET.get('experiment', '')\n if 'reduction_name' in self.request.GET:\n initial_config['reduction_name'] = self.request.GET.get('reduction_name', '')\n self.config_form = ConfigurationForm(initial=initial_config)\n MaskFormSet = formset_factory(MaskForm,extra=1)\n self.masks_form = MaskFormSet(prefix=\"mf\")\n \n else:\n # Retrieve existing configuration\n reduction_config = get_object_or_404(ReductionConfiguration, pk=self.config_id, owner=self.request.user)\n initial_config = ConfigurationForm.data_from_db(self.request.user, reduction_config)\n \n logger.debug(\"initial_config: %s\" % initial_config)\n ScanFormSet = formset_factory(ScanForm,extra=0)\n initial_values = []\n for item in reduction_config.reductions.all().order_by('timestamp'):\n props = ScanForm.data_from_db(self.request.user, item.pk)\n initial_values.append(props)\n \n \n self.scans_form = ScanFormSet(initial=initial_values, prefix=\"sf\")\n self.config_form = ConfigurationForm(initial=initial_config)\n MaskFormSet = formset_factory(MaskForm,extra=0)\n if initial_config.get('mask'):\n self.masks_form = MaskFormSet(initial=initial_config['mask'],prefix=\"mf\")\n else:\n self.masks_form = MaskFormSet(prefix=\"mf\")",
"def getConfigureForm(room_jid):",
"def build_ids(request):\r\n return {'BUILD_ID_CSS': BUILD_ID_CSS, 'BUILD_ID_JS': BUILD_ID_JS,\r\n 'BUILD_ID_IMG': BUILD_ID_IMG}",
"def get_build(self, build_id):\n\n build = BuildInfo(self, build_id)\n build.refresh() # To get 404 early..\n return build",
"def get_map_form_for_update(self, map_id):\n return # osid.mapping.MapForm",
"def get_calendar_form_for_update(self, calendar_id):\n return # osid.calendaring.CalendarForm",
"def clean(self):\n\n # TODO checking which field belongs to which form/collection shouldn't be done here\n # TODO the collection of native fields and foreign fields point at two different things\n\n print(f\"***{self.collections}\")\n for i, field in enumerate(self.fields):\n print(f\">>> {field.collection}\")\n field.index = i\n\n if not field.collection:\n print(\"Didn't have a collection\")\n field.collection = self.default_collection._id\n\n elif field.collection == self.default_collection._id:\n print(\"Had the default collection\")\n if field._id in self.foreign_fields_id:\n self.foreign_fields_id.remove(field._id)\n\n elif field.collection in self.collections: # This is looking in the wrong place\n print(\"Had a foreign collection\")\n if field._id not in self.foreign_fields_id:\n self.foreign_fields_id.append(field._id)\n\n else:\n print(\"Had something else\")\n field.collection = self.default_collection._id\n if field._id in self.foreign_fields_id:\n self.foreign_fields_id.remove(field._id)\n\n for collection in self.collections:\n for field in collection.fetch().fields:\n if field._id not in self.foreign_fields_id:\n self.fields.append(field)\n self.foreign_fields.append(field._id)\n\n if isinstance(self.name, str):\n n = FormTemplateModel.name.max_length\n self.name = self.name.strip()[:n] or FormTemplateModel.name.default\n\n if isinstance(self.title, str):\n n = FormTemplateModel.title.max_length\n self.title = self.title.strip()[:n] or self.name\n elif not self.title and self.name != FormTemplateModel.name.default:\n self.title = self.name\n\n if self.name != FormTemplateModel.name.default and self.default_collection.name == Connection.name.default:\n self.default_collection.name = self.name",
"def build_detail(request, build_id):\n build = get_object_or_404(Build, pk=build_id)\n\n if request.method == 'GET':\n serializer = BuildSerializer(build)\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n\n elif request.method == 'PUT':\n serializer = BuildSerializer(build, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n build.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def get_forms(self):\n metadata = self.get_metadata()\n\n MetadataForm = self.get_metadata_form_class()\n metadata_form = MetadataForm(\n self.form_data,\n instance=metadata,\n category=self.trs_import.doc_category)\n\n revision_num = self.csv_data['revision']\n revision = metadata.get_revision(revision_num) if metadata else None\n\n RevisionForm = self.get_revision_form_class()\n revision_form = RevisionForm(\n self.form_data,\n instance=revision,\n category=self.trs_import.doc_category)\n\n return metadata_form, revision_form",
"def buildings_ajax():\n\n buildings = db.session.query(Building).all()\n return ajax.admin.buildings_data(buildings)",
"def refresh(self):\n self._info = self.app.storage.get_build(self.build_id)",
"def update_build(build_id):\n build = mongo.db.build\n build_params = {\n 'build_name': request.form.get('build_name'),\n 'motherboard': request.form.get('motherboard'),\n 'processor': request.form.get('processor'),\n 'processor_cooler': request.form.get('processor_cooler'),\n 'memory': request.form.get('memory'),\n 'graphics_card': request.form.get('graphics_card'),\n 'hard_drive': request.form.get('hard_drive'),\n 'power_supply': request.form.get('power_supply'),\n 'case': request.form.get('case')\n }\n\n build.replace_one({'_id': ObjectId(build_id)}, build_params)\n return redirect(url_for('all_builds'))",
"def get_event_form_for_update(self, event_id):\n return # osid.calendaring.EventForm",
"def get_forms(args):\n # Step 1. Create an API client with headers\n api_client = create_rooms_api_client(access_token=args[\"access_token\"])\n\n # Step 2. Get room documents\n rooms_api = RoomsApi(api_client)\n room_documents = rooms_api.get_documents(\n room_id=args[\"room_id\"],\n account_id=args[\"account_id\"]\n )\n\n # Step 2. Get room forms\n room_forms = [\n form for form in room_documents.documents\n if form.docu_sign_form_id\n ]\n return room_forms",
"def select_old_collection(self,new_id):\n # TODO: this is not compatible with a generic hub_db backend\n # TODO: this should return a collection with status=success\n col = get_src_build()\n doc = col.find_one({\"_id\":new_id})\n assert doc, \"No build document found for '%s'\" % new_id\n assert \"build_config\" in doc, \"No build configuration found for document '%s'\" % new_id \n assert doc[\"build_config\"][\"name\"] == doc[\"build_config\"][\"_id\"]\n confname = doc[\"build_config\"][\"name\"]\n docs = get_src_build().find({\n \"$and\":[\n {\"started_at\":{\"$lte\":doc[\"started_at\"]}},\n {\"build_config.name\":confname},\n {\"archived\":{\"$exists\":0}},\n ]},\n {\"_id\":1}).sort([(\"started_at\",-1)]).limit(2)\n _ids = [d[\"_id\"] for d in docs]\n assert len(_ids) == 2, \"Expecting 2 collection _ids, got: %s\" % _ids\n assert _ids[0] == new_id, \"Can't find collection _id '%s'\" % new_id\n return _ids[1]",
"def get_buildings_with_help(self) -> Set[int]:\n result, _ = self.post(\"alliance_help_getMyHelpers\")\n return {helper[\"job\"][\"buildingId\"] for helper in result[\"helpers\"]}",
"def all_builds():\n\n return render_template(\"allbuilds.html\", builds=mongo.db.build.find())",
"def get_building(request):\n building_id = request.GET.get('building_id')\n organization_id = request.GET.get('organization_id')\n org = Organization.objects.get(pk=organization_id)\n canon = CanonicalBuilding.objects.get(pk=building_id)\n building = canon.canonical_snapshot\n user_orgs = request.user.orgs.all()\n parent_org = user_orgs[0].get_parent()\n\n if (building.super_organization in user_orgs or parent_org in user_orgs):\n exportable_field_names = None # show all\n else:\n # User isn't in the parent org or the building's org,\n # so only show shared fields.\n exportable_fields = parent_org.exportable_fields\n exportable_field_names = exportable_fields.values_list('name',\n flat=True)\n\n building_dict = building.to_dict(exportable_field_names)\n\n ancestors = get_ancestors(building)\n\n # Add child node (in case it hasn't yet been matched with any other\n # buildings). When this happens, ancestors should also be the empty list.\n if building.source_type in [ASSESSED_BS, PORTFOLIO_BS, GREEN_BUTTON_BS]:\n ancestors.append(building)\n imported_buildings_list = []\n for b in ancestors:\n d = b.to_dict(exportable_field_names)\n # get deleted import file names without throwing an error\n imp_file = ImportFile.raw_objects.get(pk=b.import_file_id)\n d['import_file_name'] = imp_file.filename_only\n # do not show deleted import file sources\n if not imp_file.deleted:\n imported_buildings_list.append(d)\n imported_buildings_list.sort(key=lambda x: x['source_type'])\n\n projects = get_projects(building, org)\n ou = request.user.organizationuser_set.filter(\n organization=building.super_organization\n ).first()\n\n return {\n 'status': 'success',\n 'building': building_dict,\n 'imported_buildings': imported_buildings_list,\n 'projects': projects,\n 'user_role': _get_js_role(ou.role_level) if ou else \"\",\n 'user_org_id': ou.organization.pk if ou else \"\",\n }",
"def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n current_cv = self.object\n form = kwargs.get(\"form\")\n if current_cv.owner.id != self.request.user.id:\n forms = []\n for skill in current_cv.evaluate_skills.all():\n if form and form.instance.skill_from_cv_id == skill.id:\n forms.append(form)\n else:\n new_form = self.form_class(self.get_form_kwargs())\n new_form.fields[\"skill_from_cv\"].initial = skill\n forms.append(new_form)\n context[\"forms\"] = forms\n return context"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the build Id, sends the updated data to Mongo, then redirects to the all builds page | def update_build(build_id):
build = mongo.db.build
build_params = {
'build_name': request.form.get('build_name'),
'motherboard': request.form.get('motherboard'),
'processor': request.form.get('processor'),
'processor_cooler': request.form.get('processor_cooler'),
'memory': request.form.get('memory'),
'graphics_card': request.form.get('graphics_card'),
'hard_drive': request.form.get('hard_drive'),
'power_supply': request.form.get('power_supply'),
'case': request.form.get('case')
}
build.replace_one({'_id': ObjectId(build_id)}, build_params)
return redirect(url_for('all_builds')) | [
"def edit_build(build_id):\n\n build = mongo.db.build.find_one({\"_id\": ObjectId(build_id)})\n motherboards = mongo.db.motherboard.find()\n processors = mongo.db.processor.find()\n processor_coolers = mongo.db.processorcooler.find()\n memory = mongo.db.memory.find()\n graphics_cards = mongo.db.graphicscard.find()\n hard_drives = mongo.db.harddrive.find()\n power_supplies = mongo.db.powersupply.find()\n cases = mongo.db.case.find()\n return render_template(\n 'editbuild.html',\n motherboards=motherboards,\n processors=processors,\n processorcoolers=processor_coolers,\n memory=memory,\n graphicscards=graphics_cards,\n harddrives=hard_drives,\n powersupplies=power_supplies,\n cases=cases, build=build)",
"def delete_build(build_id):\n\n mongo.db.build.delete_one({'_id': ObjectId(build_id)})\n return redirect(url_for('all_builds'))",
"def build_detail(request, build_id):\n build = get_object_or_404(Build, pk=build_id)\n\n if request.method == 'GET':\n serializer = BuildSerializer(build)\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n\n elif request.method == 'PUT':\n serializer = BuildSerializer(build, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n build.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def all_builds():\n\n return render_template(\"allbuilds.html\", builds=mongo.db.build.find())",
"def build_db_update(build_data):\n if db.session.query(\n models.Build).filter_by(job=build_data['name'],\n number=build_data['build']['number']).scalar():\n LOG.debug(\"Build exists. Updating its records.\")\n else:\n pass",
"def get_build(self, build_id):\n\n build = BuildInfo(self, build_id)\n build.refresh() # To get 404 early..\n return build",
"def main():\n app_id = os.environ.get('APP_ID', '896660')\n branch = os.environ.get('APP_BRANCH', 'public')\n\n try:\n build_id = get_build_id(app_id, branch)\n except Exception as e:\n print(\"Could not retrieve update status at this time: {}\"\n .format(e), file=sys.stderr)\n sys.exit(2)\n\n print(build_id)",
"def refresh(self):\n self._info = self.app.storage.get_build(self.build_id)",
"def build_list(request):\n if request.method == 'GET':\n builds = Build.objects.all()\n serializer = BuildSerializer(builds, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = BuildSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def save_build_data(build_id, data):\n build = Build.objects.filter(id=build_id).first()\n if build:\n BuildData.objects.collect(build, data)",
"def build_ids(request):\r\n return {'BUILD_ID_CSS': BUILD_ID_CSS, 'BUILD_ID_JS': BUILD_ID_JS,\r\n 'BUILD_ID_IMG': BUILD_ID_IMG}",
"def get_build_id(app_id, branch):\n response = {}\n try:\n r = requests.get(STEAM_API\n .format(app_id))\n except Exception as e:\n print(\"Exception while checking for update: {}\"\n .format(e), file=sys.stderr)\n raise\n\n try:\n response = r.json()\n except Exception as e:\n print(\"Exception while unmarshaling update response\"\n \" as json: {}\".format(e), file=sys.stderr)\n raise\n\n # some mild defensive programming\n if 'data' not in response:\n raise ValueError('Missing expected key \"data\" from response')\n\n if app_id not in response['data']:\n raise ValueError('Missing expected key (app_id): {0}'\n ' from response[\"data\"]'.format(app_id))\n\n if 'depots' not in response['data'][app_id]:\n raise ValueError('Missing expected key: \"depots\" from '\n 'response[\"data\"][\"{0}\"]'.format(app_id))\n\n if 'branches' not in response['data'][app_id]['depots']:\n raise ValueError('Missing expected key: \"branches\" from '\n 'response[\"data\"][\"{0}\"][\"depots\"]'.format(app_id))\n\n if branch not in response['data'][app_id]['depots']['branches']:\n raise ValueError('Missing expected key: \"{0}\" from '\n 'response[\"data\"][\"{1}\"][\"depots\"][\"branches\"]'\n .format(branch, app_id), file=sys.stderr)\n\n if 'buildid' not in \\\n response['data'][app_id]['depots']['branches']['public']:\n raise ValueError('Missing expected key: \"buildid\" from '\n 'response[\"data\"][\"{0}\"][\"depots\"][\"branches\"][\"{1}\"]'\n .format(app_id, branch), file=sys.stderr)\n\n\n # we only care about this\n return response['data'][app_id]['depots']['branches'][branch]['buildid']",
"def show_pubbuilds(request):\n builds = BuildsTable.objects.filter(access_r='pub').order_by('-time')\n return render(request, 'build_public.html', {'builds': builds})",
"def loan_forge():\n login = request.args.get('login', 'ionagamed')\n user = User.query.filter(User.login == login).first()\n copy_id = request.args['copy']\n user.checkout(DocumentCopy.query.filter(DocumentCopy.id == copy_id).first())\n return redirect(request.referrer)",
"def process_jenkins_build(self):\n self.logger.info(\"Starting to process_jenkins_build\")\n\n title = self._cfg['title']\n if title == 'ss4gating':\n env = \"INT\"\n else:\n env = \"PROD\"\n\n last_build_url = self._cfg['jenkins_url']\n\n if self._cfg.influx_host:\n influx_host = self._cfg.influx_host\n else:\n influx_host = influx_config.INFLUXDBIP\n\n if self._cfg.influx_port:\n influx_port = self._cfg.influx_port\n else:\n influx_port = influx_config.PORT\n\n influx_client = influx_dbmanager.get_influx_client(influx_host,\n influx_port,\n influx_config.USERNAME,\n influx_config.PASSWORD,\n influx_config.DATABASENAME)\n try:\n data = http_requests.get(\n last_build_url +\n '/api/json?depth=1',\n auth=(\n self._cfg['jenkins_username'],\n self._cfg['jenkins_password']))\n except BaseException:\n self.logger.exception(\n 'Unknown error occurred while fetching job details in \\'process_jenkins_build\\'')\n return\n if data:\n # Fetch job details\n job_number = data.json()['number']\n joburl = data.json()['url']\n datetime_obj = datetime.datetime.fromtimestamp(\n data.json()['timestamp'] / 1000.0)\n timestamp = str(datetime_obj)\n jobresult = data.json()['result']\n self.logger.info(\n 'Latest job:(' +\n str(job_number) +\n ',' +\n joburl +\n ',' +\n timestamp +\n ',' +\n jobresult +\n ')')\n self.logger.info(\"details does not exists\")\n set_bot_cfg(self._cfg)\n set_logger(self.logger)\n parse_logs_result = parselogs(\n self._job_name, str(job_number), str(job_number))\n self.logger.info('the result for parse log results is %s' % parse_logs_result)\n global errors\n errors = soft_assert_errors()\n if title == 'meeting_launch_duration':\n meeting_launch_time = cilogparser2_gating.get_launch_time(joburl, auth=(self._cfg['jenkins_username'],\n self._cfg['jenkins_password']))\n self.logger.info(\"The meeting launch time is {}\".format(meeting_launch_time))\n if meeting_launch_time:\n if meeting_launch_time in range(4):\n category = 1\n elif meeting_launch_time in range(4, 10):\n category = 2\n elif meeting_launch_time > 10:\n category = 3\n try:\n if self._influx_write:\n self.logger.info(\"Inserting job result for passed case\")\n if not influx_dbmanager.write_meeting_data(influx_client, title, meeting_launch_time, category):\n self.logger.exception(\"Data for the successful test {} not written\"\n \" into the influx database\".format(title))\n except BaseException as e:\n self.logger.exception(\"Hit exception while to write to influx\", e)\n spark_util.post_spark_message(BOT_CRASH_NOTIFICATION_ROOM, e, self._cfg['bearer_token'])\n else:\n self.logger.info(\"Influx write is false. 
No data written to the database\")\n\n if parse_logs_result and parse_logs_result[3] == 0 and parse_logs_result[9] == 0:\n job_msg_result = 'SUCCESS'\n elif parse_logs_result and parse_logs_result[3] == 0 and parse_logs_result[9] > 0:\n job_msg_result = 'SUCCESS with Retries'\n elif jobresult == 'ABORTED':\n job_msg_result = 'ABORTED'\n elif parse_logs_result and jobresult == 'FAILURE':\n job_msg_result = 'FAILURE'\n elif not parse_logs_result:\n job_msg_result = 'UNDETERMINED'\n self.logger.info(\"The job result is {}\".format(job_msg_result))\n\n if parse_logs_result and len(parse_logs_result[8].keys()) == 1 and 'afterSuite' in parse_logs_result[\n 8].keys():\n job_msg_result = 'SUCCESS'\n # Post in Spark room if there is a failure\n if job_msg_result != 'SUCCESS' and jobresult != 'ABORTED':\n report_dict = self.process_failed_success_with_retries_build(\n datetime_obj, job_msg_result, parse_logs_result, job_number, joburl, env, influx_client)\n elif job_msg_result != 'SUCCESS' and jobresult == 'ABORTED':\n self.process_aborted_build(job_number, joburl, title, influx_client)\n elif job_msg_result == 'SUCCESS':\n self.process_success_build(job_number, joburl, title, influx_client, parse_logs_result[2])\n else:\n job_msg_result = 'UNDETERMINED'\n undetermined_msg = 'The build {} has failed/aborted/not anlayzed/ could not' \\\n ' find the report for this run'.format(last_build_url)\n if job_msg_result == 'UNDETERMINED':\n spark_util.post_spark_message(self._cfg['spark_room'],\n undetermined_msg,\n self._cfg['bearer_token'])",
"def _trigger_build(self):\n if not self._dry_run():\n _gh_api.gh_http_post_request( \\\n '{}/repos/{}/dispatches'.format(self._config['GitHub']['service_url'],\n self._build_repo_full_name()),\n auth=self._config.auth(),\n params={'accept': 'application/vnd.github.v3+json'},\n json={'event_type': 'trigger-action'})\n self._log.info('Build triggered on \"%s\"', self._build_repo_full_name())\n else:\n self._log.info('Build triggered on \"%s\" (DRY RUN)', self._build_repo_full_name())",
"def create_build(request):\n if request.method == 'POST':\n\n \"\"\"If an user is trying to edit the \n build we are going to use this try except\"\"\"\n try:\n oldbuild = BuildsTable.objects.get(name=request.POST['name_input']) \n\n except BuildsTable.DoesNotExist:\n new_build = True\n pass\n \n else:\n new_build = False\n oldbuild.delete()\n \n \"\"\"We start by getting all the user \n entries and storing them into variables\"\"\"\n bname = request.POST['name_input']\n mobo = request.POST['moboField']\n cpu = request.POST['cpuField']\n case = request.POST['caseField']\n mem = request.POST['memField']\n hdd = request.POST['hdField']\n gpu = request.POST['gpuField']\n acc_r = request.POST['access_right']\n username = request.user\n moboprice = MoboListing.objects.get(moboList=mobo).moboPrice\n cpuprice = CpuListing.objects.get(cpuList=cpu).cpuPrice\n caseprice = CaseListing.objects.get(caseList=case).casePrice\n memprice = MemListing.objects.get(memList=mem).memPrice\n hdprice = HdListing.objects.get(hdList=hdd).hdPrice\n gpuprice = GpuListing.objects.get(gpuList=gpu).gpuPrice \n buildprice = moboprice + cpuprice + caseprice \n buildprice += memprice + hdprice + gpuprice \n \n \"\"\"Now we create an instance of \n BuildsTable and saves it to our database\"\"\"\n build = BuildsTable(name=bname, moboPart=mobo, cpuPart=cpu, \n casePart=case, memPart=mem, hdPart=hdd, \n gpuListing=gpu, user=username,\n access_r=acc_r, price=buildprice)\n build.save()\n \n else:\n \"\"\"the user got to this page somehow,\n we tell them there is an error\"\"\" \n return render(request, 'build_error.html') \n\n\n build_name = request.POST['name_input']\n return render(request, 'build_confirmation.html',\n {'build_name': build_name, 'new_build': new_build})",
"def main():\n \n jobInfo = jenkinsBase(jenkinsUrl)\n dbConn,cursor = mysqlConnect()\n \n updateProjectIndB(jobInfo, dbConn, cursor)\n buildsInfo(jobInfo, dbConn, cursor)",
"def build_version(self, request, queryset):\n total = 0\n for version in queryset:\n trigger_build(\n project=version.project,\n version=version,\n )\n total += 1\n messages.add_message(\n request,\n messages.INFO,\n \"Triggered builds for {} version(s).\".format(total),\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the build ID and deletes it from the database, then redirects to the all-builds page | def delete_build(build_id):
mongo.db.build.delete_one({'_id': ObjectId(build_id)})
return redirect(url_for('all_builds')) | [
"def delete_build_view(request, node_id):\n node = get_object_or_404(Node, pk=node_id)\n build = get_object_or_404(Build, node=node)\n \n # Check that the user has delete permission for the actual model\n node_modeladmin = get_modeladmin(Node)\n if not node_modeladmin.has_change_permission(request, obj=node, view=False):\n raise PermissionDenied\n \n # The user has already confirmed the deletion.\n if request.POST.get('post'):\n build.delete()\n node_modeladmin.log_change(request, node, \"Deleted firmware build\")\n node_modeladmin.message_user(request, \"Firmware build has been successfully deleted.\")\n return redirect('admin:nodes_node_firmware', node_id)\n \n context = {\n 'opts': node_modeladmin.model._meta,\n 'app_label': node_modeladmin.model._meta.app_label,\n 'title': 'Are your sure?',\n 'build': build,\n 'node': node, }\n return render(request, 'admin/firmware/delete_build_confirmation.html', context)",
"def delete_workflow_build_by_id(self, workflowid: str, workflowbuildid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"workflowid\": workflowid,\n \"workflowbuildid\": workflowbuildid,\n }\n\n path = Template(\"/catalog/v2alpha2/workflows/${workflowid}/builds/${workflowbuildid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.delete(url, params=query_params)\n return handle_response(response, )",
"def update_build(build_id):\n build = mongo.db.build\n build_params = {\n 'build_name': request.form.get('build_name'),\n 'motherboard': request.form.get('motherboard'),\n 'processor': request.form.get('processor'),\n 'processor_cooler': request.form.get('processor_cooler'),\n 'memory': request.form.get('memory'),\n 'graphics_card': request.form.get('graphics_card'),\n 'hard_drive': request.form.get('hard_drive'),\n 'power_supply': request.form.get('power_supply'),\n 'case': request.form.get('case')\n }\n\n build.replace_one({'_id': ObjectId(build_id)}, build_params)\n return redirect(url_for('all_builds'))",
"def delete(request, wid):\n wdoc = find_by_id(wid)\n cl = wdoc.cl\n wdoc.delete()\n return HttpResponseRedirect(\n reverse('waybill.views.listing', args=[cl]))",
"def build_detail(request, build_id):\n build = get_object_or_404(Build, pk=build_id)\n\n if request.method == 'GET':\n serializer = BuildSerializer(build)\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n\n elif request.method == 'PUT':\n serializer = BuildSerializer(build, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return HttpResponse(JSONRenderer().render(serializer.data), {'content_type' : 'application/json'})\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n build.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def edit_build(build_id):\n\n build = mongo.db.build.find_one({\"_id\": ObjectId(build_id)})\n motherboards = mongo.db.motherboard.find()\n processors = mongo.db.processor.find()\n processor_coolers = mongo.db.processorcooler.find()\n memory = mongo.db.memory.find()\n graphics_cards = mongo.db.graphicscard.find()\n hard_drives = mongo.db.harddrive.find()\n power_supplies = mongo.db.powersupply.find()\n cases = mongo.db.case.find()\n return render_template(\n 'editbuild.html',\n motherboards=motherboards,\n processors=processors,\n processorcoolers=processor_coolers,\n memory=memory,\n graphicscards=graphics_cards,\n harddrives=hard_drives,\n powersupplies=power_supplies,\n cases=cases, build=build)",
"def delete_website_view(request, wid):\n template = 'websites/delete_website.html'\n websites = domain.websites_domain\n website = websites.find(wid)\n if not website:\n raise http.Http404\n if request.method == 'POST':\n websites.remove(website)\n return shortcuts.redirect(shortcuts.resolve_url('index'))\n else:\n return shortcuts.render(request, template, {'website': website})",
"def create_build(request):\n if request.method == 'POST':\n\n \"\"\"If an user is trying to edit the \n build we are going to use this try except\"\"\"\n try:\n oldbuild = BuildsTable.objects.get(name=request.POST['name_input']) \n\n except BuildsTable.DoesNotExist:\n new_build = True\n pass\n \n else:\n new_build = False\n oldbuild.delete()\n \n \"\"\"We start by getting all the user \n entries and storing them into variables\"\"\"\n bname = request.POST['name_input']\n mobo = request.POST['moboField']\n cpu = request.POST['cpuField']\n case = request.POST['caseField']\n mem = request.POST['memField']\n hdd = request.POST['hdField']\n gpu = request.POST['gpuField']\n acc_r = request.POST['access_right']\n username = request.user\n moboprice = MoboListing.objects.get(moboList=mobo).moboPrice\n cpuprice = CpuListing.objects.get(cpuList=cpu).cpuPrice\n caseprice = CaseListing.objects.get(caseList=case).casePrice\n memprice = MemListing.objects.get(memList=mem).memPrice\n hdprice = HdListing.objects.get(hdList=hdd).hdPrice\n gpuprice = GpuListing.objects.get(gpuList=gpu).gpuPrice \n buildprice = moboprice + cpuprice + caseprice \n buildprice += memprice + hdprice + gpuprice \n \n \"\"\"Now we create an instance of \n BuildsTable and saves it to our database\"\"\"\n build = BuildsTable(name=bname, moboPart=mobo, cpuPart=cpu, \n casePart=case, memPart=mem, hdPart=hdd, \n gpuListing=gpu, user=username,\n access_r=acc_r, price=buildprice)\n build.save()\n \n else:\n \"\"\"the user got to this page somehow,\n we tell them there is an error\"\"\" \n return render(request, 'build_error.html') \n\n\n build_name = request.POST['name_input']\n return render(request, 'build_confirmation.html',\n {'build_name': build_name, 'new_build': new_build})",
"def delete(id):\n db = get_db()\n db.execute(\n 'DELETE FROM urls WHERE id=?', (id,) \n )\n db.commit()\n return redirect(url_for('admin.overview'))",
"async def page_delete(id):\n \n if not(session.get('logged_in')):\n # if not logged it, dump them back to index\n return redirect(url_for('index'))\n \n page = DB.blog.find_one({'_id':id})\n if page is None:\n abort(404)\n \n DB.blog.delete_one({'_id':id})\n return redirect(url_for('index'))",
"def camera_delete():\n test_connection = sql_connection()\n if test_connection[0] == 130:\n return redirect(url_for('error', error_str=test_connection[1], error_code=test_connection[0]))\n\n camera_code = request.form['code']\n check = extract_info(functions.s_cam_table, functions.s_cam_code, camera_code)\n if check[0] == 228:\n camera_id = check[1][0][functions.s_cam_id]\n try:\n del_camera(camera_id)\n try:\n return render_template('delete.html')\n except:\n return redirect(url_for('error', error_str=sys.exc_info()[1], error_code=render_issue))\n except:\n return redirect(url_for('error', error_str=sys.exc_info()[1], error_code=delete_issue))\n return redirect(url_for('error', error_str=check[1], error_code=check[0]))",
"def get_build(self, build_id):\n\n build = BuildInfo(self, build_id)\n build.refresh() # To get 404 early..\n return build",
"def organisation_delete():\n test_connection = sql_connection()\n if test_connection[0] == 130:\n return redirect(url_for('error', error_str=test_connection[1], error_code=test_connection[0]))\n\n org_code = request.form['code']\n check = extract_info(functions.s_org_table, functions.s_org_code, org_code)\n if check[0] == 228:\n org_id = check[1][0][functions.s_org_id]\n try:\n del_org(org_id)\n try:\n return render_template('delete.html')\n except:\n return redirect(url_for('error', error_str=sys.exc_info()[1], error_code=render_issue))\n except:\n return redirect(url_for('error', error_str=sys.exc_info()[1], error_code=delete_issue))\n return redirect(url_for('error', error_str=check[1], error_code=check[0]))",
"def delete_shoppingTask():\n\n args = request.args\n task_index = int(args['task_index'])\n db = get_db()\n task = g.user[\"shoppingTasks\"][task_index]\n\n task_id = task[\"_id\"]\n db.users.update_one({\"_id\": g.user[\"_id\"]},\n {\"$pull\": {\"shoppingTasks\": {\"_id\": task_id}}})\n\n if ('passVal' in args):\n return redirect(url_for('index'))\n\n return redirect(url_for('shopping'))",
"def delete_old_build_data():\n retention_days = settings.RTD_TELEMETRY_DATA_RETENTION_DAYS\n days_ago = timezone.now().date() - timezone.timedelta(days=retention_days)\n # NOTE: we are using raw SQL here to avoid Django doing a SELECT first to\n # send `pre_` and `post_` delete signals\n # See https://docs.djangoproject.com/en/4.2/ref/models/querysets/#delete\n with connections[\"telemetry\"].cursor() as cursor:\n cursor.execute(\n # \"SELECT COUNT(*) FROM telemetry_builddata WHERE created BETWEEN %s AND %s\",\n \"DELETE FROM telemetry_builddata WHERE created BETWEEN %s AND %s\",\n [\n days_ago - timezone.timedelta(days=90),\n days_ago,\n ],\n )",
"def deletegamepost():\n\n\tpostdata = request.post_vars\n\tdeleteGameCheck(postdata['game_id'], auth.user)\n\n\treturn",
"def clear_builds():\n if os.path.isfile(BUILDTEST_BUILD_LOGFILE):\n os.remove(BUILDTEST_BUILD_LOGFILE)\n if os.path.isdir(config_opts[\"BUILDTEST_TESTDIR\"]):\n shutil.rmtree(config_opts[\"BUILDTEST_TESTDIR\"])\n\n print(\"Clearing Build History\")\n build_dict = {\"build\": {}}\n with open(BUILDTEST_BUILD_LOGFILE, \"w\") as outfile:\n json.dump(build_dict, outfile, indent=2)",
"def deletecampaign(id):\n campaign = Campaign.query.filter_by(id=id).first_or_404()\n if request.method == 'POST':\n campaignname = campaign.title\n db.session.delete(campaign)\n db.session.commit()\n flash('Campaign \"%s\" was deleted!' % campaignname)\n return redirect(url_for('listcampaigns'))\n return render_template('deletecampaign.html', campaign=campaign)",
"def delete_jobListing(db, id):\n print(id)\n cursor = db.cursor()\n sql = \"DELETE FROM jobListing WHERE jobID=?\"\n cursor.execute(sql, (id,))\n db.commit()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
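The update_build and delete_build handlers above assume a Flask application wired to MongoDB through Flask-PyMongo; that setup is not part of either snippet, so the following is only a minimal sketch of the imports and objects they rely on (the connection string is hypothetical):

from flask import Flask, request, redirect, url_for
from flask_pymongo import PyMongo
from bson.objectid import ObjectId

app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/builds_db"  # hypothetical URI
mongo = PyMongo(app)  # exposes the mongo.db.build collection used by both route handlers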
Create and bind db_pool before the app starts serving requests | async def create_db_pool() -> None:
create_redis_pool = functools.partial(aioredis.create_redis_pool, encoding="utf-8")
redis_uri = f"redis://{REDIS_HOST}:{REDIS_PORT}"
redis = await trio_asyncio.run_asyncio(create_redis_pool, redis_uri)
app.db_pool = Database(redis) | [
"def dbpool(db_params):\n\n db_pool = psycopg2.pool.SimpleConnectionPool(1, 8, \n user=db_params[\"user\"],\n database=db_params[\"database\"])\n \n if(db_pool):\n # print(\"Connection pool created successfully\")\n\n return db_pool",
"async def acquire_pool(self):\n if isinstance(self.pool, aiomysql.Pool):\n with suppress(Exception):\n self.pool.close()\n\n self.pool = await aiomysql.create_pool(host=self.host, port=self.port, user=self.user,\n password=self.password, db=self.database)",
"def before_request():\n g.db = connect_db()",
"def _init_inner_db():\n db.create_all(bind=\"octopus_db\")",
"def init_worker(**kwargs):\n url = celery.conf.get(\"SQLALCHEMY_DATABASE_URI\")\n engine = create_engine(url)\n db_session.configure(bind=engine)",
"def registerPool(cls,name,dbPool):\r\n DatabaseRegistry.__registry[name] = dbPool",
"def heavy_init(cls):\n cfg.CONF.set_default('connection', 'sqlite://', group='database')\n cfg.CONF.set_default('max_overflow', -1, group='database')\n cfg.CONF.set_default('max_pool_size', 1000, group='database')\n\n qinling_opts = [\n (config.API_GROUP, config.api_opts),\n (config.PECAN_GROUP, config.pecan_opts),\n (config.ENGINE_GROUP, config.engine_opts),\n (config.STORAGE_GROUP, config.storage_opts),\n (config.KUBERNETES_GROUP, config.kubernetes_opts),\n (None, [config.launch_opt])\n ]\n for group, options in qinling_opts:\n cfg.CONF.register_opts(list(options), group)\n\n db_api.setup_db()",
"async def init_pg(app: web.Application):\n engine: Engine = await create_engine(\n database='async-lab',\n user='async-lab',\n password='async-lab',\n host='postgres',\n loop=app.loop\n )\n app['db'] = engine",
"def post(self, pool):\n # For some API requests the listener_id will be passed in the\n # pool_dict:\n context = pecan.request.context.get('octavia_context')\n\n pool.project_id = self._get_lb_project_id(context.session,\n self.load_balancer_id)\n\n lock_session = db_api.get_session(autocommit=False)\n if self.repositories.check_quota_met(\n context.session,\n lock_session,\n data_models.Pool,\n pool.project_id):\n lock_session.rollback()\n raise exceptions.QuotaException(\n resource=data_models.Pool._name())\n\n try:\n pool_dict = db_prepare.create_pool(\n pool.to_dict(render_unsets=True))\n if 'listener_id' in pool_dict:\n if pool_dict['listener_id'] is not None:\n self.listener_id = pool_dict.pop('listener_id')\n else:\n del pool_dict['listener_id']\n listener_repo = self.repositories.listener\n if self.listener_id and listener_repo.has_default_pool(\n lock_session, self.listener_id):\n raise exceptions.DuplicatePoolEntry()\n self._test_lb_and_listener_statuses(lock_session)\n\n pool_dict['operating_status'] = constants.OFFLINE\n pool_dict['load_balancer_id'] = self.load_balancer_id\n\n db_pool = self._validate_create_pool(lock_session, pool_dict)\n lock_session.commit()\n except Exception:\n with excutils.save_and_reraise_exception():\n lock_session.rollback()\n\n return self._send_pool_to_handler(context.session, db_pool)",
"async def init_db(app: Application) -> None:\n config = app['config'].redis\n redis = await aioredis.create_redis_pool(\n f'redis://{config.host}:{config.port}', db=config.database\n )\n app['db'] = redis",
"async def create_db_connection(request: Request, call_next):\n if \"titiler\" in str(request.url):\n return await call_next(request)\n reader = request.app.state.DB_READER()\n writer = request.app.state.DB_WRITER()\n READER.set(reader)\n WRITER.set(writer)\n resp = await call_next(request)\n reader.close()\n writer.close()\n return resp",
"def __init__(self, pool):\r\n resource.Resource.__init__(self)\r\n self.wsgi_resource = WSGIResource(reactor, pool, WSGIHandler())",
"def bootstrap(self):\n url = self.engine.url\n engine = create_engine(str(url))\n connection = None\n for i in range(10): # retries\n try:\n connection = engine.connect()\n except:\n print \"DBServer is probably not up yet, Retrying ...\"\n time.sleep(i * 5)\n continue\n if not connection:\n raise Exception(\"Couldn't connect to DBServer even after retries!\")\n\n self.Base.metadata.create_all(bind=self.engine)\n connection.close()",
"def create_pool(self, **params):\n pool = self.get_pool(connect=False, **params)\n\n # Save the pool\n self.pool.append(pool)\n\n return pool",
"def create_pool(self, context, pool, service):\n try:\n service_pending = self.lbdriver.create_pool(pool, service)\n self.cache.put(service, self.agent_host)\n if service_pending:\n self.needs_resync = True\n except q_exception.NeutronException as exc:\n LOG.error(\"NeutronException: %s\" % exc.msg)\n except Exception as exc:\n LOG.error(\"Exception: %s\" % exc.message)",
"async def on_startup():\n app.state.ENGINE_READER = create_engine(\n settings.reader_connection_string, echo=settings.debug\n )\n app.state.ENGINE_WRITER = create_engine(\n settings.writer_connection_string, echo=settings.debug\n )\n app.state.DB_READER = sessionmaker(\n autocommit=False, autoflush=False, bind=app.state.ENGINE_READER\n )\n app.state.DB_WRITER = sessionmaker(\n autocommit=False, autoflush=False, bind=app.state.ENGINE_WRITER\n )",
"def setup_db():\n create_service_db()",
"def connect(db_urls=None, async=False, *args, **kwargs):\n\tif(isinstance(db_urls, list)):\n\t\traise ValueError('Replicated databases must be provided as a tuple, not a list')\n\tif not(isinstance(db_urls, tuple)):\n\t\tdb_urls = (db_urls,)\n\t\n\treplicated_pool = None\n\tfor db_url in db_urls:\n\t\tdsn = get_dsn(db_url)\n\t\t\n\t\tdbapiName = dsn['dbapiName']\n\t\tdel dsn['dbapiName']\n\t\t\n\t\tglobs = {}\n\t\texec('from modu.persist import dbapi_%s as db_driver' % dbapiName, globs)\n\t\tdargs, dkwargs = globs['db_driver'].process_dsn(dsn)\n\t\tkwargs.update(dkwargs)\n\t\targs = list(args)\n\t\targs.extend(dargs)\n\t\t\n\t\tif('override_driver' in dkwargs):\n\t\t\tdbapiName = dkwargs.pop('override_driver')\n\t\t\n\t\tglobal pools, async_pools, pools_lock\n\t\tpools_lock.acquire()\n\t\ttry:\n\t\t\tif(async):\n\t\t\t\tselected_pools = async_pools\n\t\t\telse:\n\t\t\t\tselected_pools = pools\n\t\t\t\n\t\t\tif(db_url in selected_pools):\n\t\t\t\tpool = selected_pools[db_url]\n\t\t\telse:\n\t\t\t\tfrom modu.persist import dbapi\n\t\t\t\tif(async):\n\t\t\t\t\tpool = TimeoutConnectionPool(dbapiName, *dargs, **dkwargs)\n\t\t\t\telse:\n\t\t\t\t\tpool = SynchronousConnectionPool(dbapiName, *dargs, **dkwargs)\n\t\t\t\tselected_pools[db_url] = pool\n\t\tfinally:\n\t\t\tpools_lock.release()\n\t\t\n\t\tif(len(db_urls) == 1):\n\t\t\treturn pool\n\t\telif(replicated_pool):\n\t\t\treplicated_pool.add_slave(pool)\n\t\telse:\n\t\t\treplicated_pool = ReplicatedConnectionPool(pool)\n\t\n\treturn replicated_pool",
"async def _create_db(self):\n\t\tconn = await asyncpg.connect(\n\t\t\tdatabase=self._database, user=self._user,\n\t\t\tpassword=self._password, host=self._host\n\t\t\t)\n\n\t\tawait conn.execute(\n\t\t\t\"\"\"\n\t\t\tCREATE TABLE IF NOT EXISTS servers (\n\t\t\t\tid TEXT PRIMARY KEY, \n\t\t\t\tname TEXT,\n\t\t\t\tprefix TEXT,\n\t\t\t\toutput_channel TEXT, \n\t\t\t\texchanges TEXT ARRAY\n\t\t\t)\n\t\t\t\"\"\"\n\t\t)\n\n\t\tawait conn.close()\n\n\t\tself.pool = await asyncpg.create_pool(\n\t\t\tdatabase=self._database, user=self._user, \n\t\t\thost=self._host, password=self._password\n\t\t\t)\n\n\t\tself._logger.info(\"Set up database\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
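create_db_pool bridges an asyncio-flavoured aioredis client into a Trio app via trio_asyncio. The Database wrapper's interface is not shown, so the sketch below assumes direct access to the underlying aioredis pool (the redis attribute and the key are hypothetical) purely to illustrate the same run_asyncio bridging pattern from Trio code:

async def read_setting(key: str) -> str:
    # hypothetical accessor onto the aioredis pool created in create_db_pool()
    redis = app.db_pool.redis
    # asyncio-flavoured coroutines must be driven through the trio_asyncio bridge
    return await trio_asyncio.run_asyncio(redis.get, key)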
set image03 attributes that can be extracted from the (format-independent) volume | def _extract_attributes_from_volume(self):
vol = nibabel.load(self.nifti_1)
try:
(xyz_units, t_units) = vol.get_header().xyzt_units()
except:
(xyz_units, t_units) = (None, None)
if xyz_units == 'mm':
xyz_units = 'Millimeters'
elif xyz_units == 'm':
xyz_units = 'Meters'
elif xyz_units == 'um':
xyz_units = 'Micrometers'
else:
xyz_units = None
    if t_units == 's':
        t_units = 'Seconds'
    elif t_units == 'ms':
        t_units = 'Milliseconds'
    elif t_units == 'us':
        t_units = 'Microseconds'
    else:
        t_units = None
self.image_num_dimensions = len(vol.shape)
pixdim = vol.get_header()['pixdim']
for i in xrange(self.image_num_dimensions):
setattr(self, 'image_extent%d' % (i+1), vol.shape[i])
setattr(self, 'image_resolution%d' % (i+1), pixdim[i+1])
        if i < 3 and xyz_units:
            setattr(self, 'image_unit%d' % (i+1), xyz_units)
        if i == 3 and t_units:
            self.image_unit4 = t_units
return | [
"def incorrect_setting_for_volume_in_cm3_1():\n\n test_material = nmm.Material.from_library(\n name=\"Li4SiO4\",\n enrichment=50.0,\n enrichment_target=\"Li6\",\n enrichment_type=\"ao\",\n volume_in_cm3=\"1.0\",\n )\n\n test_material.fispact_material",
"def test_setting_for_volume_int(self):\n\n nmm.Material.from_library(\n name=\"Li4SiO4\",\n enrichment=50.0,\n enrichment_target=\"Li6\",\n enrichment_type=\"ao\",\n volume_in_cm3=1,\n )",
"def set_3d_attributes(\r\n self, mode=-1, min=0.0, max=0.0, iangle=-1, oangle=-1, outvol=-1\r\n ):\r\n return bass_call(\r\n BASS_ChannelSet3DAttributes,\r\n self.handle,\r\n mode,\r\n min,\r\n max,\r\n iangle,\r\n oangle,\r\n outvol,\r\n )",
"def updateAttributes(self):\n if self.gdal:\n self.rows = self.gdal.RasterYSize\n self.cols = self.gdal.RasterXSize\n self.bands = self.gdal.RasterCount\n band = self.gdal.GetRasterBand(1)\n self.datatype = band.DataType\n del band\n self.geotransform = self.gdal.GetGeoTransform()\n self.projection = self.gdal.GetProjection()\n\n else:\n raise Exception(\"No image is currently open from which the attributes can be read.\")\n\n return",
"def test_setting_for_volume_float(self):\n\n nmm.Material.from_library(\n name=\"Li4SiO4\",\n enrichment=50.0,\n enrichment_target=\"Li6\",\n enrichment_type=\"ao\",\n volume_in_cm3=1.1,\n )",
"def test_set_format(fx_asset):\n with Image(filename=str(fx_asset.join('mona-lisa.jpg'))) as img:\n img.format = 'png'\n assert img.format == 'PNG'\n strio = io.BytesIO()\n img.save(file=strio)\n strio.seek(0)\n with Image(file=strio) as png:\n assert png.format == 'PNG'\n with raises(ValueError):\n img.format = 'HONG'\n with raises(TypeError):\n img.format = 123",
"def fix_alt16_metadata(cube, coord=None):\n if coord is None:\n coord = cube.coord('altitude')\n elif isinstance(coord, str):\n coord = cube.coord(coord)\n coord.var_name = 'alt16'\n coord.standard_name = 'altitude'\n coord.long_name = 'altitude'\n coord.convert_units('m')\n coord.attributes['positive'] = 'up'\n return coord",
"def define_attributes(\n nc_file: Dataset,\n expocode,\n sect_id,\n data_type,\n stnnbr,\n castno,\n bottom_depth,\n):\n nc_file.EXPOCODE = expocode\n nc_file.Conventions = \"COARDS/WOCE\"\n nc_file.WOCE_VERSION = \"3.0\"\n nc_file.WOCE_ID = sect_id\n nc_file.DATA_TYPE = data_type\n nc_file.STATION_NUMBER = stnnbr\n nc_file.CAST_NUMBER = castno\n nc_file.BOTTOM_DEPTH_METERS = bottom_depth\n # nc_file.Creation_Time = fns.strftime_iso(datetime.datetime.utcnow())\n nc_file.Creation_Time = datetime.datetime.now(tz=datetime.timezone.utc).strftime(\n \"%Y-%m-%dT%H:%M:%S.%fZ\"\n )",
"def test_set_depth(fx_asset):\n with Image(filename=str(fx_asset.join('mona-lisa.jpg'))) as img:\n img.depth = 16\n assert img.depth == 16",
"def get_3d_attributes(self):\r\n answer = dict(\r\n mode=c_ulong(),\r\n min=c_float(),\r\n max=c_float(),\r\n iangle=c_ulong(),\r\n oangle=c_ulong(),\r\n outvol=c_float(),\r\n )\r\n bass_call(\r\n BASS_ChannelGet3DAttributes,\r\n self.handle,\r\n pointer(answer[\"mode\"]),\r\n pointer(answer[\"min\"]),\r\n pointer(answer[\"max\"]),\r\n pointer(answer[\"iangle\"]),\r\n pointer(answer[\"oangle\"]),\r\n pointer(answer[\"outvol\"]),\r\n )\r\n res = {}\r\n for k in answer:\r\n res[k] = answer[k].value\r\n return res",
"def modify_volume_attribute(DryRun=None, VolumeId=None, AutoEnableIO=None):\n pass",
"def set_attr_3(self, value):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.attr3\", self._object._eco_id, value)\r\n p2e._app.Exec(arg_str)",
"def add_to_vol(self, volume):\n volume[self.segmentation, 0] = self.color[0]/255\n volume[self.segmentation, 1] = self.color[1]/255\n volume[self.segmentation, 2] = self.color[2]/255",
"def file_attrs():\n sg1 = Group()\n sg2 = Group()\n sg3 = Group()\n grp = Group({'Subgroup1': sg1, 'Subgroup2': sg2, 'Subgroup3': sg3})\n grp.attrs = {'String Attribute': np.asarray(\"This is a string.\", '|S18'),\n 'Integer': np.asarray(42, '<i4'),\n 'Integer Array': np.asarray([0,1,2,3], '<i4'),\n 'Byte': np.asarray(-34, '|i1') }\n\n return File('attributes.hdf5', {'Group': grp})",
"def _populate_image_details(self, index, coco_image):\n file_name = coco_image[\"file_name\"]\n self.jsonl_data[index][\"image_details\"][\"format\"] = file_name[\n file_name.rfind(\".\") + 1 :\n ]\n self.jsonl_data[index][\"image_details\"][\"width\"] = coco_image[\"width\"]\n self.jsonl_data[index][\"image_details\"][\"height\"] = coco_image[\"height\"]",
"def artFluidAttrCtx(doAutoSave=bool, importfilemode=\"string\", displayVelocity=bool, mappressure=\"string\", useStrokeDirection=bool, outline=bool, rgbValue=float, reflectionaxis=\"string\", exportfilesave=\"string\", image3=\"string\", displayAsRender=bool, reflection=bool, dragSlider=\"string\", exportfilesizey=int, exportfiletype=\"string\", lowerradius=float, exportfilesizex=int, opacity=float, accopacity=bool, usepressure=bool, exists=bool, brushalignment=bool, name=\"string\", showactive=bool, surfaceConformedBrushVertices=bool, exportfilemode=\"string\", image1=\"string\", velocity=float, outwhilepaint=bool, tablet=bool, importfileload=\"string\", paintmode=\"string\", delaySelectionChanged=bool, profileShapeFile=\"string\", expandfilename=bool, brushfeedback=bool, stampProfile=\"string\", clear=bool, currentPaintableFluid=\"string\", projective=bool, importreassign=bool, property=\"string\", autoSave=\"string\", history=bool, image2=\"string\", tangentOutline=bool, radius=float):\n pass",
"def SetActiveAttributeInfo(self, vtkInformation, p_int, p_int_1, string, p_int_2, p_int_3, p_int_4):\n ...",
"def test_get_image_iso_meta(self):\n fixture = {'id': 3,\n 'name': 'fake iso image',\n 'is_public': False,\n 'disk_format': 'iso',\n 'container_format': 'bare',\n 'status': 'active',\n 'size': 19,\n 'location': \"file:///tmp/glance-tests/3\",\n 'properties': {}}\n\n new_image = self.client.add_image(fixture)\n new_image_id = new_image['id']\n\n # Test ID auto-assigned properly\n self.assertEquals(3, new_image_id)\n\n # Test all other attributes set\n data = self.client.get_image_meta(3)\n\n del fixture['location']\n for k, v in fixture.items():\n self.assertEquals(v, data[k])",
"def test_properties(self):\r\n file = SAMPLE_IMAGES[0]\r\n img = Image.from_file(str(file))\r\n assert img.name == file.stem\r\n\r\n assert img.h == 260\r\n assert img.w == 260\r\n assert len(img.shape) == 3\r\n assert img.ndim == 3\r\n assert img.c == 3"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
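The nibabel header accessors used by _extract_attributes_from_volume can be exercised on their own; a small illustrative snippet (the file name and the example shape in the comment are hypothetical, and get_header() is the older nibabel accessor that the method above already uses):

import nibabel

vol = nibabel.load('example_4d.nii.gz')             # hypothetical file
xyz_units, t_units = vol.get_header().xyzt_units()  # spatial / temporal unit codes
pixdim = vol.get_header()['pixdim']                 # voxel sizes (and TR for 4-D data)
print(vol.shape)                                    # e.g. (64, 64, 40, 120)
print(xyz_units, t_units, pixdim[1:5])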
report whether the file or S3 object exists | def exists(self):
(bucket_name, object_name) = self.source[5:].split('/', 1)
s3 = S3Connection(self._s3_access_key,
self._s3_secret_key,
calling_format=OrdinaryCallingFormat())
try:
bucket = s3.get_bucket(bucket_name)
    except S3ResponseError as data:
if data.args[0] == 404:
s3.close()
return False
raise
key = boto.s3.key.Key(bucket)
key.key = object_name
rv = key.exists()
s3.close()
return rv | [
"def file_exists( s3_path ):\n\n return _get_key(s3_path).exists()",
"def s3_file_exists(file_path, bucket_name):\n try:\n boto3.client('s3').head_object(Bucket=bucket_name, Key=file_path)\n except ClientError:\n # Not found\n return False\n return True",
"def test_asset_saintsxctf_s3_bucket_exists(self) -> None:\n bucket_name = 'asset.saintsxctf.com'\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n self.assertTrue(s3_bucket.get('Name') == bucket_name)",
"def file_exists(filename: str) -> bool:\n if filename.startswith('s3://'):\n s3_client = _lazy_init_s3_client()\n o = urllib.parse.urlparse(filename)\n try:\n s3_client.head_object(Bucket=o.netloc, Key=o.path[1:])\n exists = True\n except ClientError:\n exists = False\n else:\n exists = os.path.isfile(filename)\n\n return exists",
"def object_exists(bucket=None, key=None, uri=None, s3_resource=None):\n s3_resource = s3_resource if s3_resource else resource\n if uri:\n (bucket, key) = decompose_uri(uri)\n try:\n s3_resource.Object(bucket, key).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n else:\n raise e\n return True",
"def test_real_resource_exists(self):\n res = S3Resource(\"s3://localhost:8069/test_bucket/test_key\")\n assert res.exists()",
"def check_bundle():\n s3 = boto3.client('s3', region_name=os.environ['TF_VAR_aws_region'])\n\n try:\n s3.get_object(\n Bucket=os.environ['TF_VAR_elastic_beanstalk_s3_bucket'],\n Key=os.environ['TF_VAR_elastic_beanstalk_s3_key']\n )\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"NoSuchKey\":\n return False\n else:\n raise e\n else:\n return True",
"def exists(self, sound):\n return self.s3.exists(sound.url)",
"def s3_bucket_exists(self, bucketName):\n\n try:\n self._s3Res.meta.client.head_bucket(Bucket=bucketName)\n except Exception as e:\n return False\n return True",
"def test_fictive_resource_not_exists(self):\n res = S3Resource(\"s3://localhost:8069/test_bucket/i_dont_exist\")\n assert not res.exists()",
"def file_exists(self, resource: GenomicResource, filename: str) -> bool:",
"def is_s3_key_valid(bucket, key):\n s3_client = boto3.Session(profile_name=\"kandavar_processing\").client('s3')\n try:\n s3_client.head_object(Bucket=bucket, Key=key)\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['ResponseMetadata']['HTTPStatusCode'] == 404:\n return False # object does not exist\n else:\n raise e",
"def check_file(access_key: str,\r\n secret_key: str,\r\n s3_url: str,\r\n log: logging.Logger,\r\n bucket_name: str = None) -> Optional[List]:\r\n try:\r\n s3 = boto3.client('s3',\r\n aws_access_key_id=access_key,\r\n aws_secret_access_key=secret_key)\r\n except (ClientError, NoCredentialsError):\r\n log.error('Wrong credentials used to access the AWS account.')\r\n return None\r\n else:\r\n if bucket_name is None:\r\n bucket_name = s3_url.split('//')[1].split('.')[0]\r\n s3_file = s3_url.split('.amazonaws.com/')[1]\r\n return ['Contents' in s3.list_objects(Bucket=bucket_name,\r\n Prefix=s3_file),\r\n bucket_name,\r\n s3_file]",
"def check_bucket(file,bucketID):\r\n\tbucket = storage_client.bucket(bucketID)\r\n\tcheck = storage.Blob(bucket=bucket, name=file).exists(storage_client)\r\n\treturn check",
"def file_exists(obj):\n from django.forms import FileField\n from django.core.files.storage import default_storage\n from tendenci.apps.files.models import File\n\n if isinstance(obj, File):\n return default_storage.exists(obj.file.path)\n\n if isinstance(obj, FileField):\n return default_storage.exists(obj.path)\n\n if isinstance(obj, basestring) and obj:\n return default_storage.exists(obj)\n\n return False",
"def exists(bucket, key):\n # Get the object and use the key as a prefix\n name = ObjectStore.get_all_object_names(bucket, prefix=key)\n\n return len(name) > 0",
"def does_bucket_exist(self, bucket_name):\n for bucket in self.my_s3.buckets.all():\n if(bucket_name == bucket.name):\n return True\n \n return False",
"def objExists():\n pass",
"def __checkS3Bucket__(self, fileLocation=None):\n cmd = \"aws s3 ls \" + fileLocation\n try:\n output = subprocess.call(cmd, shell=True)\n if output == 1:\n return 'False'\n else:\n return 'True'\n\n except subprocess.CalledProcessError as e:\n print e.output\n arcpy.AddError(e.output)\n tb = traceback.format_exc()\n print tb\n arcpy.AddError(tb)\n self.__sm__(e.output)\n self.__sm__(tb)\n sys.exit()\n else:\n self.__sm__('Received list of elements in bucket')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the ceiling of x to dp decimal places (to pay back borrowed amounts); includes the 0.1% trading fee | def binance_ceil(x: float, dp: float):
return math.ceil(x*1.001*(10 ** dp))/(10 ** dp) | [
"def roundAmount(buyPrice, balance, stepSize):\n\tbuyPrice = float(buyPrice)\n\tbalance = float(balance)\n\tstepSize = float(stepSize)\n\tamount = (balance / buyPrice) - (balance / buyPrice % stepSize)\n\tamount = format(amount, '.8f')\n\treturn amount",
"def get_fee(market, price):\r\n return round(market.api.fees['trading']['taker'] * price,5)",
"def fees_percentage(self) -> float:\n return 100 * self.fees / (self.cost_buy + self.cost_sell)",
"def fees_percentage(self) -> float:\n return 100 * self.fees / (self.amount_deposit + self.amount_withdrawal)",
"def getfeerate(self):\n return self.config.fee_per_kb()",
"def cad_cashier(price: Union[int, float], payment: Union[int, float]) -> float:\n # Rounding to nearest 0.05\n priceRounded = round(price / 0.05) * 0.05\n\n # Round to two decimal places, float subtraction isn't exactly straightforward.\n return round((payment - priceRounded), 2)",
"def roundPrice(buyPrice, tickSize):\n\tbuyPrice = float(buyPrice)\n\ttickSize = float(tickSize)\n\tbuyPrice = buyPrice - (buyPrice % tickSize)\n\tbuyPrice = format(buyPrice, '.8f')\n\treturn buyPrice",
"def ebay_fee(sell_price):\n\n p50 = 0.13 # for amount $50 and lower\n p50_to_1000 = 0.05 # for $50.01-$1000\n p1000 = 0.02 # for $1000.01 and higher\n fee = 0.50 # fee to list item\n\n if sell_price <= 50:\n fee = fee + (sell_price*p50)\n elif sell_price <= 1000:\n fee = fee + (50*p50) + ((sell_price-50)*p50_to_1000)\n else:\n fee = fee + (50*p50) + ((1000-50)*p50_to_1000) \\\n + ((sell_price-1000)*p1000)\n return fee",
"def test_precision_when_default_rounding():\n price = TaxedMoney(Money('1.01', 'BTC'), Money('1.01', 'BTC'))\n result = percentage_discount(price, percentage=50)\n assert result.net == Money('0.51', 'BTC')",
"def get_price_per_unit_of_fee(self) -> PricePerUnit:",
"def desired_discount_rate_decimal_fraction(self):\n return self._desired_discount_rate_decimal_fraction",
"def _value_discount(base_price):\n if base_price <= 1000.0:\n return .03\n elif base_price < 3000.0:\n return .05\n elif base_price < 10000.0:\n return .07\n elif base_price < 50000.0:\n return .1\n else:\n return .15",
"def flops(val=None):\n return 0",
"def total_unr_perc_gain(self):\n tbc = self.total_book_cost()\n if tbc == 0.0:\n return 0.0\n return (self.total_market_value() - tbc) / tbc * 100.0",
"def fee(self):\n\n fees = 10\n if self.balance > 10.0 and self.balance < 1000.0:\n self.balance -= fees\n print(\" Your balance now is $\", self.balance, \"due to having less than $1000, which initiates a fee of $10\")\n return self.balance\n else:\n print(\"You will have no fees this month\")",
"def f_price(p):\n return f'{p:.2f}'.rjust(6, ' ')",
"def deposit(tents):\n tent = (float(tents) * float(0.10))\n return round(tent, 2)",
"def getExchangeFee(self):\n # get trade info from public API v3\n # info = requests.get(\"https://btc-e.com/api/3/info\").json()\n\n # fee = info['pairs']['btc_usd']['fee']\n\n return 0.2",
"def get_net(self) -> float:\n return self.coin * self.currentPrice - self.coinOwed * self.currentPrice + self.balance"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
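A quick worked example of binance_ceil with made-up numbers: the amount is first inflated by the 0.1% fee and then rounded up at the requested precision.

# 0.12345 * 1.001 = 0.12357345 -> ceil at 3 decimal places -> 0.124
print(binance_ceil(0.12345, 3))   # 0.124
# a round figure still ticks up once the fee pushes it over the step
print(binance_ceil(10.0, 2))      # 10.01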
Returns the isolated margin account dict for the given base asset. Pass the base asset as 'FET', for example; do NOT include USDT | def get_isolated_margin_account(client, asset: str):
c = client.get_isolated_margin_account()
return list(filter(lambda x: x["baseAsset"]["asset"] == asset, c["assets"]))[0] | [
"def get_asset_balance(self):\n return self.client.get_asset_balance(asset)",
"async def get_margin_account(self, **params):\r\n return await self.client_helper(\"get_margin_account\", **params)",
"def account_map():\n return wallet['obj'].account_map",
"def _asset_info(self, node, date):\n account_cost_conv = self._convert_cost(node, date)\n account_cost = account_cost_conv.number\n\n account_balance_market_value_node = node.balance.reduce(\n convert.convert_position,\n self.operating_currency,\n g.ledger.price_map,\n datetime.date.today(),\n )\n account_balance_market_value = account_balance_market_value_node.get(\n self.operating_currency, ZERO\n )\n\n # Calculate unrealized gain/loss\n # (follow beancount convention that negative values are income)\n account_income_gain_loss_unrealized = (\n account_cost - account_balance_market_value\n )\n\n # Calculate unrealized gain/loss (percentage)\n account_gain_loss_unrealized_percentage = (\n (account_income_gain_loss_unrealized * D(-1.0)) / account_cost\n ) * D(100.0)\n\n return (\n account_balance_market_value,\n account_income_gain_loss_unrealized,\n account_gain_loss_unrealized_percentage,\n )",
"def _generate_credential() -> dict:\n\n return {\n \"accounts\": {}\n }",
"def _get_product_accounts(self):\n accounts = super(ProductTemplate, self)._get_product_accounts()\n # res = self._get_asset_accounts()\n accounts.update({\n # 'stock_input': res['stock_input'] or self.property_stock_account_input or self.categ_id.property_stock_account_input_categ_id,\n # 'stock_output': res['stock_output'] or self.property_stock_account_output or self.categ_id.property_stock_account_output_categ_id,\n 'stock_valuation': self.property_stock_account_valuation or self.categ_id.property_stock_valuation_account_id or False,\n 'stock_transit': self.property_stock_account_transit or False,\n 'stock_loss': self.property_stock_account_loss or False,\n })\n return accounts",
"def get_account_assets():\n query = iroha.query('GetAccountAssets', account_id='userone@domain')\n IrohaCrypto.sign_query(query, admin_private_key)\n\n response = net.send_query(query)\n data = response.account_assets_response.account_assets\n for asset in data:\n print('Asset id = {}, balance = {}'.format(\n asset.asset_id, asset.balance))",
"def _get_product_accounts(self):\n accounts = super(ProductTemplate, self)._get_product_accounts()\n res = self._get_asset_accounts()\n accounts.update({\n 'asset': self.property_asset_account_id or self.categ_id.property_asset_account_categ_id,\n 'return_income': self.property_return_income_account_id or self.categ_id.property_return_income_account_categ_id,\n })\n return accounts",
"def read_ofx_accounts_map(ledger):\r\n m = {}\r\n vardir = ledger.directives['var']\r\n accids = vardir.modules['ofx']['accid']\r\n for decl in accids:\r\n accid, accname = [x.strip() for x in decl.split()]\r\n try:\r\n acc = ledger.get_account(accname)\r\n except KeyError:\r\n raise SystemExit(\r\n \"Could not find account %s\\n @var declaration: %s\\n\" %\r\n (accname, decl))\r\n m[accid] = acc\r\n return m",
"def asset_allocation(self):\n\n # Obtain all market values in 1 currency (doesn't matter which)\n total_value = self.market_value(self._common_currency)\n\n total_value = max(\n 1., total_value\n ) # protect against division by 0 (total_value = 0, means new portfolio)\n\n asset_allocation = {}\n for name, asset in self._assets.items():\n asset_allocation[name] = asset.market_value_in(\n self._common_currency) / total_value * 100.\n\n return asset_allocation",
"def get_coin_info():\n query = iroha.query('GetAssetInfo', asset_id='coin#domain')\n IrohaCrypto.sign_query(query, admin_private_key)\n\n response = net.send_query(query)\n data = response.asset_response.asset\n print('Asset id = {}, precision = {}'.format(data.asset_id, data.precision))",
"def get_account_root(address: str, client: Client) -> Dict[str, Union[int, str]]:\n account_info = get_account_info(address, client)\n result = cast(Dict[str, Any], account_info.result)\n return cast(Dict[str, Union[int, str]], result[\"account_data\"])",
"def test_get_account_by_type_and_currency(self):\n pass",
"def get_account_meta(self, user, account, domain, until=None,\n include_user_defined=True, external_quota=None):\n return {}",
"def construct_current_holdings(self):\r\n d=dict((k,v) for k,v in [(s,0.0) for s in self.symbol_list])\r\n d['cash']=self.initial_capital\r\n d['commission']=0.0\r\n d['total']=self.initial_capital\r\n return d",
"def asset_details(self):\n if self.asset_details_json:\n return json.loads(self.asset_details_json)\n else:\n return {}",
"def test_get_asset_device_contract_information_list(self):\n pass",
"def _get_party_details_dictionary(self, account_type):\n if account_type == 'INTERMEDIARY':\n party_details = get_counterpartys_intermediary_details(self.acm_obj)\n else:\n party_details = get_counterpartys_correspondent_details(self.acm_obj)\n\n clearing_system = get_national_clearing_system(self.acm_obj)\n clearing_code = None\n branch_code = self.acm_obj.CounterpartyAccountRef().Accounting()\n if self.acm_obj.Currency().Name() == 'ZAR' and clearing_system == 'ZA':\n clearing_code = get_national_clearing_code(self.acm_obj)\n\n party_details_dict = dict()\n party_details_dict['CLEARING_SYSTEM'] = clearing_code\n party_details_dict['BRANCH_CODE'] = branch_code\n party_details_dict['ACCOUNT'] = party_details.get('ACCOUNT')\n party_details_dict['BIC'] = party_details.get('BIC')\n\n return party_details_dict",
"def safe_get(wallet, asset):\n try:\n return Ownership.objects.get(wallet=wallet, asset=asset)\n except ObjectDoesNotExist:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
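A hedged usage sketch for get_isolated_margin_account: the nested field names below (free and borrowed on the baseAsset entry) follow the usual shape of Binance's isolated-margin payload but should be treated as assumptions rather than a guaranteed schema.

fet = get_isolated_margin_account(client, 'FET')   # FET/USDT isolated pair
print(fet['baseAsset'].get('free'))                # freely available FET, if present
print(fet['baseAsset'].get('borrowed'))            # outstanding FET loan, if present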
Returns the latest trade price as a float. The pair MUST include USDT, e.g. ZECUSDT | def get_price(client, pair: str):
return float(client.get_recent_trades(symbol=pair, limit=1)[0]["price"]) | [
"def decimalize_price(t):\n return \"{0:.2f}\".format(float(t[0]))",
"def _price_str_to_float(price_str: str) -> float:\n return float((price_str[4:]).replace(',', '.'))",
"def get_btcprice():\n bitcoin_api_url = \"https://api.alternative.me/v2/ticker/bitcoin/?convert=CAD\"\n response = requests.get(bitcoin_api_url)\n response_json = response.json()\n price_cad = parse_float(response_json[\"data\"][\"1\"][\"quotes\"][\"CAD\"][\"price\"])\n return price_cad",
"def price(self) -> \"Decimal\":\n return self.exchange.quote_price(self.pair)",
"async def _async_get_price(exchange: ExchangeClient, pair: str) -> Tuple[str, Decimal]:\n ticker = await exchange.fetch_ticker(pair)\n return exchange.id, Decimal(\n exchange.price_to_precision(\n pair,\n # \"last\" is an alias to \"close\"\n ticker.get(\"last\"),\n )\n )",
"def parsePriceTargetTag(tag: bs4.element.Tag) -> float:\n pt = tag.text.split(u\"\\u279D\")[-1]\n price_target = float(pt.replace('$', '').replace(',', '').strip())\n return price_target",
"def get_symbol_price(self) -> float:\n symbol_coin_info: dict = r.crypto.get_crypto_quote(self.symbol)\n symbol_coin_price: int = symbol_coin_info[\"mark_price\"]\n\n return symbol_coin_price",
"def get_price(self):\n if self.price is not None:\n return unicode(self.price)\n if self.price_option:\n return unicode(self.price_option)\n\n return unicode('0.00')",
"def treasury_to_decimal(price):\n price_split = price.split(\"-\")\n integer_part = int(price_split[0])\n frac_part = price_split[1]\n\n frac_part_one = float(frac_part[0:2])\n frac_part_two = float(0)\n\n if len(frac_part) == 3:\n last_digit = frac_part[-1]\n if last_digit == \"+\":\n frac_part_two = float(4)\n else:\n frac_part_two = float(last_digit)\n elif len(frac_part) > 3:\n raise ValueError(\"4 decimal places are not supported\")\n\n return integer_part + (frac_part_one + (frac_part_two / 8)) / 32",
"def get_price(self):\n return str(self.gui.spn_price.textFromValue(self.gui.spn_price.value()))",
"def get_fee(market, price):\r\n return round(market.api.fees['trading']['taker'] * price,5)",
"def get_the_price(self, t):\r\n try:\r\n return float(self.price.loc[t])\r\n except:\r\n print(\"couldn't find the price at time of \" + self.ticker + \" \" + t)\r\n return",
"def proper_price(self, price):\r\n str_price = str(price)\r\n if ',' in str_price:\r\n str_price.replace(',', '.')\r\n if '.' not in str_price:\r\n currency_str_price = f'{str_price}.00'\r\n elif str_price[-2] == '.':\r\n currency_str_price = f'{str_price}0'\r\n else:\r\n currency_str_price = str_price\r\n return currency_str_price",
"def price_in_euros(self):\n return \"{:.2f}€\".format(self.price / 100)",
"def get_price(self):\n\t\treturn self._price_p_night",
"def get_price_per_unit_of_fee(self) -> PricePerUnit:",
"def get_prices(soup: BeautifulSoup) -> Generator[float, None, None]:\n _rows = soup.table.find_all(\"tr\")[1:]\n _prices = (row.find(\"br\").next.strip() for row in _rows)\n rate = ServiceCompanyHTML.get_exchange_rate()\n prices = (round(float(price.replace(\",\", \"\")) * rate, 2) for price in _prices)\n return prices",
"def display_price(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"display_price\")",
"def getPrice(self):\n return FoursquarePrice(self.base.get(\"price\", []))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the free USDT balance in the spot account as a float | def get_usdt_balance(client):
return float(client.get_asset_balance(asset='USDT')["free"]) | [
"def balance(self):\n return Amount(self._balance, \"usd\")",
"def getUninvested(self) -> float:\n record = self.conn.execute(\"\"\"SELECT amount FROM uninvested\"\"\").fetchone()\n if record:\n return float(record[0])\n else:\n return 0",
"def get_balance(self):\n query = 'select sum(amount) from pizza_transactions where user_id=%s'\n self.cursor.execute(query, (self.user,))\n result = self.cursor.fetchall()\n balance = result[0][0]\n if balance is None:\n balance = 0.0\n return balance",
"def GetFloat(self):\n\n return self.Amount / float(Money.HiCost)",
"def get_buy_amount(self):\r\n return self.balance / 3",
"def getBalanceAdjustment(self) -> \"float\":\n return _coin.SoCamera_getBalanceAdjustment(self)",
"def vat_amount(value):\n amount = float(value)\n return amount*vat_factor",
"def balance(self) -> float:\n return self._get_account_info_double(AccountInfoDouble.ACCOUNT_BALANCE)",
"def get_fund_balances():\n return _format_fund_balances(load_cash_forecasts(\"fund_balances\"))",
"def deposit(tents):\n tent = (float(tents) * float(0.10))\n return round(tent, 2)",
"def SoDecimationPercentageElement_get(state: 'SoState') -> \"float\":\n return _coin.SoDecimationPercentageElement_get(state)",
"def getBalance(self):\n\n balance = 0\n for item in self.ledger:\n balance += item[\"amount\"]\n\n return balance",
"def get_net(self) -> float:\n return self.coin * self.currentPrice - self.coinOwed * self.currentPrice + self.balance",
"def get_transaction_value():\r\n # Get the user input, transform it from a string to a float and store it in user_input\r\n return float(input('Your transaction amount : '))",
"def get_fcy_balance_formatted(self):\n return self.fcy_balance_formatted",
"def get_balance(self):\n balance_per_dev = self._build_balance_per_dev()\n return max(abs(b) for b in balance_per_dev.values())",
"def getbalance(url):\n return Channel.get(url).our.nValue",
"def get_balance(self):\n current_balance = 0\n\n for item in self.ledger:\n current_balance += item[\"amount\"]\n\n return current_balance",
"def _get_val(self):\n return self.stock_owned.dot(self.stock_price) + self.cash_in_hand"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
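The spot and margin helpers above (together with binance_ceil defined earlier) compose naturally; the sketch below uses hypothetical numbers and only illustrates how they fit together, not a complete strategy.

pair = 'FETUSDT'
price = get_price(client, pair)            # latest trade price as a float
usdt_free = get_usdt_balance(client)       # free USDT in the spot wallet
borrowed_fet = 125.3774                    # hypothetical borrowed amount
repay_qty = binance_ceil(borrowed_fet, 4)  # quantity to repay, fee included, rounded up to 4 dp
print(price, usdt_free, repay_qty)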
Writes frame image to file. | def writeFrame(self, frameNum, img):
    # apply the frame number to the output filename pattern in dest
    frameFilename = self.dest % frameNum
    print("write " + frameFilename)
# write file
if not(img):
img = Image.new("RGB", self.size, "White")
img.save(frameFilename, 'PNG') | [
"def __writeFrame(self, saveDir=\"./ballData\"):\r\n if not os.path.exists(saveDir):\r\n os.makedirs(saveDir)\r\n saveName = str(int(time.time()))\r\n saveImgPath = os.path.join(saveDir, saveName + \".jpg\")\r\n try:\r\n cv2.imwrite(saveImgPath, self.frameArray)\r\n except:\r\n print(\"Error when saveing current frame!\")",
"def write(self, frame):\n self.video_writer.write(frame)",
"def write(self, image: Image):\n # image should be in RGB format, need to switch it to BGR for OpenCV\n frame = cv2.cvtColor(image.data, cv2.COLOR_RGB2BGR)\n frame = cv2.resize(\n frame, (self.width, self.height), interpolation=cv2.INTER_AREA\n )\n self.out.write(frame)",
"def __writeFrame(self, saveDir=\"./stickData\"):\r\n if not os.path.exists(saveDir):\r\n os.makedirs(saveDir)\r\n saveName = str(int(time.time()))\r\n saveImgPath = os.path.join(saveDir, saveName + \".jpg\")\r\n try:\r\n cv2.imwrite(saveImgPath, self.frameArray)\r\n except:\r\n print(\"Error when saveing current frame!\")",
"def WriteImage(self, filename):\r\n cv2.imwrite(filename,self.img)",
"def write_image(image: Image, filename: str) -> None:\n image.save(filename)",
"def save_frame(self, filename, t=0, savemask=False):\n im = self.get_frame(t)\n if savemask and self.mask is not None:\n mask = 255 * self.mask.get_frame(t)\n im = np.dstack([im, mask]).astype('uint8')\n ffmpeg_writer.write_image(filename, im)",
"def write_png(self, fname):\n im = self.make_image()\n _png.write_png(im, fname)",
"def saveFrame(self):\n # Needs to be completed\n \n # Ask the user for the destination file\n dest = QtGui.QFileDialog.getSaveFileName(self, \"Save frame as...\", '', 'Images (*.png *.gif *.jpg *.jpeg)')\n # Grab and save a frame in the given file\n self.gui.video.device.save(str(dest))",
"def save_frame(frame_num, frame_path, frame_plot):\n # frame plot\n frame_plot()\n plt.savefig(frame_path + str(frame_num) + '.png')\n plt.close()",
"def saveFrame(self, filename):\n\t\tself.frameList.append(filename)\n\t\tvisualizer = self.visualizer\n\t\timageType = self.imageType\n\t\tLogging.info(\"Saving screenshot to \", filename, kw = \"visualizer\")\n\t\tcomm = \"visualizer.getCurrentMode().saveSnapshot(filename)\"\n\t\teval(comm)",
"def write_image_world_file(image_file, a, d, b, e, c, f):\n target = osp.abspath(image_file) + 'w' # simply just append w on end of whatever extension the file has... (easiest reliable method)\n with open(target, 'w') as t:\n t.write('%.8f\\n%.8f\\n%.8f\\n%.8f\\n%.8f\\n%.8f\\n' % (a, d, b, e, c, f))",
"def write(self, frames):\n for frame in frames:\n self.out_video.write(\n frame.numpy().transpose(1, 2, 0).astype(\"uint8\"))",
"def write(self):\n if self.exif_dict != {}:\n exif_bytes = piexif.dump(self.exif_dict)\n piexif.insert(exif_bytes, self.filename)",
"def write_labeled_frames(self, root):\n write_dir = Path(root) / f\"{self.video_name}\"\n os.makedirs(write_dir, exist_ok=True)\n for frame, i in self.frames:\n i_str = str(i).zfill(5)\n frame.save(write_dir / f\"{i_str}.png\",\"PNG\")\n return True",
"def write_image(img, output):\n cv2.imwrite(output, img)",
"def writedump(self, frame, filename=\"\"):\n\n filename = os.path.join(\n self.datadir, \"frame.dmp\") if filename == \"\" else filename\n self.checkdatadir(createdir=True)\n\n if self.verbosity > 0:\n msg = \"Writing dump file {}\".format(colorize(filename, \"blue\"))\n print(msg)\n\n writedump(frame, filename)",
"def write_to_file(self, struct, fName):\n\n f = h5py.File(fName, \"w\")\n self._recursive_write(f, struct)\n f.close()",
"def write_frame(self, data):\r\n raise NotImplemented()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of fault domains that the Dedicated Host Group spans. Changing this forces a new resource to be created. | def platform_fault_domain_count(self) -> pulumi.Input[int]:
return pulumi.get(self, "platform_fault_domain_count") | [
"def domains_count(self):\n return self._domains_count",
"def get_number_of_agents_for_scheduling(self, context):\n return 1",
"def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")",
"def domain_size(domain):\n fixed_domain_sizes = {\n \"current collector\": 3,\n \"negative particle\": 5,\n \"positive particle\": 7,\n \"negative electrode\": 11,\n \"separator\": 13,\n \"positive electrode\": 17,\n \"negative particle size\": 19,\n \"positive particle size\": 23,\n }\n if domain in [[], None]:\n size = 1\n elif all(dom in fixed_domain_sizes for dom in domain):\n size = sum(fixed_domain_sizes[dom] for dom in domain)\n else:\n size = sum(hash(dom) % 100 for dom in domain)\n return size",
"def numOfDefinedStoragePools(self):\n ret = libvirtmod.virConnectNumOfDefinedStoragePools(self._o)\n if ret == -1: raise libvirtError ('virConnectNumOfDefinedStoragePools() failed', conn=self)\n return ret",
"def domain_count_dict(domains):\n domain_count = {} \n for domain in domains: \n if not domain in domain_count:\n domain_count[domain] = 1\n else:\n domain_count[domain] = domain_count[domain] + 1\n return domain_count",
"def _constraints_for_new_request(cls, config):\n return {'count': npr.randint(5, 20, 1)[0]}",
"def get_advertiser_domain_pagerrank(self) -> int:\n raise NotImplementedError",
"def getNumSites(self) -> int:\n return len(self.sites)",
"def _max_servers(self, resource_desc):\n total = 0\n for allocator in self._allocators:\n count = allocator.max_servers(resource_desc)\n self._logger.debug('%r returned %d', allocator._name, count)\n total += count\n return total",
"def number_of_requests(self) -> int:\n return len(self.resources)",
"def desired_nr_resources(self):\n return self.problem.schedule[self.now % len(self.problem.schedule)]",
"def update_number_of_domain_controllers(self, DirectoryId: str, DesiredNumber: int) -> Dict:\n pass",
"def get_site_cnt(self):\n\n return len(self.sites)",
"def __len__(self):\n return len(self.options['inventory_manager'].list_hosts()) + len(self.get_extra_inventory_hosts())",
"def pooled_instances(self) -> int:\n return pulumi.get(self, \"pooled_instances\")",
"def test_set_limits(self):\n domain = Domain.objects.get(name=\"test.com\")\n values = {\n \"name\": domain.name, \"quota\": domain.quota,\n \"default_mailbox_quota\": domain.default_mailbox_quota,\n \"enabled\": domain.enabled, \"type\": \"domain\",\n \"mailboxes_limit\": 3, \"mailbox_aliases_limit\": 3,\n \"domain_aliases_limit\": 3, \"domain_admins_limit\": 3\n }\n self.ajax_post(\n reverse(\"admin:domain_change\", args=[domain.id]),\n values\n )\n domain.refresh_from_db()\n self.assertEqual(\n domain.domainobjectlimit_set.get(name=\"mailboxes\").max_value, 3)\n self.assertEqual(\n domain.domainobjectlimit_set.get(\n name=\"mailbox_aliases\").max_value, 3)\n self.assertEqual(\n domain.domainobjectlimit_set.get(\n name=\"domain_aliases\").max_value, 3)\n self.assertEqual(\n domain.domainobjectlimit_set.get(\n name=\"domain_admins\").max_value, 3)",
"def verify_domain_validity(self):\n self.component_count['domain'] = {}\n self.component_count['domain']['intents'] = len(self.domain.intents)\n self.component_count['domain']['utterances'] = len(self.domain.templates)\n self.component_count['domain']['actions'] = len(self.domain.user_actions)\n self.component_count['domain']['forms'] = len(self.domain.form_names)\n self.component_count['domain']['slots'] = len(self.domain.slots)\n self.component_count['domain']['entities'] = len(self.domain.entities)\n self.component_count['utterances'] = len(self.domain.templates)\n if self.domain.is_empty():\n self.summary['domain'] = [\"domain.yml is empty!\"]",
"def test_dos_create_flavor_limits_list(self):\n # create a huge list of origins\n self.reset_defaults()\n self.limits_list.append({\"domains\": {\"min\": 1, \"max\": 5}})\n self.limits_list.append({\"caching\": {\"min\": 3600,\n \"max\": 604800, \"incr\": 300}})\n for k in range(1, 9000):\n self.limits_list.append({\"origins\": {\"min\": \"%s\" % k, \"max\": 5}})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.flavor_id = str(uuid.uuid1())\n self.check_one_request()",
"def numprocesses(self):\r\n info = self.info()\r\n return info['max_processes']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Manage a Dedicated Host Group. Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_dedicated_host_group = azure.compute.DedicatedHostGroup("exampleDedicatedHostGroup", resource_group_name=example_resource_group.name, location=example_resource_group.location, platform_fault_domain_count=1) ``` Import Dedicated Host Group can be imported using the `resource id`, e.g. ```sh | def __init__(__self__,
resource_name: str,
args: DedicatedHostGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
... | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n automatic_placement_enabled: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n platform_fault_domain_count: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n zone: Optional[pulumi.Input[str]] = None) -> 'DedicatedHostGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _DedicatedHostGroupState.__new__(_DedicatedHostGroupState)\n\n __props__.__dict__[\"automatic_placement_enabled\"] = automatic_placement_enabled\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"platform_fault_domain_count\"] = platform_fault_domain_count\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"zone\"] = zone\n return DedicatedHostGroup(resource_name, opts=opts, __props__=__props__)",
"def delete_dedicated_host_group(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='delete_dedicated_host_group')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/dedicated_host/groups/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='DELETE',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response",
"def list_dedicated_hosts(self,\n *,\n dedicated_host_group_id: str = None,\n start: str = None,\n limit: int = None,\n resource_group_id: str = None,\n zone_name: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_dedicated_hosts')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation,\n 'dedicated_host_group.id': dedicated_host_group_id,\n 'start': start,\n 'limit': limit,\n 'resource_group.id': resource_group_id,\n 'zone.name': zone_name\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/dedicated_hosts'\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response",
"def get_dedicated_host_group(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='get_dedicated_host_group')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/dedicated_host/groups/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response",
"def delete(self, group_name):\n self.request.mongo_connection.shinken.hostgroups.remove(\n {\"hostgroup_name\": group_name}\n )",
"def list_dedicated_host_groups(self,\n *,\n start: str = None,\n limit: int = None,\n resource_group_id: str = None,\n zone_name: str = None,\n **kwargs\n ) -> DetailedResponse:\n\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='list_dedicated_host_groups')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation,\n 'start': start,\n 'limit': limit,\n 'resource_group.id': resource_group_id,\n 'zone.name': zone_name\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n url = '/dedicated_host/groups'\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response",
"def test_add_host_to_hostgroup(self):\n pass",
"def update_dedicated_host_group(self,\n id: str,\n dedicated_host_group_patch: 'DedicatedHostGroupPatch',\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n if dedicated_host_group_patch is None:\n raise ValueError('dedicated_host_group_patch must be provided')\n if isinstance(dedicated_host_group_patch, DedicatedHostGroupPatch):\n dedicated_host_group_patch = convert_model(dedicated_host_group_patch)\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='update_dedicated_host_group')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n data = json.dumps(dedicated_host_group_patch)\n headers['content-type'] = 'application/merge-patch+json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/dedicated_host/groups/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='PATCH',\n url=url,\n headers=headers,\n params=params,\n data=data)\n\n response = self.send(request)\n return response",
"def update(self, group_name, group):\n group_dict = group.as_dict()\n if \"hostgroup_name\" not in group_dict.keys():\n group_dict['hostgroup_name'] = group_name\n\n self.request.mongo_connection.shinken.hostgroups.update(\n {\"hostgroup_name\": group_name},\n group_dict\n )",
"def from_dict(cls, _dict: Dict) -> 'DedicatedHostGroup':\n args = {}\n if 'class' in _dict:\n args['class_'] = _dict.get('class')\n else:\n raise ValueError('Required property \\'class\\' not present in DedicatedHostGroup JSON')\n if 'created_at' in _dict:\n args['created_at'] = string_to_datetime(_dict.get('created_at'))\n else:\n raise ValueError('Required property \\'created_at\\' not present in DedicatedHostGroup JSON')\n if 'crn' in _dict:\n args['crn'] = _dict.get('crn')\n else:\n raise ValueError('Required property \\'crn\\' not present in DedicatedHostGroup JSON')\n if 'dedicated_hosts' in _dict:\n args['dedicated_hosts'] = [DedicatedHostReference.from_dict(x) for x in _dict.get('dedicated_hosts')]\n else:\n raise ValueError('Required property \\'dedicated_hosts\\' not present in DedicatedHostGroup JSON')\n if 'family' in _dict:\n args['family'] = _dict.get('family')\n else:\n raise ValueError('Required property \\'family\\' not present in DedicatedHostGroup JSON')\n if 'href' in _dict:\n args['href'] = _dict.get('href')\n else:\n raise ValueError('Required property \\'href\\' not present in DedicatedHostGroup JSON')\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n else:\n raise ValueError('Required property \\'id\\' not present in DedicatedHostGroup JSON')\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n else:\n raise ValueError('Required property \\'name\\' not present in DedicatedHostGroup JSON')\n if 'resource_group' in _dict:\n args['resource_group'] = ResourceGroupReference.from_dict(_dict.get('resource_group'))\n else:\n raise ValueError('Required property \\'resource_group\\' not present in DedicatedHostGroup JSON')\n if 'resource_type' in _dict:\n args['resource_type'] = _dict.get('resource_type')\n else:\n raise ValueError('Required property \\'resource_type\\' not present in DedicatedHostGroup JSON')\n if 'supported_instance_profiles' in _dict:\n args['supported_instance_profiles'] = [InstanceProfileReference.from_dict(x) for x in _dict.get('supported_instance_profiles')]\n else:\n raise ValueError('Required property \\'supported_instance_profiles\\' not present in DedicatedHostGroup JSON')\n if 'zone' in _dict:\n args['zone'] = ZoneReference.from_dict(_dict.get('zone'))\n else:\n raise ValueError('Required property \\'zone\\' not present in DedicatedHostGroup JSON')\n return cls(**args)",
"def create_host_vapi(context, host_name, datacenter_name):\n user = context.testbed.config['ESX_USER']\n pwd = context.testbed.config['ESX_PASS']\n\n # Get the host folder for the Datacenter1 using the folder query\n datacenter = context.testbed.entities['DATACENTER_IDS'][datacenter_name]\n folder_summaries = context.client.vcenter.Folder.list(\n Folder.FilterSpec(type=Folder.Type.HOST, datacenters=set([datacenter])))\n folder = folder_summaries[0].folder\n\n create_spec = Host.CreateSpec(\n hostname=host_name,\n user_name=user,\n password=pwd,\n folder=folder,\n thumbprint_verification=Host.CreateSpec.ThumbprintVerification.NONE)\n host = context.client.vcenter.Host.create(create_spec)\n print(\"Created Host '{}' ({})\".format(host, host_name))\n\n return host",
"def delete_host_group(self, host_group_id):\n LOG.info(\"Deleting hostgroup: '%s'\" % host_group_id)\n return self.client.request(\n constants.DELETE, constants.DELETE_HOST_GROUP_URL.format(\n self.server_ip, host_group_id),\n payload=None)",
"def test_delete_host_group(self):\n pass",
"def modify_host_group(self, host_group_id, name=None,\n remove_host_ids=None,\n add_host_ids=None, description=None):\n LOG.info(\"Modifying hostgroup: '%s'\" % host_group_id)\n payload = self._prepare_modify_host_group_payload(\n name, remove_host_ids, add_host_ids, description)\n return self.client.request(\n constants.PATCH, constants.MODIFY_HOST_GROUP_URL.format(\n self.server_ip, host_group_id),\n payload)",
"def _create_hostgroup(self, hostgroupname):\n cli_cmd = 'createhostgroup -n %(name)s' % {'name': hostgroupname}\n out = self._execute_cli(cli_cmd)\n\n self._assert_cli_operate_out('_create_hostgroup',\n ('Failed to Create hostgroup %s.'\n % hostgroupname),\n cli_cmd, out)",
"def test_delete_host_from_group(self):\n pass",
"def get_hostgroup(hostgroup, limit = None, columns = None, extra_filter = None):\n return query(\"GET hostgroups\\nFilter: name = %s\\n\" % hostgroup,\n limit=limit, columns=columns, item_type=\"hostgroup\",\n extra_filter=extra_filter)",
"def create(self, group):\n self.request.mongo_connection.shinken.hostgroups.insert(\n group.as_dict()\n )",
"def get_dedicated_cloud_node(dedicated_cloud_node_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDedicatedCloudNodeResult:\n __args__ = dict()\n __args__['dedicatedCloudNodeName'] = dedicated_cloud_node_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:vmwarecloudsimple/v20190401:getDedicatedCloudNode', __args__, opts=opts, typ=GetDedicatedCloudNodeResult).value\n\n return AwaitableGetDedicatedCloudNodeResult(\n availability_zone_id=pulumi.get(__ret__, 'availability_zone_id'),\n availability_zone_name=pulumi.get(__ret__, 'availability_zone_name'),\n cloud_rack_name=pulumi.get(__ret__, 'cloud_rack_name'),\n created=pulumi.get(__ret__, 'created'),\n id=pulumi.get(__ret__, 'id'),\n location=pulumi.get(__ret__, 'location'),\n name=pulumi.get(__ret__, 'name'),\n nodes_count=pulumi.get(__ret__, 'nodes_count'),\n placement_group_id=pulumi.get(__ret__, 'placement_group_id'),\n placement_group_name=pulumi.get(__ret__, 'placement_group_name'),\n private_cloud_id=pulumi.get(__ret__, 'private_cloud_id'),\n private_cloud_name=pulumi.get(__ret__, 'private_cloud_name'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n purchase_id=pulumi.get(__ret__, 'purchase_id'),\n sku=pulumi.get(__ret__, 'sku'),\n status=pulumi.get(__ret__, 'status'),\n tags=pulumi.get(__ret__, 'tags'),\n type=pulumi.get(__ret__, 'type'),\n vmware_cluster_name=pulumi.get(__ret__, 'vmware_cluster_name'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing DedicatedHostGroup resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
automatic_placement_enabled: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
platform_fault_domain_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None) -> 'DedicatedHostGroup':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _DedicatedHostGroupState.__new__(_DedicatedHostGroupState)
__props__.__dict__["automatic_placement_enabled"] = automatic_placement_enabled
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["platform_fault_domain_count"] = platform_fault_domain_count
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["zone"] = zone
return DedicatedHostGroup(resource_name, opts=opts, __props__=__props__) | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n active_nics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n allow_forged_transmits: Optional[pulumi.Input[bool]] = None,\n allow_mac_changes: Optional[pulumi.Input[bool]] = None,\n allow_promiscuous: Optional[pulumi.Input[bool]] = None,\n check_beacon: Optional[pulumi.Input[bool]] = None,\n computed_policy: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n failback: Optional[pulumi.Input[bool]] = None,\n host_system_id: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notify_switches: Optional[pulumi.Input[bool]] = None,\n ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['HostPortGroupPortArgs']]]]] = None,\n shaping_average_bandwidth: Optional[pulumi.Input[int]] = None,\n shaping_burst_size: Optional[pulumi.Input[int]] = None,\n shaping_enabled: Optional[pulumi.Input[bool]] = None,\n shaping_peak_bandwidth: Optional[pulumi.Input[int]] = None,\n standby_nics: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n teaming_policy: Optional[pulumi.Input[str]] = None,\n virtual_switch_name: Optional[pulumi.Input[str]] = None,\n vlan_id: Optional[pulumi.Input[int]] = None) -> 'HostPortGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _HostPortGroupState.__new__(_HostPortGroupState)\n\n __props__.__dict__[\"active_nics\"] = active_nics\n __props__.__dict__[\"allow_forged_transmits\"] = allow_forged_transmits\n __props__.__dict__[\"allow_mac_changes\"] = allow_mac_changes\n __props__.__dict__[\"allow_promiscuous\"] = allow_promiscuous\n __props__.__dict__[\"check_beacon\"] = check_beacon\n __props__.__dict__[\"computed_policy\"] = computed_policy\n __props__.__dict__[\"failback\"] = failback\n __props__.__dict__[\"host_system_id\"] = host_system_id\n __props__.__dict__[\"key\"] = key\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notify_switches\"] = notify_switches\n __props__.__dict__[\"ports\"] = ports\n __props__.__dict__[\"shaping_average_bandwidth\"] = shaping_average_bandwidth\n __props__.__dict__[\"shaping_burst_size\"] = shaping_burst_size\n __props__.__dict__[\"shaping_enabled\"] = shaping_enabled\n __props__.__dict__[\"shaping_peak_bandwidth\"] = shaping_peak_bandwidth\n __props__.__dict__[\"standby_nics\"] = standby_nics\n __props__.__dict__[\"teaming_policy\"] = teaming_policy\n __props__.__dict__[\"virtual_switch_name\"] = virtual_switch_name\n __props__.__dict__[\"vlan_id\"] = vlan_id\n return HostPortGroup(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n base_capacity: Optional[pulumi.Input[int]] = None,\n config_parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkgroupConfigParameterArgs']]]]] = None,\n endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['WorkgroupEndpointArgs']]]]] = None,\n enhanced_vpc_routing: Optional[pulumi.Input[bool]] = None,\n namespace_name: Optional[pulumi.Input[str]] = None,\n publicly_accessible: Optional[pulumi.Input[bool]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n workgroup_id: Optional[pulumi.Input[str]] = None,\n workgroup_name: Optional[pulumi.Input[str]] = None) -> 'Workgroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _WorkgroupState.__new__(_WorkgroupState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"base_capacity\"] = base_capacity\n __props__.__dict__[\"config_parameters\"] = config_parameters\n __props__.__dict__[\"endpoints\"] = endpoints\n __props__.__dict__[\"enhanced_vpc_routing\"] = enhanced_vpc_routing\n __props__.__dict__[\"namespace_name\"] = namespace_name\n __props__.__dict__[\"publicly_accessible\"] = publicly_accessible\n __props__.__dict__[\"security_group_ids\"] = security_group_ids\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"workgroup_id\"] = workgroup_id\n __props__.__dict__[\"workgroup_name\"] = workgroup_name\n return Workgroup(resource_name, opts=opts, __props__=__props__)",
"def get_dedicated_host_group(self,\n id: str,\n **kwargs\n ) -> DetailedResponse:\n\n if id is None:\n raise ValueError('id must be provided')\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='get_dedicated_host_group')\n headers.update(sdk_headers)\n\n params = {\n 'version': self.version,\n 'generation': self.generation\n }\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['id']\n path_param_values = self.encode_path_vars(id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/dedicated_host/groups/{id}'.format(**path_param_dict)\n request = self.prepare_request(method='GET',\n url=url,\n headers=headers,\n params=params)\n\n response = self.send(request)\n return response",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_ip_version: Optional[pulumi.Input[str]] = None,\n connection_drain: Optional[pulumi.Input[bool]] = None,\n connection_drain_timeout: Optional[pulumi.Input[int]] = None,\n health_check: Optional[pulumi.Input[pulumi.InputType['ServerGroupHealthCheckArgs']]] = None,\n preserve_client_ip_enabled: Optional[pulumi.Input[bool]] = None,\n protocol: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n scheduler: Optional[pulumi.Input[str]] = None,\n server_group_name: Optional[pulumi.Input[str]] = None,\n server_group_type: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None) -> 'ServerGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServerGroupState.__new__(_ServerGroupState)\n\n __props__.__dict__[\"address_ip_version\"] = address_ip_version\n __props__.__dict__[\"connection_drain\"] = connection_drain\n __props__.__dict__[\"connection_drain_timeout\"] = connection_drain_timeout\n __props__.__dict__[\"health_check\"] = health_check\n __props__.__dict__[\"preserve_client_ip_enabled\"] = preserve_client_ip_enabled\n __props__.__dict__[\"protocol\"] = protocol\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"scheduler\"] = scheduler\n __props__.__dict__[\"server_group_name\"] = server_group_name\n __props__.__dict__[\"server_group_type\"] = server_group_type\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"vpc_id\"] = vpc_id\n return ServerGroup(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = MachineGroupArgs.__new__(MachineGroupArgs)\n\n __props__.__dict__[\"count\"] = None\n __props__.__dict__[\"display_name\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"group_type\"] = None\n __props__.__dict__[\"kind\"] = None\n __props__.__dict__[\"machines\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"type\"] = None\n return MachineGroup(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Group':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = GroupArgs.__new__(GroupArgs)\n\n __props__.__dict__[\"filter_expression\"] = None\n __props__.__dict__[\"group_arn\"] = None\n __props__.__dict__[\"group_name\"] = None\n __props__.__dict__[\"insights_configuration\"] = None\n __props__.__dict__[\"tags\"] = None\n return Group(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'OriginGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = OriginGroupArgs.__new__(OriginGroupArgs)\n\n __props__.__dict__[\"health_probe_settings\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"origins\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resource_state\"] = None\n __props__.__dict__[\"response_based_origin_error_detection_settings\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"traffic_restoration_time_to_healed_or_new_endpoints_in_minutes\"] = None\n __props__.__dict__[\"type\"] = None\n return OriginGroup(resource_name, opts=opts, __props__=__props__)",
"def get(self, group_name):\n\n g = self.request.mongo_connection.shinken.hostgroups.find_one(\n {\"hostgroup_name\": group_name}, {'_id': 0}\n )\n return hostgroup.HostGroup(**g)",
"def GetZoneGroupState(self, *args, **kwargs):\r\n kwargs['cache'] = kwargs.get('cache', zone_group_state_shared_cache)\r\n return self.send_command('GetZoneGroupState', *args, **kwargs)",
"def get(id):\n\n return Group.query.get(id)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n amount: Optional[pulumi.Input[float]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n filter: Optional[pulumi.Input[pulumi.InputType['BudgetResourceGroupFilterArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notifications: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BudgetResourceGroupNotificationArgs']]]]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n time_grain: Optional[pulumi.Input[str]] = None,\n time_period: Optional[pulumi.Input[pulumi.InputType['BudgetResourceGroupTimePeriodArgs']]] = None) -> 'BudgetResourceGroup':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BudgetResourceGroupState.__new__(_BudgetResourceGroupState)\n\n __props__.__dict__[\"amount\"] = amount\n __props__.__dict__[\"etag\"] = etag\n __props__.__dict__[\"filter\"] = filter\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notifications\"] = notifications\n __props__.__dict__[\"resource_group_id\"] = resource_group_id\n __props__.__dict__[\"time_grain\"] = time_grain\n __props__.__dict__[\"time_period\"] = time_period\n return BudgetResourceGroup(resource_name, opts=opts, __props__=__props__)",
"def read_by_id(_id):\n try:\n return Group.get(Group.id == _id)\n except Exception:\n return None",
"def resource_group_get(name: str) -> ResourceGroup:\n command: List[str] = ['az', 'group', 'show', f'--name={name}']\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG, process, debug=ARGS.debug)\n if process.returncode != 0:\n return ResourceGroup()\n # resource_group = ResourceGroup(process.stdout)\n resource_group: ResourceGroup = json_to_dataclass(process.stdout, ResourceGroup)\n # LOG.debug(\"resource_group: {resource_group}\")\n return resource_group",
"def get_host_group_details(self, host_group_id):\n LOG.info(\"Getting hostgroup details by ID: '%s'\" % host_group_id)\n return self.client.request(constants.GET,\n constants.GET_HOST_GROUP_DETAILS_URL.format(\n self.server_ip, host_group_id),\n payload=None,\n querystring=constants.SELECT_ALL_HOST_GROUP)",
"def find_address_group(self, name_or_id, ignore_missing=True, **query):\n return self._find(\n _address_group.AddressGroup,\n name_or_id,\n ignore_missing=ignore_missing,\n **query,\n )",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n provider_endpoint: Optional[pulumi.Input[str]] = None,\n provider_type: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[str]] = None,\n vpc_configuration: Optional[pulumi.Input[pulumi.InputType['HostVpcConfigurationArgs']]] = None) -> 'Host':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _HostState.__new__(_HostState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"provider_endpoint\"] = provider_endpoint\n __props__.__dict__[\"provider_type\"] = provider_type\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"vpc_configuration\"] = vpc_configuration\n return Host(resource_name, opts=opts, __props__=__props__)",
"def get_group(id_or_name):\n try:\n return Group.objects.get(pk=id_or_name)\n except (Group.DoesNotExist, ValueError):\n try:\n return Group.objects.get(name=id_or_name)\n except Group.DoesNotExist:\n return None",
"def _get_group(group_name, group_lookup, school_urlsafe):\n #check for existing group by name\n if group_name.lower() in group_lookup:\n logging.debug(\"group found in cache\")\n error = \"group found in cache\"\n create_error_log(error, 'ERR')\n return group_lookup[group_name.lower()], None\n\n from .group import Group\n school_key = ndb.Key(urlsafe = school_urlsafe)\n group = Group.query(Group.name_ == group_name, Group.school == school_key, namespace = '_x_').get()\n\n if group:\n logging.debug(\"group found in datastore\")\n error = \"group found in datastore\"\n create_error_log(error, 'ERR')\n group_lookup[group_name.lower()] = group.key\n return group.key, None\n\n logging.debug(\"No group found for %s, creating a new one\", group_name)\n group = Group(name=group_name)\n school_key = ndb.Key(urlsafe = school_urlsafe)\n group.school = school_key\n future = group.put_async()\n return group.key, future",
"def get_hostgroup_by_name(self, name):\n group = self.db_session.query(HostGroup)\\\n .filter(HostGroup.name == name)\\\n .first()\n return group"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The number of fault domains that the Dedicated Host Group spans. Changing this forces a new resource to be created. | def platform_fault_domain_count(self) -> pulumi.Output[int]:
return pulumi.get(self, "platform_fault_domain_count") | [
"def domains_count(self):\n return self._domains_count",
"def get_number_of_agents_for_scheduling(self, context):\n return 1",
"def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")",
"def domain_size(domain):\n fixed_domain_sizes = {\n \"current collector\": 3,\n \"negative particle\": 5,\n \"positive particle\": 7,\n \"negative electrode\": 11,\n \"separator\": 13,\n \"positive electrode\": 17,\n \"negative particle size\": 19,\n \"positive particle size\": 23,\n }\n if domain in [[], None]:\n size = 1\n elif all(dom in fixed_domain_sizes for dom in domain):\n size = sum(fixed_domain_sizes[dom] for dom in domain)\n else:\n size = sum(hash(dom) % 100 for dom in domain)\n return size",
"def numOfDefinedStoragePools(self):\n ret = libvirtmod.virConnectNumOfDefinedStoragePools(self._o)\n if ret == -1: raise libvirtError ('virConnectNumOfDefinedStoragePools() failed', conn=self)\n return ret",
"def domain_count_dict(domains):\n domain_count = {} \n for domain in domains: \n if not domain in domain_count:\n domain_count[domain] = 1\n else:\n domain_count[domain] = domain_count[domain] + 1\n return domain_count",
"def _constraints_for_new_request(cls, config):\n return {'count': npr.randint(5, 20, 1)[0]}",
"def get_advertiser_domain_pagerrank(self) -> int:\n raise NotImplementedError",
"def getNumSites(self) -> int:\n return len(self.sites)",
"def _max_servers(self, resource_desc):\n total = 0\n for allocator in self._allocators:\n count = allocator.max_servers(resource_desc)\n self._logger.debug('%r returned %d', allocator._name, count)\n total += count\n return total",
"def number_of_requests(self) -> int:\n return len(self.resources)",
"def desired_nr_resources(self):\n return self.problem.schedule[self.now % len(self.problem.schedule)]",
"def update_number_of_domain_controllers(self, DirectoryId: str, DesiredNumber: int) -> Dict:\n pass",
"def get_site_cnt(self):\n\n return len(self.sites)",
"def __len__(self):\n return len(self.options['inventory_manager'].list_hosts()) + len(self.get_extra_inventory_hosts())",
"def pooled_instances(self) -> int:\n return pulumi.get(self, \"pooled_instances\")",
"def test_set_limits(self):\n domain = Domain.objects.get(name=\"test.com\")\n values = {\n \"name\": domain.name, \"quota\": domain.quota,\n \"default_mailbox_quota\": domain.default_mailbox_quota,\n \"enabled\": domain.enabled, \"type\": \"domain\",\n \"mailboxes_limit\": 3, \"mailbox_aliases_limit\": 3,\n \"domain_aliases_limit\": 3, \"domain_admins_limit\": 3\n }\n self.ajax_post(\n reverse(\"admin:domain_change\", args=[domain.id]),\n values\n )\n domain.refresh_from_db()\n self.assertEqual(\n domain.domainobjectlimit_set.get(name=\"mailboxes\").max_value, 3)\n self.assertEqual(\n domain.domainobjectlimit_set.get(\n name=\"mailbox_aliases\").max_value, 3)\n self.assertEqual(\n domain.domainobjectlimit_set.get(\n name=\"domain_aliases\").max_value, 3)\n self.assertEqual(\n domain.domainobjectlimit_set.get(\n name=\"domain_admins\").max_value, 3)",
"def verify_domain_validity(self):\n self.component_count['domain'] = {}\n self.component_count['domain']['intents'] = len(self.domain.intents)\n self.component_count['domain']['utterances'] = len(self.domain.templates)\n self.component_count['domain']['actions'] = len(self.domain.user_actions)\n self.component_count['domain']['forms'] = len(self.domain.form_names)\n self.component_count['domain']['slots'] = len(self.domain.slots)\n self.component_count['domain']['entities'] = len(self.domain.entities)\n self.component_count['utterances'] = len(self.domain.templates)\n if self.domain.is_empty():\n self.summary['domain'] = [\"domain.yml is empty!\"]",
"def test_dos_create_flavor_limits_list(self):\n # create a huge list of origins\n self.reset_defaults()\n self.limits_list.append({\"domains\": {\"min\": 1, \"max\": 5}})\n self.limits_list.append({\"caching\": {\"min\": 3600,\n \"max\": 604800, \"incr\": 300}})\n for k in range(1, 9000):\n self.limits_list.append({\"origins\": {\"min\": \"%s\" % k, \"max\": 5}})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.flavor_id = str(uuid.uuid1())\n self.check_one_request()",
"def numprocesses(self):\r\n info = self.info()\r\n return info['max_processes']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the Multi Agent State. It receives the solver settings and the list of all single agent states. | def __init__(self, single_agents_states, solver_settings, parent=None):
super().__init__(single_agents_states, solver_settings, parent=parent)
self._back_propagation_set = []
self._collisions_set = set()
self.compute_cost()
self.compute_heuristics() | [
"def _initialize_agents(self):\n\n for agent in self.agents:\n agent.fill_with_binary()\n\n self.best_agent = copy.deepcopy(self.agents[0])",
"def initial_agent_states(self) -> Dict[str, AgentState]:\n if self._initial_agent_states is None:\n raise AEAEnforceError(\"Call create before calling initial_agent_states.\")\n return self._initial_agent_states",
"def initialize(self):\n\n \"*** YOUR CODE HERE\"\n #agent가 생성될때마다 agentNum을 하나씩 증가시킨다.\n MyAgent.agentNum = MyAgent.agentNum+1",
"def assign_attributes(self):\n\t\tfor agent in self.agents_list:\n\t\t\tagent.number_of_states = self.number_of_states\n\t\t\tagent.state = random.choice(self.states_list)",
"def initialize(self, **args):\n\n # Inform the user\n log.info(\"Initializing the population ...\")\n\n if self.oneSelfGenome.getParam(\"full_diversity\", True) and hasattr(self.oneSelfGenome, \"compare\"):\n for i in xrange(len(self.internalPop)):\n curr = self.internalPop[i]\n curr.initialize(**args)\n while self.__findIndividual(curr, i):\n curr.initialize(**args)\n else:\n for gen in self.internalPop:\n gen.initialize(**args)\n\n self.clearFlags()",
"def setup(self):\n with timer(\"Generating states\"):\n if self.hamiltonian.endswith(\"relevant\"):\n self.states = States(self.n, basis=Basis.N_L_ML_MS_RELEVANT)\n print(\"Loaded relevant N L ML MS states.\")\n else:\n self.states = States(self.n, basis=Basis.N_L_ML_MS)\n print(\"Loaded N L ML MS states.\")\n\n with timer(\"Loading Hamiltonian\"):\n mat_1, mat_1_zeeman, mat_2, mat_2_minus, mat_2_plus = load_hamiltonian(self.hamiltonian)\n mat_2_combination = mat_2_plus + mat_2_minus\n\n with timer(\"Loading transformations\"):\n transform_1 = load_transformation(self.n, Basis.N_L_J_MJ_RELEVANT, Basis.N_L_ML_MS_RELEVANT)\n\n with timer(\"Applying transformation to nlmlms\"):\n mat_1 = transform_basis(mat_1, transform_1)\n mat_1_zeeman = transform_basis(mat_1_zeeman, transform_1)\n mat_2 = transform_basis(mat_2, transform_1)\n # mat_2_plus = transform_basis(mat_2_plus, transform_1)\n # mat_2_minus = transform_basis(mat_2_minus, transform_1)\n mat_2_combination = transform_basis(mat_2_combination, transform_1)\n\n self.mat_1 = mat_1\n self.mat_1_zeeman = mat_1_zeeman\n self.mat_2 = mat_2\n # self.mat_2_plus = mat_2_plus\n # self.mat_2_minus = mat_2_minus\n self.mat_2_combination = mat_2_combination",
"def initialize_env(self):\n\n # Environment source file : should be json file\n global global_file_name\n\n print(\"Reading the environment input file...\")\n\n graph, people, Simulator.deadline = Env1.load_environment(global_file_name)\n\n print(\"Done reading the environment input file...\\n\\n\")\n while True:\n ans = input(f'Input deadline is {Simulator.deadline}, do you want to ignore it (Y/N) ? ').upper()\n if ans == 'Y':\n Simulator.deadline = float('inf')\n break\n elif ans == 'N':\n break\n else:\n print('Invalid input, choose one of: Y, N')\n\n print(\"\\nNeed agents information...\\n\\n\")\n\n # Agents creating\n Agent.Agent.restart_ids()\n while True:\n try:\n ans = input(\"How many agents do you want to run? \")\n agent_num = int(ans)\n break\n except:\n print('You must enter an integer...\\ntry again...')\n\n agents_list = [] # type: List [Agent]\n agent_locations = {}\n\n for i in range(1, agent_num + 1):\n agent_types = list(Simulator.agent_init_functions.keys())\n options = [f'({i}) - {t}' for i,t in enumerate(agent_types)]\n short = {str(i): t for i,t in enumerate(agent_types)}\n while True:\n print(\"Agent {} : Please enter the agent type:\\n{}\".format(i, '\\n'.join(options)))\n agent_type = input('Your choice: ')\n if len(agent_type) == 1 and agent_type in short:\n agent_type = short[agent_type]\n break\n if len(agent_type) > 1 and agent_type in agent_types:\n break\n print(f'Your choice of: \"{agent_type}\" is invalid, pick again from list (either number or complete name)')\n\n while True:\n try:\n ans = input(\"Please enter agent location (node number): \")\n agent_location = int(ans)\n if agent_location not in graph.graph:\n print(f'Your pick of {agent_location} is invalid must be on of { \", \".join([str(n) for n in graph.graph]) }')\n else:\n break\n except:\n print('You must enter a valid node number of type int...')\n\n # Invoking agent initialization function ( according to agent type )\n agent = Simulator.agent_init_functions[agent_type]()\n agents_list.append(agent)\n agent_locations[agent.get_id()] = [agent_location, agent_location, 0]\n\n print(\"\\n\\nInitializing environment\")\n\n env = Env1.Environment(graph=graph, agents_location=agent_locations, people_location=people, blocked_edges=[])\n env.initialize()\n return agents_list, env",
"def __initialise_states(self):\n\n # Start not dead and not powered up\n self.powered_up = False\n self.dead = False",
"def _init_state_variables(self) -> None:\n for name, type_info in self.STATE_VARIABLE_DEFINITIONS.items():\n self.create_state_var(name, type_info)",
"def _initialize_state_vector(self):\n np.random.seed(self.seed)\n self.initial_state = [0.0] * self.num_state_variables",
"def init_state(self) -> ESILState:\n\n self.state_manager = ESILStateManager([], lazy=self.lazy)\n state = self.state_manager.entry_state(self.r2api, **self.options)\n return state",
"def __init__(self, wire_client):\n uri = GOAL_STATE_URI.format(wire_client.get_endpoint())\n\n for _ in range(0, _NUM_GS_FETCH_RETRIES):\n self.xml_text = wire_client.fetch_config(uri, wire_client.get_header())\n xml_doc = parse_doc(self.xml_text)\n self.incarnation = findtext(xml_doc, \"Incarnation\")\n\n role_instance = find(xml_doc, \"RoleInstance\")\n if role_instance:\n break\n time.sleep(0.5)\n else:\n raise IncompleteGoalStateError(\"Fetched goal state without a RoleInstance [incarnation {inc}]\".format(inc=self.incarnation))\n\n try:\n self.role_instance_id = findtext(role_instance, \"InstanceId\")\n role_config = find(role_instance, \"Configuration\")\n self.role_config_name = findtext(role_config, \"ConfigName\")\n container = find(xml_doc, \"Container\")\n self.container_id = findtext(container, \"ContainerId\")\n\n AgentGlobals.update_container_id(self.container_id)\n\n # these properties are populated by fetch_full_goal_state()\n self._hosting_env_uri = findtext(xml_doc, \"HostingEnvironmentConfig\")\n self.hosting_env = None\n self._shared_conf_uri = findtext(xml_doc, \"SharedConfig\")\n self.shared_conf = None\n self._certs_uri = findtext(xml_doc, \"Certificates\")\n self.certs = None\n self._remote_access_uri = findtext(container, \"RemoteAccessInfo\")\n self.remote_access = None\n # TODO: extensions_config is an instance member only temporarily. Once we stop comparing extensionsConfig with\n # vmSettings, it will be replaced with the extensions goal state\n self.extensions_config = None\n self._extensions_config_uri = findtext(xml_doc, \"ExtensionsConfig\")\n\n except Exception as exception:\n # We don't log the error here since fetching the goal state is done every few seconds\n raise ProtocolError(msg=\"Error fetching goal state\", inner=exception)",
"def build_state(self):\n\n # Collect data about the environment\n waypoint = self.planner.next_waypoint() # The next waypoint \n inputs = self.env.sense(self) # Visual input - intersection light and traffic\n for key, value in iter(inputs.items()):\n if value is None:\n inputs.update({key:'None'})\n deadline = self.env.get_deadline(self) # Remaining deadline\n\n ########### \n ## TO DO ##\n ###########\n \n # NOTE : you are not allowed to engineer features outside of the inputs available.\n # Because the aim of this project is to teach Reinforcement Learning, we have placed \n # constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.\n # With the hand-engineered features, this learning process gets entirely negated.\n \n # Set 'state' as a tuple of relevant data for the agent \n return self.build_index(inputs,waypoint)",
"def initializeAgents(self,agentname,progress=None,singlememvar=\"\"):\n #print \"Initializing region %d agent %s memvar %30s\" % (self.regionid+1,agentname,singlememvar)\n if not self.popmap.has_key(agentname):\n return\n c=0\n for i in self.popmap[agentname]:\n i.instantiate(self.popmap,self.regionid,singlememvar=singlememvar)\n c+=1\n if not progress==None:\n progress.tick(append=str(c))",
"def __init__(\n self,\n time_step_spec,\n action_spec,\n # Specific to multi-agent case\n n_agents,\n learning_rate=1e-4,\n # Specific to multi-grid agents\n actor_fc_layers=(32, 32),\n value_fc_layers=(32, 32),\n lstm_size=(128,),\n conv_filters=8,\n conv_kernel=3,\n direction_fc=5,\n # Modifying agents\n inactive_agent_ids=tuple(),\n non_learning_agents=tuple(),\n # PPO Clip agent params\n importance_ratio_clipping=0.0,\n lambda_value=0.95,\n discount_factor=0.99,\n entropy_regularization=0.05,\n policy_l2_reg=0.0,\n value_function_l2_reg=0.0,\n shared_vars_l2_reg=0.0,\n value_pred_loss_coef=0.5,\n num_epochs=25,\n use_gae=False,\n use_td_lambda_return=False,\n normalize_rewards=True,\n reward_norm_clipping=10.0,\n normalize_observations=True,\n log_prob_clipping=0.0,\n gradient_clipping=None,\n check_numerics=False,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n train_step_counter=None,\n network_build_fn=multigrid_networks.construct_multigrid_networks,\n policy_class=multiagent_ppo_policy.MultiagentPPOPolicy,\n agent_class=ppo_clip_agent.PPOClipAgent,\n name='MultiagentPPO'):\n self.n_agents = n_agents\n self.inactive_agent_ids = inactive_agent_ids\n self.non_learning_agents = non_learning_agents\n\n # Get single-agent specs\n (single_obs_spec, single_time_step_spec,\n single_action_spec) = self.get_single_agent_specs(time_step_spec,\n action_spec)\n\n # Make baby agents\n self.agents = [None] * self.n_agents\n self.optimizers = [None] * self.n_agents\n for agent_id in range(self.n_agents):\n with tf.name_scope('agent_' + str(agent_id)):\n self.optimizers[agent_id] = tf.compat.v1.train.AdamOptimizer(\n learning_rate=learning_rate)\n\n # Build actor and critic networks\n actor_net, value_net = network_build_fn(\n single_obs_spec,\n single_action_spec,\n actor_fc_layers=actor_fc_layers,\n value_fc_layers=value_fc_layers,\n lstm_size=lstm_size,\n conv_filters=conv_filters,\n conv_kernel=conv_kernel,\n scalar_fc=direction_fc)\n\n logging.info('Creating agent %d...', agent_id)\n self.agents[agent_id] = agent_class(\n single_time_step_spec,\n single_action_spec,\n self.optimizers[agent_id],\n actor_net=actor_net,\n value_net=value_net,\n entropy_regularization=entropy_regularization,\n importance_ratio_clipping=0.2,\n normalize_observations=False,\n normalize_rewards=False,\n use_gae=True,\n num_epochs=num_epochs,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter,\n compute_value_and_advantage_in_train=True)\n self.agents[agent_id].initialize()\n\n with tf.name_scope('meta_agent'):\n # Initialize policies\n self._policies = [self.agents[a].policy for a in range(self.n_agents)]\n policy = policy_class(\n self._policies,\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n clip=False,\n collect=False,\n inactive_agent_ids=inactive_agent_ids)\n\n self._collect_policies = [\n self.agents[a].collect_policy for a in range(self.n_agents)\n ]\n collect_policy = policy_class(\n self._collect_policies,\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n clip=False,\n collect=True,\n inactive_agent_ids=inactive_agent_ids)\n\n super(MultiagentPPO, self).__init__(\n time_step_spec,\n action_spec,\n policy,\n collect_policy,\n train_sequence_length=None,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter)\n\n self._global_step = train_step_counter\n self.update_normalizers_in_train = False\n print('Finished constructing 
multi-agent PPO')",
"def __init__(self):\n self.action_space = [(0, 0)] + list(permutations([i for i in range(m)], 2))\n self.action_space = [list(i) for i in self.action_space]\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()",
"def initializeStateFlags(*args):\n return set(args)",
"def initialize():\n nonlocal current_state_id\n global strategy_dict\n if 'init_contract' not in options: # if init_contract i\n warnings.warn('Using most recently attempted strategy!')\n else:\n init_state, guart = options['init_contract']\n failures = options['init_fail']\n fails.append(failures)\n assm = contract_controller.to_assumption(init_state, failures)\n synthesize_contract(assm, guart)\n subprocess.run([parent_path + '/run', 'resyn'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # synthesize strategy online\n # load initial strategy\n load_strategy()\n if 'init_state' not in options: # if the initial state is not specified, then it will be randomly chosen\n current_state_id = np.random.choice(tuple(strategy_dict))\n else:\n current_state_id = look_up_state_id(options['init_state'])\n collect()",
"def initialize_problem(self, problem_instance):\n self._solver_settings.initialize_heuristic(problem_instance)\n self._frontier = StatesQueue()\n self._closed_list = MStarStatesQueue()\n self._n_of_generated_nodes = 1\n self._n_of_expanded_nodes = 0\n\n single_agents_states = []\n for agent in problem_instance.get_agents():\n s = SingleAgentState(problem_instance.get_map(), agent.get_goal(), agent.get_start(), self._solver_settings)\n single_agents_states.append(s)\n\n starter_state = MStarState(single_agents_states, self._solver_settings)\n self._frontier.add(starter_state)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Expand the current state. For each single state, if the corresponding agent is not in the collision set, the next single state will be the one obtained by following the optimal policy, otherwise if it is in the collision set all the possible moves will be considered for that agent. Then these states are iterated in order to obtain all the possible multi agent state combinations. | def expand(self, verbose=False):
if verbose:
print("Expansion in progress... COLLISIONS SET {:<24}".format(str(self._collisions_set)), end=" ")
candidate_list = []
for i, single_state in enumerate(self._single_agents_states):
if i in self._collisions_set:
single_state_neighbor_list = single_state.expand()
candidate_list.append(single_state_neighbor_list)
else:
next_optimal_state = single_state.expand_optimal_policy()
candidate_list.append([next_optimal_state])
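# The Cartesian product of the per-agent candidate lists enumerates every possible joint multi-agent state.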
candidate_state_list = list(itertools.product(*candidate_list))
valid_states = []
for i, multi_state in enumerate(candidate_state_list):
if self.is_valid(multi_state):
valid_states.append(multi_state)
expanded_states = []
for i, multi_state in enumerate(valid_states):
m = MStarState(multi_state, self._solver_settings, parent=self)
m.set_back_propagation_set([self])
m.set_collisions_set(self.colliding_robots(m).copy())
expanded_states.append(m)
if verbose:
print("DONE! Number of expanded states:", len(expanded_states))
return expanded_states | [
"def expand(state: State) -> Generator[State, None, None]:\n n = len(state)\n for i in range(n):\n for j in range(n):\n if state[i, j] in Problem.values:\n continue\n else:\n for v in Problem.values:\n new_state = deepcopy(state)\n new_state[i, j] = v\n if Problem.is_valid(new_state):\n yield new_state\n break",
"def solve(self):\n \n # States used in value iteration\n states = self.cells\n state_container = {cell.get_name():cell for cell in states}\n \n # Values for the states\n value = {cell.get_name():0 for cell in states}\n value_new = {cell.get_name():value[cell.get_name()] for cell in states}\n \n # Current policy\n policy = {cell.get_name():\"\" for cell in states}\n \n # Maximum number of iterations\n N_max = 10000 # Should be sufficient for pretty much all scenarios\n \n # Value iteration\n for i in range(N_max):\n \n # Iterate each state\n for state in states:\n \n # Possible actions from current state\n actions = [\"north\",\"east\",\"south\",\"west\"]\n \n # Find best action for current state\n max_action = actions[0]\n max_value = -10000\n for action in actions:\n \n # The possible actual outcomes from current action\n # First one is always the correct outcome\n possible_outcomes = self.get_possible_outcomes(action)\n probs = [0.8, 0.1, 0.1]\n \n # Calculate the value of current action in current state\n current_value = 0\n \n # Three possible outcomes from current action\n for j in range(3):\n outcome = possible_outcomes[j]\n \n # Teleportation\n if state.get_name() == \"43\":\n next_cell = state_container[\"11\"]\n \n # Get next state if outcome is legit move\n elif outcome in state.get_moves():\n x_new = state.get_x() + self.MOVES[outcome][0]\n y_new = state.get_y() + self.MOVES[outcome][1]\n next_cell = state_container[str(x_new)+str(y_new)]\n \n # Remain in the current state if outcome is not legit\n else:\n next_cell = state\n \n # Update value of current action\n current_value += probs[j]* \\\n (self.get_immediate_reward(next_cell) + \\\n self.gamma*value[next_cell.get_name()])\n \n # If the value of current action is best so far,\n # update value function and policy\n if (current_value > max_value):\n max_action = action\n max_value = current_value\n \n # Store value and policy for this iteration in their dicts\n value_new[state.get_name()] = max_value\n policy[state.get_name()] = max_action\n \n # Check for convergence using the given threshold\n terminate = True\n for state in states:\n state_name = state.get_name()\n diff = value_new[state_name] - value[state_name]\n if diff > self.epsilon*(1-self.gamma)/(2*self.gamma) or \\\n diff < -self.epsilon*(1-self.gamma)/(2*self.gamma):\n terminate = False\n \n if terminate:\n if self.conv:\n print(\"Value iteration converged after \"+str(i), \\\n \" iterations.\")\n return [value_new, policy]\n else:\n value = {cell.get_name():value_new[cell.get_name()] \\\n for cell in states}",
"def optimaze(self):\n \n final = self.get_list_of_final()\n not_final = self.get_list_of_final(False)\n # Using frozenset since we will use set of sets. and we won't change it's value\n p = {frozenset(final), frozenset(not_final)}\n w = {frozenset(final)}\n\n while len(w):\n a = w.pop()\n x = set()\n for symbol in self.alphabet:\n x = {node for node in self.G.nodes_iter() if self.get_next_state(node, symbol) in a}\n # using set(p) instead of p since we will change it (the same for w).\n for y in set(p):\n inter = x.intersection(y)\n dif = y.difference(x)\n if inter and dif :\n p.remove(y)\n p.add(frozenset(inter))\n p.add(frozenset(dif))\n if y in set(w):\n w.remove(y)\n w.add(frozenset(inter))\n w.add(frozenset(dif))\n elif len(inter) <= len(dif):\n w.add(frozenset(inter))\n else :\n w.add(frozenset(dif))\n \n d = {}\n # Creation of a new automaton.\n for i, e in enumerate(sorted(p, key= lambda x: min(x))):\n d[e] = i+1\n new_automaton = Automaton()\n for key in d:\n state = set(key).pop()\n for char in self.alphabet:\n next_state = self.get_next_state(state, char)\n for elem in d:\n if next_state in elem:\n to_state = d[elem]\n break\n new_automaton.add_transition(d[key], char, to_state)\n for e in key:\n if e in final:\n new_automaton.set_state_final(d[key])\n break\n print(new_automaton.G.edges())\n return new_automaton",
"def act(self):\n self.logger.info('Picking action according to rule set')\n\n # computing state variable\n state, state_np = get_state(self)\n x, y, _, bombs_left, score = self.game_state['self']\n arena, bomb_map = state_np\n \n # determine valid actions\n directions = [(x,y), (x+1,y), (x-1,y), (x,y+1), (x,y-1)]\n valid_tiles, valid_actions = [], []\n for d in directions:\n if (((arena[d] == 0) or (arena[d] == 2) ) and\n (self.game_state['explosions'][d] <= 1) and\n (bomb_map[d] > 0)):\n valid_tiles.append(d)\n if (x-1,y) in valid_tiles: valid_actions.append('LEFT')\n if (x+1,y) in valid_tiles: valid_actions.append('RIGHT')\n if (x,y-1) in valid_tiles: valid_actions.append('UP')\n if (x,y+1) in valid_tiles: valid_actions.append('DOWN')\n if (x,y) in valid_tiles: valid_actions.append('WAIT')\n if (bombs_left > 0): valid_actions.append('BOMB')\n self.logger.debug(f'Valid actions: {valid_actions}')\n \n if len(valid_actions) == 0:\n return # we're fucked -> can only happen in last step of an episode\n \n # prepare state by stacking the the current state on top of 3 past states\n old_stack = self.state_hist[-1] if len(self.state_hist) > 0 else torch.from_numpy(np.array([np.nan]))\n stacked_state = stack(old_stack,state)\n\n # decide next action\n action= get_action(self, stacked_state, valid_actions)\n self.next_action = action\n \n # save state and action such that they are available in reward_update for learning\n self.action_hist.append(action)\n self.state_hist.append(stacked_state)",
"def solveOneStep(self):\n ### Student code goes here\n\n\n if self.currentState.state == self.victoryCondition or self.currentState not in self.visited:\n self.visited[self.currentState]=True\n\n return self.currentState.state == self.victoryCondition\n\n # Expand the new states from the parent state, but do not go through it\n if not self.currentState.children:\n for movable_states in self.gm.getMovables():\n self.gm.makeMove(movable_states)\n\n NextGameState = GameState(self.gm.getGameState(), self.currentState.depth+1, movable_states)\n if NextGameState not in self.visited:\n NextGameState.parent = self.currentState\n self.currentState.children.append(NextGameState)\n self.gm.reverseMove(movable_states)\n\n if self.currentState.nextChildToVisit<len(self.currentState.children): #explores as far as possible along each branch before backtracking\n nextState = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit += 1\n self.gm.makeMove(nextState.requiredMovable)\n self.currentState = nextState\n return self.solveOneStep()\n else: #Backtracking\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n return self.solveOneStep()",
"def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Note: should use self.depth, self.evaluationFunction\n Current Problems/To-do:\n 1. Need to track depth so maximum depth isn't reached.\n - This could probably be achieved by comparing the depth of the current\n node with the initial depth: if (depth > self.depth): -> evaluate the state\n 2. Modify minimax to be modular so it can handle more than two agent\n \"\"\"\n\n def minimax(game_state, depth, agent_type):\n # check if current state is a leaf (depth is 0) or terminal state\n if game_state.isWin() or game_state.isLose() or depth == 0:\n return self.evaluationFunction(game_state)\n\n # get available moves to the current state\n # change this later as we can reuse in conditional blocks but need to use agent_type\n available_moves = game_state.getLegalActions()\n best_move = available_moves[0]\n num_agents = game_state.getNumAgents()\n\n # check whether the current agent is a MAX agent:\n if agent_type == 0 or agent_type == num_agents:\n v = float('-inf')\n\n for move in game_state.getLegalActions():\n successor = game_state.generateSuccessor(0, move)\n v_prime = minimax(successor, depth, 1)\n\n if v_prime > v:\n v = v_prime\n best_move = move\n\n self.best_moves.add((game_state, best_move, v))\n return v\n else:\n v = float('inf')\n\n # set appropriate agent type\n if agent_type == (num_agents - 1):\n next_agent = 0\n next_depth = depth - 1\n else:\n next_agent = agent_type + 1\n next_depth = depth\n\n for move in game_state.getLegalActions(agent_type):\n successor = game_state.generateSuccessor(agent_type, move)\n v_prime = minimax(successor, next_depth, next_agent)\n\n if v > v_prime:\n v = v_prime\n best_move = move\n\n self.best_moves.add((game_state, best_move, v))\n return v\n\n v = minimax(gameState, self.depth, self.index)\n\n for parent, key, value in self.best_moves:\n if key in gameState.getLegalActions() and value == v and parent == gameState:\n return key\n\n util.raiseNotDefined()",
"def chooseAction(self, gameState):\n\n #Check if our agent has died, if so create a new actionList, and update the goal\n if self.hasDied(gameState):\n self.actionList = []\n self.updateGoalState(self.getClosestFood(gameState), gameState)\n\n enemies = self.getOpponents(gameState)\n\n #This block checks for enemies within 6 map spaces of our agent, if one is found\n #It will find the closest friendly space to the agent, update the goal, and find the BFS path to the goal\n for enemy in enemies:\n enemyPosition = gameState.getAgentPosition(enemy)\n if enemyPosition != None:\n enemyDist = self.getMazeDistance(gameState.getAgentPosition(self.index), enemyPosition)\n\n #If the enemy is within 6 spaces and our agent is pacman and the enemy is a ghost\n #We are in enemy territory and are being chased\n if enemyDist <= 6 and (gameState.getAgentState(self.index).isPacman and\n not gameState.getAgentState(enemy).isPacman):\n\n #Obtain the cells that border the enemy territory and find the closest one\n borderCells = self.getBorderCells(gameState)\n bestDist = 9999\n bestCell = borderCells[0]\n currentPos = gameState.getAgentPosition(self.index)\n for cell in borderCells:\n dist = self.getMazeDistance(currentPos, cell)\n if dist < bestDist:\n bestDist = dist\n bestCell = cell\n #Update the goal with the closest friendly cell\n self.goal = bestCell\n self.actionList = []\n #Find the actions that will take us to the cell\n self.actionList = self.breadthFirstSearch(gameState)\n\n #By Default, if the action list is empty, find the nearest food to the agent,\n #Update the goal with the food location and repopulate the action list with the BFS path to the food\n if len(self.actionList) == 0:\n self.updateGoalState(self.getClosestFood(gameState), gameState)\n self.actionList = []\n self.actionList = self.breadthFirstSearch(gameState)\n\n return self.actionList.pop(0)",
"def get_next_agent_state_for_minting(self) -> Optional[AgentState]:\n result = None\n for agent_addr, agent_state in self.initial_agent_states.items():\n if agent_addr in self._already_minted_agents:\n continue\n self._already_minted_agents.append(agent_addr)\n result = agent_state\n break\n return result",
"def solveOneStep(self):\n ### Student code goes here\n curr = self.currentState\n # print(curr, end=\" \")\n # print(curr.depth)\n self.visited[self.currentState] = True\n movables = self.gm.getMovables()\n\n if self.gm.getGameState() == self.victoryCondition:\n return True\n\n else:\n if movables and not self.currentState.children:\n for x in range(len(movables)):\n\n self.gm.makeMove(movables[x])\n node = GameState(self.gm.getGameState(), curr.depth+1, movables[x])\n # self.gm.reverseMove(movables[x])\n curr.children.append(node)\n node.parent = curr\n if node not in self.visited:\n self.visited[node] = False\n self.gm.reverseMove(movables[x])\n else:\n self.gm.reverseMove(movables[x])\n else:\n if curr.parent != None:\n curr.depth -= 1\n self.gm.reverseMove(curr.requiredMovable)\n\n for child in curr.children:\n if self.visited[child] is False:\n self.currentState = child\n self.visited[child] = True\n self.gm.makeMove(child.requiredMovable)\n break",
"def build_state(self):\n\n # Collect data about the environment\n waypoint = self.planner.next_waypoint() # The next waypoint \n inputs = self.env.sense(self) # Visual input - intersection light and traffic\n for key, value in iter(inputs.items()):\n if value is None:\n inputs.update({key:'None'})\n deadline = self.env.get_deadline(self) # Remaining deadline\n\n ########### \n ## TO DO ##\n ###########\n \n # NOTE : you are not allowed to engineer features outside of the inputs available.\n # Because the aim of this project is to teach Reinforcement Learning, we have placed \n # constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.\n # With the hand-engineered features, this learning process gets entirely negated.\n \n # Set 'state' as a tuple of relevant data for the agent \n return self.build_index(inputs,waypoint)",
"def _most_likely_state_policy(self, ):\n\n to_minimize = np.ones(self.env.Nactions) * float(\"inf\")\n for a in range(self.env.Nactions):\n # moving agent\n agent_, move_possible = self.env._move(a, self.env.agent)\n if move_possible:\n # most likely source location\n most_likely_source = np.unravel_index(np.argmax(self.env.p_source, axis=None), self.env.p_source.shape)\n # Manhattan distance between agent and source\n to_minimize[a] = np.linalg.norm(np.asarray(agent_) - np.asarray(most_likely_source), ord=1)\n\n action_chosen = np.argwhere(np.abs(to_minimize - np.min(to_minimize)) < EPSILON_CHOICE).flatten()[0]\n\n return action_chosen, to_minimize",
"def expand(self, state):\n successor_function = self.graph.successor(state.content)\n new_nodes_list = []\n for successor in successor_function:\n new_node = State(\n content=successor[0],\n total_cost=state.cost + successor[1],\n depth=state.depth + 1,\n parent=state\n )\n new_nodes_list.append(new_node)\n self.closure.add(state)\n return new_nodes_list",
"def agents_update_state(self):\n\t\tfor agent in self.activated_agents:\n\t\t\tagent.update_state()",
"def solveOneStep(self):\n ### Student code goes here\n if self.victoryCondition == self.currentState.state:\n return True\n\n if self.currentState not in self.visited:\n self.visited[self.currentState] = True\n\n if self.gm.getMovables() and not self.currentState.children:\n self.findChildren(self.currentState)\n\n path = []\n\n always = 123 ### runs until we break this loop ourselves\n while always == 123:\n next = self.search_queue.get()\n\n if not next in self.visited:\n #create the path back to the top\n while next.requiredMovable:\n path.append(next.requiredMovable)\n next = next.parent\n\n # reverse the actual states\n while self.currentState.requiredMovable:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n # navigate back down and go to the children to mark them as visited and keep searching\n num = len(path) - 1\n while path:\n\n # get last element of path and the remove it\n move = path[num]\n path.remove(path[num])\n self.gm.makeMove(move)\n new_state = self.gm.getGameState()\n num = num - 1\n\n for child in self.currentState.children:\n if child.state == new_state:\n self.currentState = child\n self.visited[self.currentState] = True # set visited flag to make sure we don't visit again\n break\n break\n\n return False",
"def moves(self):\n for i in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n m = State(self.x + i[0], self.y + i[1], self.distance + 1)\n if m.valid:\n yield m",
"def _update_all_states(self):\n self.state = list()\n for i in range(4):\n this_player = i\n player_state = list()\n for j in range(4):\n pid = (this_player + j)%4\n hand_size = self.game.players[pid].hand_size\n tichu_flag = int(self.game.players[pid].tichu_flag)\n if pid == this_player:\n player_cards = self._cards_to_vec(\n self.game.players[pid].hand)\n else:\n player_cards = self.action_buffer[pid]\n player_state.append([hand_size, tichu_flag, player_cards])\n self.state.append(player_state)",
"def solve_puzzle(initial_state):\n queue = PriorityQueue()\n visited_states = set()\n\n queue.put((0, uuid.uuid4(), StateWithParent(state=initial_state, parent=None)))\n\n while not queue.empty():\n parent_cost, _, current_state_with_parent = queue.get()\n\n current_state = current_state_with_parent.state\n visited_states.add(state_to_tuple(current_state))\n\n actions = get_available_actions(current_state)\n successor_states = map(lambda action: action(current_state), actions)\n\n for state in successor_states:\n if state_to_tuple(state) not in visited_states:\n new_state_with_parent = StateWithParent(state=state,\n parent=current_state_with_parent)\n cost = heuristic_cost(state)\n if cost == 0:\n # If the heuristic cost of the given state equals 0, then\n # the goal state is found and we can return it immediately.\n return new_state_with_parent\n total_cost = cost + parent_cost\n\n queue.put((total_cost, uuid.uuid4(), new_state_with_parent))\n\n return None",
"def get_outcome(self, state, action):\n next_state = None\n reward = 0\n if state in [53, 131]: # end of MDP\n return next_state, reward\n if action == 0: # move right\n next_state = state + 1\n if state == 38: # goal state 1\n next_state = 53\n reward = 100\n elif state == 158: # goal state 2\n next_state = 131\n reward = 100\n elif state == 1: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 29\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 15\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 15\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 13\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 27\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 13\n elif 5 <= state <= 75 and state % 14 == 5: # room 1 left border\n next_state = state\n elif 105 <= state <= 147 and state % 14 == 7: # room 2 right border\n next_state = state\n elif state % 14 == 13: # world right border\n next_state = state\n elif action == 1: # move up\n next_state = state - 14\n if state in [16, 17, 18, 84]: # cliff\n next_state = None\n reward = -100\n elif 21 <= state <= 65 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 14\n elif state in [7, 8, 9]: # room 1 wind\n next_state = state + 28\n elif state in [77, 78, 79]: # room 1 wind\n next_state = state\n elif 24 <= state <= 82 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state\n elif state in [10, 11, 12]: # room 1 wind\n next_state = state + 14\n elif 127 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 28\n elif 144 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 42\n elif state in [130, 131, 132]: # room 2 wind\n next_state = state - 28\n elif 90 <= state <= 96: # room 1 bottom border\n next_state = state\n elif 98 <= state <= 105: # room 2 top border\n next_state = state\n elif 0 <= state <= 13: # world top border\n next_state = state\n elif action == 2: # move left\n next_state = state - 1\n if state == 40: # goal state 1\n next_state = 53\n reward = 100\n elif state == 160: # goal state 2\n next_state = 131\n reward = 100\n elif state in [29, 43, 57, 71, 5]: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 27\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 13\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 13\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 15\n elif state == 99: # room 2 wind\n next_state = state - 15\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 29\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 15\n elif 6 <= state <= 76 and state % 14 == 6: # room 1 left border\n next_state = state\n elif 106 <= state <= 148 and state % 14 == 8: # room 2 right border\n 
next_state = state\n elif state % 14 == 0: # world left border\n next_state = state\n elif action == 3: # move down\n next_state = state + 14\n if state == 25: # goal state 1\n next_state = 53\n reward = 100\n elif state == 145: # goal state 2\n next_state = 131\n reward = 100\n elif state == 14: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 37 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 42\n elif state in [49, 50, 51]: # room 1 wind\n next_state = state + 28\n elif 99 <= state <= 143 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state\n elif state in [155, 156, 157]: # room 2 wind\n next_state = state - 14\n elif 116 <= state <= 146 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 14\n elif state in [102, 103, 104]: # room 2 wind\n next_state = state\n elif state in [158, 159, 160]: # room 2 wind\n next_state = state - 28\n elif 76 <= state <= 82: # room 1 bottom border\n next_state = state\n elif 84 <= state <= 91: # room 2 top border\n next_state = state\n elif 154 <= state <= 167: # world bottom border\n next_state = state\n else:\n print(\"Action must be between 0 and 3.\")\n next_state = None\n reward = None\n return int(next_state) if next_state is not None else None, reward",
"def get_outcome(self, state, action):\n next_state = None\n reward = 0\n if state in [53, 131]: # end of MDP\n return next_state, reward\n if action == 0: # move right\n next_state = state + 1\n if state == 38: # goal state 1\n next_state = 53\n reward = 100\n elif state == 158: # goal state 2\n next_state = 131\n reward = 100\n elif state == 1: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 29\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 15\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 15\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 13\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 27\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 13\n elif 19 <= state <= 75 and state % 14 == 5: # room 1 left border\n next_state = state\n elif 105 <= state <= 161 and state % 14 == 7: # room 2 right border\n next_state = state\n elif state % 14 == 13: # world right border\n next_state = state\n elif action == 1: # move up\n next_state = state - 14\n if state in [16, 17, 18, 84]: # cliff\n next_state = None\n reward = -100\n elif 21 <= state <= 65 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 14\n elif state in [7, 8, 9]: # room 1 wind\n next_state = state + 28\n elif state in [77, 78, 79]: # room 1 wind\n next_state = state\n elif 24 <= state <= 82 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state\n elif state in [10, 11, 12]: # room 1 wind\n next_state = state + 14\n elif 127 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 28\n elif 144 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 42\n elif state in [130, 131, 132]: # room 2 wind\n next_state = state - 28\n elif 90 <= state <= 97: # room 1 bottom border\n next_state = state\n elif 99 <= state <= 105: # room 2 top border\n next_state = state\n elif 0 <= state <= 13: # world top border\n next_state = state\n elif action == 2: # move left\n next_state = state - 1\n if state == 40: # goal state 1\n next_state = 53\n reward = 100\n elif state == 160: # goal state 2\n next_state = 131\n reward = 100\n elif state in [29, 43, 57, 71, 5]: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 27\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 13\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 13\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 15\n elif state == 99: # room 2 wind\n next_state = state - 15\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 29\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 15\n elif 20 <= state <= 76 and state % 14 == 6: # room 1 left border\n next_state = state\n elif 106 <= state <= 162 and state % 14 == 8: # room 2 right border\n 
next_state = state\n elif state % 14 == 0: # world left border\n next_state = state\n elif action == 3: # move down\n next_state = state + 14\n if state == 25: # goal state 1\n next_state = 53\n reward = 100\n elif state == 145: # goal state 2\n next_state = 131\n reward = 100\n elif state == 14: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 37 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 42\n elif state in [49, 50, 51]: # room 1 wind\n next_state = state + 28\n elif 99 <= state <= 143 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state\n elif state in [155, 156, 157]: # room 2 wind\n next_state = state - 14\n elif 116 <= state <= 146 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 14\n elif state in [102, 103, 104]: # room 2 wind\n next_state = state\n elif state in [158, 159, 160]: # room 2 wind\n next_state = state - 28\n elif 76 <= state <= 83: # room 1 bottom border\n next_state = state\n elif 85 <= state <= 91: # room 2 top border\n next_state = state\n elif 154 <= state <= 167: # world bottom border\n next_state = state\n else:\n print(\"Action must be between 0 and 3.\")\n next_state = None\n reward = None\n return int(next_state) if next_state is not None else None, reward"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
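The query in the preceding record describes the joint-expansion step of M*: agents outside the collision set follow their individually optimal policy, agents inside it branch over all of their moves, and the per-agent candidates are combined into joint multi-agent states. The standalone sketch below illustrates only that combination step; joint_expand, optimal_move, and all_moves are names assumed for this sketch and are not part of the retrieved solver code.

# Minimal sketch (assumed helper names) of the M*-style joint expansion described above.
import itertools

def joint_expand(positions, collision_set, optimal_move, all_moves):
    # Build the per-agent candidate lists: a single optimal successor for
    # non-colliding agents, every reachable successor for colliding agents.
    per_agent_candidates = []
    for i, pos in enumerate(positions):
        if i in collision_set:
            per_agent_candidates.append(all_moves(i, pos))
        else:
            per_agent_candidates.append([optimal_move(i, pos)])
    # The Cartesian product yields every joint combination of the candidates.
    return [list(joint) for joint in itertools.product(*per_agent_candidates)]

# Toy usage on a 1-D grid: agent 1 is in the collision set, so it branches.
all_moves = lambda i, p: [p - 1, p, p + 1]
optimal_move = lambda i, p: p + 1
print(joint_expand([0, 5], {1}, optimal_move, all_moves))
# -> [[1, 4], [1, 5], [1, 6]]

In the retrieved solver this product is followed by a validity filter and by wrapping each joint tuple in an MStarState, as the document above shows.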
Set the back-propagation set of this state to the given back_set. | def set_back_propagation_set(self, back_set):
self._back_propagation_set = back_set | [
"def get_back_propagation_set(self):\n return self._back_propagation_set",
"def backstep(self):\n\n self.input.setDelta(self.output.getNetDelta())\n self.output.value = self.history.pop()",
"def restore(self):\n\n # Restore the sets\n try:\n self.mr.master_atoms_mapped.discard(self.mr.last_mapped[1])\n self.mr.sub_atoms_mapped.discard(self.mr.last_mapped[0])\n self.mr.atom_mapping.discard(self.mr.last_mapped)\n except IndexError:\n # happens if there was no last added atom\n pass\n # Reset the last mapped\n try:\n self.mr.last_mapped = self.mapping_stack.pop()\n except IndexError:\n # Happens if there is no backup\n pass",
"def backstep(self):\n head, moves = self.history.pop()\n for i in range(self.N):\n if moves[i]:\n Tape._pop(self.stacks[moves[i] < 0][i])\n Tape._append(self.stacks[moves[i] > 0][i], self.head[i])\n self.head[i] = head[i]\n for i in range(self.N):\n self.pos[i] -= moves[i]",
"def backUp(self):\n self._backup = (self._backup, self.assigned)",
"def _backpropagation(self, error):\n # backward passes\n for layer in reversed(self.layers):\n error = layer.backward(error)",
"def flush_set(target_set):\n _ipset('flush', target_set)",
"def on_set():\n #s = set()\n s = {'pomme', 'pomme', 'banane', 2}\n s.add('abricot')\n sbis = {'pomme', 'framboise', 2}\n #s & sbis\n #s | sbis\n print(s)",
"def update_forwards(self):\n\n self.args = tuple(arg.forwarded for arg in self.args)\n other_deps = self.other_deps\n self.other_deps = OrderedSet()\n for op in other_deps:\n self.add_other_dep(op)\n self.initializers = [op.forwarded for op in self.initializers]",
"def rolled_back(self, rolled_back):\n\n self._rolled_back = rolled_back",
"def on_backward_begin(self, **kwargs) -> None:\n if self.loss is None:\n self.loss = kwargs[\"last_loss\"].item()\n else:\n self.loss += kwargs[\"last_loss\"].item()",
"def backstep(self):\n\n self.timestep -= 1\n self.historyLayer.backstep()",
"def set_current_set_baseball(self, event_id, new_set,otuprm,otopp):\n\n path = self._db_keywords[\"root\"] + \\\n str(int(event_id)) + self._db_keywords[\"set\"]\n self._rtdb.reference(path).set(int(new_set))\n\n if new_set > 9:\n temp = {\n otuprm: 0,\n otopp: 0\n }\n path = self._db_keywords[\"root\"] + str(int(event_id)) + '/' + self._db_keywords[\"score-key\"]\n self._rtdb.reference(path).update(temp)",
"def first_stage_boostback_burn(self, first_stage_boostback_burn):\n\n\n self._first_stage_boostback_burn = first_stage_boostback_burn",
"def save_for_backward(self, *args):\n self.__args__ = args",
"def set_behind(self, other):\n\n self.ahead = other\n\n if other is not None:\n other.behind = self",
"def back(self, *args, **kwargs):\n return _decomp.component_set_back(self, *args, **kwargs)",
"def backorder_status(self, backorder_status):\n\n self._backorder_status = backorder_status",
"def __init__(self, original_set):\n super(MirrorSet, self).__init__()\n self._original = original_set"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the back-propagation set of this state. | def get_back_propagation_set(self):
return self._back_propagation_set | [
"def set_back_propagation_set(self, back_set):\n self._back_propagation_set = back_set",
"def prev_state_combiner(self):\n if hasattr(self, \"_prev_state_combiner\"):\n return self._prev_state_combiner\n else:\n return list(set(self.combiner) - set(self.current_combiner))",
"def prev_state_combiner_all(self):\n if hasattr(self, \"_prev_state_combiner_all\"):\n return list(set(self._prev_state_combiner_all))\n else:\n return self.prev_state_combiner",
"def get_back_calc(self):\n\n back_calc_return = deepcopy(self.values_orig)\n\n # Loop over experiments\n for ei in range(self.NE):\n exp_type = self.exp_types[ei]\n for si in range(self.NS):\n for mi in range(self.NM):\n for oi in range(self.NO):\n if exp_type in EXP_TYPE_LIST_CPMG:\n num = len(self.cpmg_frqs_orig[ei][mi][oi])\n else:\n num = len(self.spin_lock_nu1_orig[ei][mi][oi])\n back_calc_return[ei][si][mi][oi][:] = self.back_calc[ei, si, mi, oi, :num]\n\n return back_calc_return",
"def recurrent_states(self):\n return reduce(set.union, self.recurrent_classes(), set())",
"def prev_reward(self):\n return [env.prev_reward() for env in self._envs]",
"def states(self):\n return np.array(self.state[:self.last_n])",
"def getBackwardActivation(self):\n return self.actualBackwardActivation",
"def get_checkpoint_history(self):\n\n return self._history",
"def current_states(state):\n state_set = set()\n state_set.add(state)\n\n if state.label is None: # empty state, follow edge and add to set\n if state.edge_1 is not None:\n state_set |= current_states(state.edge_1)\n if state.edge_2 is not None:\n state_set |= current_states(state.edge_2)\n\n return state_set",
"def _backpropagation(self, error):\n # backward passes\n for layer in reversed(self.layers):\n error = layer.backward(error)",
"def infer_backpropagation_learning_pathways(self):\n self._analyze_graph()\n # returns a list of all pathways from start -> output node\n def bfs(start):\n pathways = []\n prev = {}\n queue = collections.deque([start])\n while len(queue) > 0:\n curr_node = queue.popleft()\n if NodeRole.OUTPUT in self.get_roles_by_node(curr_node):\n p = []\n while curr_node in prev:\n p.insert(0, curr_node)\n curr_node = prev[curr_node]\n p.insert(0, curr_node)\n # we only consider input -> projection -> ... -> output pathways (since we can't learn on only one mechanism)\n if len(p) >= 3:\n pathways.append(p)\n continue\n for projection, efferent_node in [(p, p.receiver.owner) for p in curr_node.efferents]:\n if (not hasattr(projection,'learnable')) or (projection.learnable is False):\n continue\n prev[efferent_node] = projection\n prev[projection] = curr_node\n queue.append(efferent_node)\n return pathways\n\n pathways = [p for n in self.get_nodes_by_role(NodeRole.INPUT) if\n NodeRole.TARGET not in self.get_roles_by_node(n) for p in bfs(n)]\n for pathway in pathways:\n self.add_backpropagation_learning_pathway(pathway=pathway)",
"def state(self):\n if not self.sublayers:\n return self._state\n else:\n return tuple(layer.state if s is None else s\n for (layer, s) in zip(self.sublayers, self._state))",
"def outgoing_trans_set(self, *args):\n return _wali.WFA_outgoing_trans_set(self, *args)",
"def relaxations(self):\n return self._relaxations.copy()",
"def get_reward_history(self):\n return self.__reward_history",
"def get_state(self, flatten=True):\n if flatten:\n return self.state.flatten()\n return self.state",
"def get_nobackprop_loss(self) -> Dict[str, dy.Expression]:\n return {k: dy.nobackprop(v) for k, v in self.expr_factors.items()}",
"def edge_set(self):\n return set(self.edges())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the collision set of this state to the given collisions_set. | def set_collisions_set(self, collisions_set):
self._collisions_set = collisions_set | [
"def collisions(self, collisions):\n\n self._collisions = collisions",
"def __set_collision(self, collision):\n if self.collisions:\n self.collisions[0] = collision\n else:\n self.collisions.append(collision)",
"def sets(self, sets):\n\n self._sets = sets",
"def intersection_update(self, rng_set):\r\n self._ranges = self.intersection(rng_set)._ranges",
"def __init__(self, collisions):\r\n self.collidablePairToCollision = {collision.collidablePair:collision for collision in collisions}\r\n self.idToCollider = {}\r\n self.idProvider = Incrementer(startAt=1)",
"def __init__(self, starting_elements):\n\n super(DisjointSet, self).__init__()\n\n self._sets = [frozenset([e]) for e in starting_elements]",
"def get_collisions_set(self):\n return self._collisions_set",
"def update(self, rng_set):\r\n # convert to RangeSet\r\n rng_set = RangeSet._to_rangeset(rng_set)\r\n # merge lists\r\n self._ranges = RangeSet._merge_ranges(self._ranges + rng_set._ranges)",
"def make_set(self, x):\n if x not in self._parent:\n self._parent[x] = x\n self._rank[x] = 0",
"def clearSets(self):\r\n self.matchSet = []\r\n self.correctSet = []",
"def setGenes(self, geneSets=None):\n # A consolidated master set containing all Gene objects\n self.genes = Genes.GeneSet()\n # A list of sets of genes, each set a potential cause of the reaction\n self.geneSets = []\n if geneSets is not None:\n # Make sure all the Gene objects are represented in the master set,\n # and that genes mentioned multiple times are represented by the same Gene object.\n for subSet in geneSets:\n self.geneSets.append(self.genes.recastSet(subSet))",
"def put_side_set(self, object_id, sideSetElements, sideSetSides):\n self.__ex_put_side_set(object_id, sideSetElements, sideSetSides)",
"def enumeration_set(self, enumeration_set):\n\n self._enumeration_set = enumeration_set",
"def set_occurrences(occurrences):",
"def rebuild_hittable_chain(self):\r\n self.hittable_elements.clear()\r\n self.rebuild_hit_chain(self.widget_root, self.matrix_root)",
"def grow_dict_of_sets(d, key, val):\n if key not in d:\n d[key] = set((val,))\n else:\n d[key].add(val)",
"def update(self, iterable):\r\n # coerce to RangeDict and add that\r\n if not isinstance(iterable, RangeDict):\r\n iterable = RangeDict(iterable)\r\n for value, rangesets in iterable._values.items():\r\n for rngset in rangesets:\r\n self.add(rngset, value)",
"def put_node_set(self, object_id, nodeSetNodes):\n self.__ex_put_node_set(object_id, nodeSetNodes)",
"def _rehash(self):\n self.rehashing = True\n self.capacity *= self.growthFactor\n filtered_table = list(filter(lambda x: isinstance(x, tuple) and x[2], self.table))\n self.table = [self.defVal]*self.capacity\n for key_val in filtered_table:\n self.__setitem__(key_val[0], key_val[1])\n self.rehashing = False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the collision set of this state. | def get_collisions_set(self):
return self._collisions_set | [
"def __get_collision(self):\n if self.collisions:\n return self.collisions[0]",
"def get_collisions(self):\r\n\r\n all_collisions = pygame.sprite.Group()\r\n all_collisions.add(pygame.sprite.spritecollide(self, self.walls, False),\r\n pygame.sprite.spritecollide(self, self.obstacles, False),\r\n pygame.sprite.spritecollide(self, self.enemies, False),\r\n pygame.sprite.spritecollide(self, self.cats, False))\r\n return all_collisions",
"def get_sets_of_balls(self):\n return [self._balls]",
"def intersect(self):\n\n return self._intersect",
"def get_slots(self):\n return set(self._slots.keys())",
"def components (self):\n return frozenset(self.__components)",
"def edge_set(self):\n return set(self.edges())",
"def vertex_set(self):\n return set(self.vertices())",
"def getSolsets(self):\n return self.H.root._v_groups",
"def crossings(self):\n return self.__crossings",
"def MinesKnown(self):\n if len(self.cells) == self.count:\n return set(self.cells)\n else:\n return set()",
"def _key_set(self):\n return set(GetKey(t) for t in self._m)",
"def collidables(self):\n temp = [self.world.ground]\n for team in self.teams:\n for sprite in team.sprites():\n temp.append(sprite)\n return temp",
"def get_sets_of_balls(self):\n LOGGER.debug(\"LEM:gsob\")\n return [self._main_balls, self._lucky_stars]",
"def active_set(self):\n return _np.arange(self.nstates) # currently assume all hidden states are active.",
"def SafesKnown(self):\n if self.count == 0:\n return set(self.cells)\n else:\n return set()",
"def get_borders(self):\n borders = map(lambda x: (x[1]-1, ord(x[0])-65),\n create_borders(self.bow, self.__length, \"horizontal\" if\n self.horizontal else \"vertical\"))\n return set(borders)-set(map(lambda x: (x[1]-1, ord(x[0])-65),\n self.ship_coords()))",
"def _get_occupied_positions(self) -> Set[Position]:\n return self._get_all_valid_positions() - self._get_holes()",
"def common_elements(self):\n return set.intersection(*self.branches) if self.branches else set()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if this multi-agent state and the given multi-agent state have the same positions for all the single-agent states. | def equal_position(self, other):
assert isinstance(other, MStarState)
for i, single_state in enumerate(self._single_agents_states):
if not single_state.equal_position(other.get_single_agent_states()[i]):
return False
return True | [
"def equal_position_and_time_step(self, other):\n assert isinstance(other, MStarState)\n for i, single_state in enumerate(self._single_agents_states):\n if not single_state.equal(other.get_single_agent_states()[i]):\n return False\n return True",
"def check_visited_position(self):\n return (self.cur_i, self.cur_j) in self.visited_positions",
"def positionsInSameCell(self, pos1, pos2):\n x1,y1,z1 = pos1\n x2,y2,z2 = pos2\n if int(x1) == int(x2):\n if int(y1) == int(y2):\n if int(z1) == int(z2):\n return True",
"def is_consistent(self) -> bool:\n can_place = set()\n used = set()\n\n for tile in self.tiles:\n # One or more candidates\n if len(tile.candidates) == 0:\n return False\n # Checking for any duplicates\n if tile.value in used:\n return False\n elif tile.value != sdk_tile.UNKNOWN:\n used.add(tile.value)\n can_place = can_place | tile.candidates\n\n if can_place != set(sdk_tile.CHOICES):\n return False\n return True",
"def check_if_same_states(s1, s2):\n return np.any(np.isclose(np.mean(np.square(s1-s2), axis=(1, 2)), 0))",
"def all_same(items):\n return all(x == items[0] for x in items)",
"def _are_equal_states(\n self,\n state1: Dict[str, Any],\n state2: Dict[str, Any],\n ) -> bool:\n if set(state1.keys()) != set(state2.keys()):\n return False\n for state_name, value1 in state1.items():\n value2 = state2[state_name]\n if type(value1) != type(value2):\n return False\n if torch.is_tensor(value1): # tensor state\n assert torch.is_tensor(value2)\n # Check the values on CPU to be device-agnostic\n value1 = value1.cpu()\n value2 = value2.cpu()\n if value1.shape != value2.shape or not torch.all(\n torch.isclose(value1, value2)\n ):\n return False\n else: # non-tensor state\n if value1 != value2:\n return False\n return True",
"def __eq__(self,other_state):\n \n if type(self) == type(other_state):\n return self.__members() == other_state.__members()\n else:\n return False",
"def shouldBelongToSameOutcomeMeasurement(self, template):\n gid = self.token.getAnnotationAttribute(self.type, 'group')\n oid = self.token.getAnnotationAttribute(self.type, 'outcome')\n tid = self.token.getAnnotationAttribute(self.type, 'time')\n csID = self.token.getAnnotationAttribute(self.type, 'compareSet')\n\n gid2 = template.token.getAnnotationAttribute(template.type, 'group')\n oid2 = template.token.getAnnotationAttribute(template.type, 'outcome')\n tid2 = template.token.getAnnotationAttribute(template.type, 'time')\n csID2 = template.token.getAnnotationAttribute(template.type, 'compareSet')\n\n return len(gid) > 0 and len(oid) > 0 and gid == gid2 and oid == oid2 and tid == tid2 and csID == csID2",
"def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_source_node().get_name(), edge.get_terminal_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_source_node().get_name(), edge.get_terminal_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise",
"def contains_only_agent(self, pos):\n lowx, highx, lowy, highy = self.pos_to_coords(pos)\n cell = self.grid[lowx:highx, lowy:highy, :]\n one_color = cell[0, 0] # color vector\n return one_color.any() and not (cell - one_color).any()",
"def _check_same_state(\n self,\n fsdp_osd,\n ref_osd,\n check_same_param_keys: bool,\n ):\n assert \"state\" in ref_osd\n self.assertTrue(\"state\" in fsdp_osd)\n ref_osd_state = ref_osd[\"state\"]\n fsdp_osd_state = {\n k: _gather_state_dict(v) for k, v in fsdp_osd[\"state\"].items()\n }\n\n if check_same_param_keys:\n # Check parameter keys are the same first for earlier erroring\n ref_osd_param_ids = set(ref_osd_state.keys())\n fsdp_osd_param_ids = set(fsdp_osd_state.keys())\n self.assertTrue(\n ref_osd_param_ids == fsdp_osd_param_ids,\n f\"Rank {self.rank}: {(ref_osd_param_ids, fsdp_osd_param_ids)}\",\n )\n # Check state values are the same\n for param_id, param_state in fsdp_osd_state.items():\n for state_name, value in param_state.items():\n ref_value = ref_osd_state[param_id][state_name]\n self.assertEqual(value, ref_value)\n return\n # Otherwise, only require the parameter keys to be isomorphic (e.g.\n # between IDs and names)\n ref_osd_states = list(ref_osd_state.values())\n fsdp_osd_states = list(fsdp_osd_state.values())\n self.assertEqual(len(ref_osd_states), len(fsdp_osd_states))\n # Use brute-force quadratic-time comparison since it is hard to\n # hash a tensor by value instead of by object\n for fsdp_osd_state in fsdp_osd_states:\n # Check for at least one match (may be > 1 in toy edge cases, e.g.\n # multiple biases); nonetheless, each having >= 1 match and the two\n # lists having equal length imply that the list contents are equal\n self.assertTrue(\n any(\n self._are_equal_states(fsdp_osd_state, ref_osd_state)\n for ref_osd_state in ref_osd_states\n )\n )",
"def isconsistent(self):\n TF = True\n for c in xrange(9):\n for r in xrange(9):\n if not (self.M[str(r)+\",\"+str(c)] == self.row[r][c] == self.col[c][r] == self.sec[((r/3)*3)+(c/3)][c - (c/3)*3 + (r%3)*3]):\n TF = False\n print \"Value at\",r,c,\"inconsistent:\"\n print \"self.M ==\",self.M[str(r)+\",\"+str(c)]\n print \"self.row ==\",self.row[r][c]\n print \"self.col ==\",self.col[c][r]\n print \"self.sec ==\",self.sec[((r/3)*3)+(c/3)][c - (c/3)*3 + (r%3)*3]\n return TF",
"def is_goal2(self, state):\n cube_faces = {}\n for i in range(0, 6):\n cube_faces[i] = full(state.left.shape, i, dtype=\"int8\")\n\n solved_cube = Cube.Cube(None, cube_faces)\n\n return state.create_md5() == solved_cube.create_md5()",
"def check_cell_state(self, coords):\n count_alive_neighbors = 0\n x, y = coords\n for i in range(max(x - 1, 0), min(self.m_rows, x + 2)):\n for j in range(max(y - 1, 0), min(self.n_columns, y + 2)):\n if i == x and j == y:\n continue\n if self.board[i][j] == 1:\n count_alive_neighbors += 1\n\n if self.board[x][y] == 1:\n if 2 > count_alive_neighbors or count_alive_neighbors > 3:\n self.dead.append(coords)\n else:\n if count_alive_neighbors == 3:\n self.alive.append(coords)",
"def _all_same(self, check, player_letter):\n return all(self.grid[x[0]][x[1]] == player_letter for x in check)",
"def verify(self):\n for i in self.coords:\n if np.abs(6*i-int(6*i))>0.1: return False\n if np.abs(self.coords[2]+self.coords[0]+self.coords[1]) > 0.1: return False\n return True",
"def is_state_equivalent(self, state1, state2):\n\n return IAMRole.equivalent_states.get(state1) == IAMRole.equivalent_states.get(state2)",
"def _is_paradox(self) -> bool:\n return (\n np.min(np.sum(self._state, axis=0)) <= 0\n or np.min(np.sum(self._state, axis=1)) <= 0\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return True if this multi-agent state and the given multi-agent state have the same positions and time steps for all the single-agent states. | def equal_position_and_time_step(self, other):
assert isinstance(other, MStarState)
for i, single_state in enumerate(self._single_agents_states):
if not single_state.equal(other.get_single_agent_states()[i]):
return False
return True | [
"def equal_position(self, other):\n assert isinstance(other, MStarState)\n for i, single_state in enumerate(self._single_agents_states):\n if not single_state.equal_position(other.get_single_agent_states()[i]):\n return False\n return True",
"def check_visited_position(self):\n return (self.cur_i, self.cur_j) in self.visited_positions",
"def positionsInSameCell(self, pos1, pos2):\n x1,y1,z1 = pos1\n x2,y2,z2 = pos2\n if int(x1) == int(x2):\n if int(y1) == int(y2):\n if int(z1) == int(z2):\n return True",
"def is_consistent(self) -> bool:\n can_place = set()\n used = set()\n\n for tile in self.tiles:\n # One or more candidates\n if len(tile.candidates) == 0:\n return False\n # Checking for any duplicates\n if tile.value in used:\n return False\n elif tile.value != sdk_tile.UNKNOWN:\n used.add(tile.value)\n can_place = can_place | tile.candidates\n\n if can_place != set(sdk_tile.CHOICES):\n return False\n return True",
"def check_if_same_states(s1, s2):\n return np.any(np.isclose(np.mean(np.square(s1-s2), axis=(1, 2)), 0))",
"def all_same(items):\n return all(x == items[0] for x in items)",
"def _are_equal_states(\n self,\n state1: Dict[str, Any],\n state2: Dict[str, Any],\n ) -> bool:\n if set(state1.keys()) != set(state2.keys()):\n return False\n for state_name, value1 in state1.items():\n value2 = state2[state_name]\n if type(value1) != type(value2):\n return False\n if torch.is_tensor(value1): # tensor state\n assert torch.is_tensor(value2)\n # Check the values on CPU to be device-agnostic\n value1 = value1.cpu()\n value2 = value2.cpu()\n if value1.shape != value2.shape or not torch.all(\n torch.isclose(value1, value2)\n ):\n return False\n else: # non-tensor state\n if value1 != value2:\n return False\n return True",
"def __eq__(self,other_state):\n \n if type(self) == type(other_state):\n return self.__members() == other_state.__members()\n else:\n return False",
"def shouldBelongToSameOutcomeMeasurement(self, template):\n gid = self.token.getAnnotationAttribute(self.type, 'group')\n oid = self.token.getAnnotationAttribute(self.type, 'outcome')\n tid = self.token.getAnnotationAttribute(self.type, 'time')\n csID = self.token.getAnnotationAttribute(self.type, 'compareSet')\n\n gid2 = template.token.getAnnotationAttribute(template.type, 'group')\n oid2 = template.token.getAnnotationAttribute(template.type, 'outcome')\n tid2 = template.token.getAnnotationAttribute(template.type, 'time')\n csID2 = template.token.getAnnotationAttribute(template.type, 'compareSet')\n\n return len(gid) > 0 and len(oid) > 0 and gid == gid2 and oid == oid2 and tid == tid2 and csID == csID2",
"def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_source_node().get_name(), edge.get_terminal_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_source_node().get_name(), edge.get_terminal_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise",
"def contains_only_agent(self, pos):\n lowx, highx, lowy, highy = self.pos_to_coords(pos)\n cell = self.grid[lowx:highx, lowy:highy, :]\n one_color = cell[0, 0] # color vector\n return one_color.any() and not (cell - one_color).any()",
"def _check_same_state(\n self,\n fsdp_osd,\n ref_osd,\n check_same_param_keys: bool,\n ):\n assert \"state\" in ref_osd\n self.assertTrue(\"state\" in fsdp_osd)\n ref_osd_state = ref_osd[\"state\"]\n fsdp_osd_state = {\n k: _gather_state_dict(v) for k, v in fsdp_osd[\"state\"].items()\n }\n\n if check_same_param_keys:\n # Check parameter keys are the same first for earlier erroring\n ref_osd_param_ids = set(ref_osd_state.keys())\n fsdp_osd_param_ids = set(fsdp_osd_state.keys())\n self.assertTrue(\n ref_osd_param_ids == fsdp_osd_param_ids,\n f\"Rank {self.rank}: {(ref_osd_param_ids, fsdp_osd_param_ids)}\",\n )\n # Check state values are the same\n for param_id, param_state in fsdp_osd_state.items():\n for state_name, value in param_state.items():\n ref_value = ref_osd_state[param_id][state_name]\n self.assertEqual(value, ref_value)\n return\n # Otherwise, only require the parameter keys to be isomorphic (e.g.\n # between IDs and names)\n ref_osd_states = list(ref_osd_state.values())\n fsdp_osd_states = list(fsdp_osd_state.values())\n self.assertEqual(len(ref_osd_states), len(fsdp_osd_states))\n # Use brute-force quadratic-time comparison since it is hard to\n # hash a tensor by value instead of by object\n for fsdp_osd_state in fsdp_osd_states:\n # Check for at least one match (may be > 1 in toy edge cases, e.g.\n # multiple biases); nonetheless, each having >= 1 match and the two\n # lists having equal length imply that the list contents are equal\n self.assertTrue(\n any(\n self._are_equal_states(fsdp_osd_state, ref_osd_state)\n for ref_osd_state in ref_osd_states\n )\n )",
"def isconsistent(self):\n TF = True\n for c in xrange(9):\n for r in xrange(9):\n if not (self.M[str(r)+\",\"+str(c)] == self.row[r][c] == self.col[c][r] == self.sec[((r/3)*3)+(c/3)][c - (c/3)*3 + (r%3)*3]):\n TF = False\n print \"Value at\",r,c,\"inconsistent:\"\n print \"self.M ==\",self.M[str(r)+\",\"+str(c)]\n print \"self.row ==\",self.row[r][c]\n print \"self.col ==\",self.col[c][r]\n print \"self.sec ==\",self.sec[((r/3)*3)+(c/3)][c - (c/3)*3 + (r%3)*3]\n return TF",
"def is_goal2(self, state):\n cube_faces = {}\n for i in range(0, 6):\n cube_faces[i] = full(state.left.shape, i, dtype=\"int8\")\n\n solved_cube = Cube.Cube(None, cube_faces)\n\n return state.create_md5() == solved_cube.create_md5()",
"def check_cell_state(self, coords):\n count_alive_neighbors = 0\n x, y = coords\n for i in range(max(x - 1, 0), min(self.m_rows, x + 2)):\n for j in range(max(y - 1, 0), min(self.n_columns, y + 2)):\n if i == x and j == y:\n continue\n if self.board[i][j] == 1:\n count_alive_neighbors += 1\n\n if self.board[x][y] == 1:\n if 2 > count_alive_neighbors or count_alive_neighbors > 3:\n self.dead.append(coords)\n else:\n if count_alive_neighbors == 3:\n self.alive.append(coords)",
"def _all_same(self, check, player_letter):\n return all(self.grid[x[0]][x[1]] == player_letter for x in check)",
"def verify(self):\n for i in self.coords:\n if np.abs(6*i-int(6*i))>0.1: return False\n if np.abs(self.coords[2]+self.coords[0]+self.coords[1]) > 0.1: return False\n return True",
"def is_state_equivalent(self, state1, state2):\n\n return IAMRole.equivalent_states.get(state1) == IAMRole.equivalent_states.get(state2)",
"def _is_paradox(self) -> bool:\n return (\n np.min(np.sum(self._state, axis=0)) <= 0\n or np.min(np.sum(self._state, axis=1)) <= 0\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an aggregator given its identifier | def get(self, aggregator_id):
aggregator = get_a_aggregator(aggregator_id)
if not aggregator:
return {'success': False, 'msg': 'aggregator does not exist'}
else:
return aggregator | [
"def get_aggregator(cls, aggregator_name):\n try:\n aggregator_class = cls._class_registry[aggregator_name.lower()]\n except KeyError:\n raise KeyError(\"No such chart type: {0:s}\".format(aggregator_name.lower()))\n return aggregator_class",
"def get_aggregator(gt_id):\n if \"tmp2m\" in gt_id or gt_id.endswith(\"sst\") or gt_id.endswith(\"icec\"):\n return \"mean\"\n if \"precip\" in gt_id:\n return \"sum\"\n raise ValueError(\"Unrecognized gt_id \"+gt_id)",
"def get(self, pool):\n\n return self.get_aggregate_from_name_or_id(pool)",
"def retrieve_interfaces_interface_aggregation_aggregation_by_id(name): # noqa: E501\n return 'do some magic!'",
"def get(identifier: str) -> FeaturePipeline:\n if identifier not in _registry.keys():\n raise KeyError(f'Identifier {identifier} is not associated with any `FeaturePipeline`.')\n return _registry[identifier]",
"def get_aggregator_job_id(backend_job_id: str, backend_id: str) -> str:\n return f\"{backend_id}-{backend_job_id}\"",
"def create(cls, aggregator_type):\n\n if aggregator_type == AggregatorType.mean:\n return MeanAggregator()\n elif aggregator_type == AggregatorType.median:\n return MedianAggregator()\n elif aggregator_type == AggregatorType.max:\n return MaxAggregator()\n elif aggregator_type == AggregatorType.min:\n return MinAggregator()\n else:\n raise Exception(\"Unknown type of aggregator\")",
"def get(id):\n\n return Group.query.get(id)",
"def get_aggregators(self):\n raise NotImplementedError",
"def get_aggregate_from_name_or_id(self, aggregate_obj):\n aggregate = None\n agg_id = None\n try:\n agg_id = int(aggregate_obj)\n except (ValueError, TypeError):\n if hasattr(aggregate_obj, 'id') and aggregate_obj.id:\n # pool is an aggregate\n agg_id = aggregate_obj.id\n\n if agg_id is not None:\n try:\n aggregate = self.nova.aggregates.get(agg_id)\n except nova_exceptions.NotFound:\n aggregate = None\n else:\n # FIXME(scroiset): can't get an aggregate by name\n # so iter over all aggregate and check for the good one\n all_aggregates = self.nova.aggregates.list()\n for agg in all_aggregates:\n if aggregate_obj == agg.name:\n aggregate = agg\n if aggregate:\n return aggregate\n else:\n raise manager_exceptions.AggregateNotFound(pool=aggregate_obj)",
"def create_interfaces_interface_aggregation_aggregation_by_id(name, aggregation): # noqa: E501\n if connexion.request.is_json:\n aggregation = AggregationSchema.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'",
"def read_by_id(_id):\n try:\n return Group.get(Group.id == _id)\n except Exception:\n return None",
"def get_aggregate(self, klass, id_, apply_map=None):\n events = pvector(self.event_store.get_events(id_))\n\n if not events:\n return\n\n return klass.generate_from_events(\n id_, events, apply_map=apply_map\n )",
"def fetch_aggregator(self, filter_dict={}):\n try:\n partition = self.partition_names[frozenset(filter_dict.keys())]\n except KeyError:\n # TODO: discern what exactly goes wrong higher up in the callback chain.\n raise ValueError(\"Bad input value: input filter type is not supported,\"\n \" input filter-value not formatted correctly,\"\n \" or multiple input filters of the same type.\")\n return self.partitions[partition][frozenset(filter_dict.values())]",
"def get_by_id(exporter_id):\n return Exporter.get_by_id(exporter_id)",
"def get_aggregate_request(self, aggregate_name, **kwargs):\n try:\n return self.split_and_replace(\n self.aggregates[aggregate_name.upper()]\n )\n except KeyError:\n raise KeyError(\n '{} aggregate does not exist. Must be one of {}'.format(\n aggregate_name,\n ', '.join(self.aggregates.keys())\n )\n )",
"def get_computation_for_distribute_aggregate_form(\n daf: forms.DistributeAggregateForm,\n) -> computation_base.Computation:\n py_typecheck.check_type(daf, forms.DistributeAggregateForm)\n\n @federated_computation.federated_computation(daf.type_signature.parameter)\n def computation(arg):\n \"\"\"The logic of a single federated computation round.\"\"\"\n server_state, client_data = arg\n broadcast_input, temp_server_state = daf.server_prepare(server_state)\n broadcast_output = daf.server_to_client_broadcast(broadcast_input)\n aggregation_input = daf.client_work(client_data, broadcast_output)\n aggregation_output = daf.client_to_server_aggregation(\n temp_server_state, aggregation_input\n )\n updated_server_state, server_output = daf.server_result(\n temp_server_state, aggregation_output\n )\n return updated_server_state, server_output\n\n return computation",
"def initAggregator(engine):\n engine.state.aggregator = {}",
"def g_get(key, creator):\n if not hasattr(g, key):\n setattr(g, key, creator())\n return getattr(g, key)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete aggregator by id | def delete(self, aggregator_id):
le_aggregator = get_a_aggregator(aggregator_id)
if not le_aggregator:
return {'success': False, 'msg': 'aggregator does not exist'}
else:
db.session.delete(le_aggregator)
db.session.commit()
        return {'success': True, 'message': 'aggregator deleted successfully'} | [
"def deleteGroup(id):",
"def delete(request, agg_id):\n next = request.GET.get(\"next\", None) or reverse(\"home\")\n aggregate = get_object_or_404(Aggregate, id=agg_id).as_leaf_class()\n # Stop all slices using the aggregate\n if request.method == \"POST\":\n for s in aggregate.slice_set.all():\n aggregate.stop_slice(s)\n # Delete the aggregate.\n req = create_update.delete_object(\n request,\n model=aggregate.__class__,\n post_delete_redirect=next,\n object_id=agg_id,\n extra_context={\"next\": next},\n template_name=TEMPLATE_PATH+\"/delete.html\",\n )\n if req.status_code == HttpResponseRedirect.status_code:\n DatedMessage.objects.post_message_to_user(\n \"Successfully deleted aggregate %s\" % aggregate.name,\n request.user, msg_type=DatedMessage.TYPE_SUCCESS,\n )\n return req",
"def delete(self, _id):\n raise NotImplementedError(\"delete item\")",
"def delete(self, id): \n post = delete(id)\n return post",
"def delete(self, categoryId):",
"def delete_interfaces_interface_aggregation_aggregation_by_id(name): # noqa: E501\n return 'do some magic!'",
"def test_delete_by_id(self, _id):",
"def delete_by_id(\n group_id: int,\n gate_id: int,\n member: MemberModel = Depends(get_active_member),\n db: Session = Depends(get_db),\n):\n service.delete_by_id(db, id=gate_id)\n return gate_id",
"def delete_entity(id):\n entity = UrlRequest.get_by_id(id)\n entity.delete()",
"def deleted(self, configurationId):",
"def test_aggregates_delete(self):\n pass",
"def delete(self, id):\r\n return eliminar_color(id)",
"def test_pointbasedgrids_id_delete(self):\n pass",
"def del_link_id(self, id_link):\n\n try:\n self.link.get(id = id_link).delete()\n except ObjectDoesNotExist:\n pass",
"def delete(self, pk):",
"def test_delete_model_by_id(self):\n pass",
"def test_catalog_attribute_set_repository_v1_delete_by_id_delete(self):\n pass",
"def delete_api(self, id):\n if ask(\"Are you sure to delete this API metadata?\") == 'Y':\n print(self._es.delete(index=self._index,\n doc_type=self._doc_type, id=id))",
"async def delete(ctx, task_id: int):\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ANOVA table for one fitted linear model. | def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, endog)
arr = np.zeros((len(design_info.terms), len(design_info.column_names)))
slices = [design_info.slice(name) for name in design_info.term_names]
for i,slice_ in enumerate(slices):
arr[i, slice_] = 1
sum_sq = np.dot(arr, effects**2)
#NOTE: assumes intercept is first column
idx = _intercept_idx(design_info)
sum_sq = sum_sq[~idx]
term_names = np.array(design_info.term_names) # want boolean indexing
term_names = term_names[~idx]
index = term_names.tolist()
table.index = Index(index + ['Residual'])
table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]
if test == 'F':
table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/
(model.ssr/model.df_resid))
table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"],
model.df_resid)
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
table['mean_sq'] = table['sum_sq'] / table['df']
return table | [
"def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,\n pr_test, robust):\n #maybe we should rethink using pinv > qr in OLS/linear models?\n effects = getattr(model, 'effects', None)\n if effects is None:\n q,r = np.linalg.qr(exog)\n effects = np.dot(q.T, endog)\n\n arr = np.zeros((len(design_info.terms), len(design_info.column_names)))\n slices = [design_info.slice(name) for name in design_info.term_names]\n for i,slice_ in enumerate(slices):\n arr[i, slice_] = 1\n\n sum_sq = np.dot(arr, effects**2)\n #NOTE: assumes intercept is first column\n idx = _intercept_idx(design_info)\n sum_sq = sum_sq[~idx]\n term_names = np.array(design_info.term_names) # want boolean indexing\n term_names = term_names[~idx]\n\n index = term_names.tolist()\n table.index = Index(index + ['Residual'])\n table.loc[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]\n # fill in residual\n table.loc['Residual', ['sum_sq','df']] = model.ssr, model.df_resid\n if test == 'F':\n table[test] = ((table['sum_sq'] / table['df']) /\n (model.ssr / model.df_resid))\n table[pr_test] = stats.f.sf(table[\"F\"], table[\"df\"],\n model.df_resid)\n table.loc['Residual', [test, pr_test]] = np.nan, np.nan\n table['mean_sq'] = table['sum_sq'] / table['df']\n return table",
"def anova_lm(*args, **kwargs):\n typ = kwargs.get('typ', 1)\n\n ### Farm Out Single model ANOVA Type I, II, III, and IV ###\n\n if len(args) == 1:\n model = args[0]\n return anova_single(model, **kwargs)\n\n try:\n assert typ in [1,\"I\"]\n except:\n raise ValueError(\"Multiple models only supported for type I. \"\n \"Got type %s\" % str(typ))\n\n ### COMPUTE ANOVA TYPE I ###\n\n # if given a single model\n if len(args) == 1:\n return anova_single(*args, **kwargs)\n\n # received multiple fitted models\n\n test = kwargs.get(\"test\", \"F\")\n scale = kwargs.get(\"scale\", None)\n n_models = len(args)\n\n model_formula = []\n pr_test = \"Pr(>%s)\" % test\n names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]\n table = DataFrame(np.zeros((n_models, 6)), columns = names)\n\n if not scale: # assume biggest model is last\n scale = args[-1].scale\n\n table[\"ssr\"] = lmap(getattr, args, [\"ssr\"]*n_models)\n table[\"df_resid\"] = lmap(getattr, args, [\"df_resid\"]*n_models)\n table.ix[1:, \"df_diff\"] = -np.diff(table[\"df_resid\"].values)\n table[\"ss_diff\"] = -table[\"ssr\"].diff()\n if test == \"F\":\n table[\"F\"] = table[\"ss_diff\"] / table[\"df_diff\"] / scale\n table[pr_test] = stats.f.sf(table[\"F\"], table[\"df_diff\"],\n table[\"df_resid\"])\n # for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan\n table[pr_test][table['F'].isnull()] = np.nan\n\n return table",
"def anova_lm(*args, **kwargs):\n typ = kwargs.get('typ', 1)\n\n ### Farm Out Single model Anova Type I, II, III, and IV ###\n\n if len(args) == 1:\n model = args[0]\n return anova_single(model, **kwargs)\n\n if typ not in [1, \"I\"]:\n raise ValueError(\"Multiple models only supported for type I. \"\n \"Got type %s\" % str(typ))\n\n test = kwargs.get(\"test\", \"F\")\n scale = kwargs.get(\"scale\", None)\n n_models = len(args)\n pr_test = \"Pr(>%s)\" % test\n names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]\n table = DataFrame(np.zeros((n_models, 6)), columns=names)\n\n if not scale: # assume biggest model is last\n scale = args[-1].scale\n\n table[\"ssr\"] = [mdl.ssr for mdl in args]\n table[\"df_resid\"] = [mdl.df_resid for mdl in args]\n table.loc[table.index[1:], \"df_diff\"] = -np.diff(table[\"df_resid\"].values)\n table[\"ss_diff\"] = -table[\"ssr\"].diff()\n if test == \"F\":\n table[\"F\"] = table[\"ss_diff\"] / table[\"df_diff\"] / scale\n table[pr_test] = stats.f.sf(table[\"F\"], table[\"df_diff\"],\n table[\"df_resid\"])\n # for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan\n table.loc[table['F'].isnull(), pr_test] = np.nan\n\n return table",
"def linear_model_summary(model, name=None):\n if not name:\n name = \"Linear\"\n variable_names = model.params.index\n parameter_estimates = model.params\n standard_errors = model.bse\n header_string = \"{:<10} {:>20} {:>15}\".format(\"Name\", \"Parameter Estimate\", \"Standard Error\")\n print(\"{} Model Summary\".format(name).center(len(header_string)))\n print('='*len(header_string))\n print(header_string)\n print('-'*len(header_string))\n format_string = \"{:<20} {:>10.2f} {:>15.2f}\"\n for name, est, se in zip(variable_names, parameter_estimates, standard_errors):\n print(format_string.format(name, est, se))",
"def add_aov(self):\n prompt = QtWidgets.QInputDialog(self._view)\n prompt.setWindowTitle('Add AOV')\n prompt.setLabelText('AOV name:')\n prompt.setOkButtonText('Add')\n if prompt.exec_():\n self._model.add_aov(prompt.textValue())",
"def anova(s1_vals, s2_vals, s3_vals):\n return stats.f_oneway(s1_vals, s2_vals, s3_vals)",
"def diagnostics(self):\n super(KalmanRegression, self).diagnostics() \n self.coefs = self._estimate_coefficients()\n self.beta_plot()",
"def az_v_theta_plot(stan_fit):\n az.plot_trace(stan_fit, var_names=['v','theta'], filter_vars=\"like\")\n print(stan_fit.stansummary())\n az.plot_autocorr(stan_fit, var_names=[\"v\",'theta'])\n az.plot_pair(stan_fit, var_names=[\"v\",'theta'], divergences=True)",
"def table_analysis():\n pass",
"def report_fit(self):\n if not self.fitted:\n print('Model not yet fit.')\n return\n\n print('R-Squared: {0:.3f}'.format(self.model_fit.rsquared))\n print('Adj. R-Squared: {0:.3f}'.format(self.model_fit.rsquared_adj))\n print('')\n\n tbl = PrettyTable(\n ['Component', ])\n tbl = PrettyTable()\n\n tbl.add_column('Component', self.fit_parameters.index.values)\n for col in ('Coefficient', 'Std. Error', 'T-Score'):\n tbl.add_column(col, self.fit_parameters[col].values)\n\n tbl.align['Component'] = 'l'\n tbl.float_format = '.3'\n\n print(tbl)",
"def test_summary(self):\n model = self.model\n model.summary()",
"def glm_test(w, x, y):\n X = _PHI(x)\n y_pred = np.dot(X,w)\n error = _rmse(y_pred,y)\n print(\"The test error for this GLM model is {}\".format(error))\n _plot2(x=x,y1=y,y2=y_pred,legend1=\"Actual y values\",legend2=\"Predicted y values\",x_label=\"x\",y_label=\"y\",title=\"Mauna Loa Predictions\")",
"def linear_model(X, y):\n results = sm.OLS(y, sm.add_constant(X)).fit()\n return results",
"def summarize(usl_fit):\n print\n print '----- Summary -----'\n print\n print usl_fit.fit_report()",
"def linearize_table(self):\n pass",
"def test_pipeline_methods_anova():\n iris = load_iris()\n X = iris.data\n y = iris.target\n # Test with Anova + LogisticRegression\n clf = LogisticRegression()\n filter1 = SelectKBest(f_classif, k=2)\n pipe = Pipeline([('anova', filter1), ('logistic', clf)])\n pipe.fit(X, y)\n pipe.predict(X)\n pipe.predict_proba(X)\n pipe.predict_log_proba(X)\n pipe.score(X, y)",
"def single_run(model):\n global X_train, X_test, y_train, y_test\n\n model.fit(X_train, y_train)\n Y_hat = model.predict(X_test)\n MAE = np.mean(abs(Y_hat - y_test))\n print('MAE for given model : %.3f' % MAE)",
"def anova_f_test(self, snps, dtype='int8'):\n (h0_betas, h0_rss, h0_rank, h0_s) = linalg.lstsq(self.X, self.Y)\n num_snps = len(snps)\n rss_list = sp.repeat(h0_rss, num_snps)\n h0_betas = map(float, list(h0_betas)) + [0.0]\n betas_list = [h0_betas] * num_snps\n var_perc = sp.zeros(num_snps)\n f_stats = sp.zeros(num_snps)\n dfs = sp.zeros(num_snps)\n p_vals = sp.ones(num_snps)\n n = self.n\n p_0 = len(self.X.T)\n\n for i, snp in enumerate(snps):\n groups = sp.unique(snp)\n q = len(groups) - 1 # Null model has 1 df.\n p = p_0 + q\n n_p = n - p\n x = sp.zeros((len(groups), n), dtype=dtype)\n for g_i, g in enumerate(groups):\n x[g_i] = sp.int32(snp == g)\n (betas, rss, p, sigma) = linalg.lstsq(sp.mat(x).T, self.Y)\n\n if not rss:\n print 'No predictability in the marker, moving on...'\n continue\n rss_list[i] = rss[0]\n betas_list[i] = map(float, list(betas))\n rss_ratio = h0_rss / rss\n var_perc[i] = 1 - 1 / rss_ratio\n f_stat = (rss_ratio - 1) * n_p / float(q)\n p_vals[i] = stats.f.sf([f_stat], q, n_p)[0]\n f_stats[i] = f_stat\n dfs[i] = n_p\n if num_snps >= 10 and (i + 1) % (num_snps / 10) == 0: # Print dots\n sys.stdout.write('.')\n sys.stdout.flush()\n\n if num_snps >= 10:\n sys.stdout.write('\\n')\n\n return {'ps':p_vals, 'f_stats':f_stats, 'rss':rss_list, 'betas':betas_list,\n 'var_perc':var_perc, 'dfs':dfs}",
"def lm_fit(self):\r\n self.LinearModel = LinearRegression().fit(self.x, self.y)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ANOVA table for one or more fitted linear models. | def anova_lm(*args, **kwargs):
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only supported for type I. "
"Got type %s" % str(typ))
### COMPUTE ANOVA TYPE I ###
# if given a single model
if len(args) == 1:
return anova_single(*args, **kwargs)
# received multiple fitted models
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
n_models = len(args)
model_formula = []
pr_test = "Pr(>%s)" % test
names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]
table = DataFrame(np.zeros((n_models, 6)), columns = names)
if not scale: # assume biggest model is last
scale = args[-1].scale
table["ssr"] = lmap(getattr, args, ["ssr"]*n_models)
table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models)
table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values)
table["ss_diff"] = -table["ssr"].diff()
if test == "F":
table["F"] = table["ss_diff"] / table["df_diff"] / scale
table[pr_test] = stats.f.sf(table["F"], table["df_diff"],
table["df_resid"])
# for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan
table[pr_test][table['F'].isnull()] = np.nan
return table | [
"def anova_lm(*args, **kwargs):\n typ = kwargs.get('typ', 1)\n\n ### Farm Out Single model Anova Type I, II, III, and IV ###\n\n if len(args) == 1:\n model = args[0]\n return anova_single(model, **kwargs)\n\n if typ not in [1, \"I\"]:\n raise ValueError(\"Multiple models only supported for type I. \"\n \"Got type %s\" % str(typ))\n\n test = kwargs.get(\"test\", \"F\")\n scale = kwargs.get(\"scale\", None)\n n_models = len(args)\n pr_test = \"Pr(>%s)\" % test\n names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]\n table = DataFrame(np.zeros((n_models, 6)), columns=names)\n\n if not scale: # assume biggest model is last\n scale = args[-1].scale\n\n table[\"ssr\"] = [mdl.ssr for mdl in args]\n table[\"df_resid\"] = [mdl.df_resid for mdl in args]\n table.loc[table.index[1:], \"df_diff\"] = -np.diff(table[\"df_resid\"].values)\n table[\"ss_diff\"] = -table[\"ssr\"].diff()\n if test == \"F\":\n table[\"F\"] = table[\"ss_diff\"] / table[\"df_diff\"] / scale\n table[pr_test] = stats.f.sf(table[\"F\"], table[\"df_diff\"],\n table[\"df_resid\"])\n # for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan\n table.loc[table['F'].isnull(), pr_test] = np.nan\n\n return table",
"def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,\n pr_test, robust):\n #maybe we should rethink using pinv > qr in OLS/linear models?\n effects = getattr(model, 'effects', None)\n if effects is None:\n q,r = np.linalg.qr(exog)\n effects = np.dot(q.T, endog)\n\n arr = np.zeros((len(design_info.terms), len(design_info.column_names)))\n slices = [design_info.slice(name) for name in design_info.term_names]\n for i,slice_ in enumerate(slices):\n arr[i, slice_] = 1\n\n sum_sq = np.dot(arr, effects**2)\n #NOTE: assumes intercept is first column\n idx = _intercept_idx(design_info)\n sum_sq = sum_sq[~idx]\n term_names = np.array(design_info.term_names) # want boolean indexing\n term_names = term_names[~idx]\n\n index = term_names.tolist()\n table.index = Index(index + ['Residual'])\n table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]\n if test == 'F':\n table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/\n (model.ssr/model.df_resid))\n table.ix[:n_rows, pr_test] = stats.f.sf(table[\"F\"], table[\"df\"],\n model.df_resid)\n\n # fill in residual\n table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,\n model.df_resid,\n np.nan, np.nan)\n table['mean_sq'] = table['sum_sq'] / table['df']\n return table",
"def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,\n pr_test, robust):\n #maybe we should rethink using pinv > qr in OLS/linear models?\n effects = getattr(model, 'effects', None)\n if effects is None:\n q,r = np.linalg.qr(exog)\n effects = np.dot(q.T, endog)\n\n arr = np.zeros((len(design_info.terms), len(design_info.column_names)))\n slices = [design_info.slice(name) for name in design_info.term_names]\n for i,slice_ in enumerate(slices):\n arr[i, slice_] = 1\n\n sum_sq = np.dot(arr, effects**2)\n #NOTE: assumes intercept is first column\n idx = _intercept_idx(design_info)\n sum_sq = sum_sq[~idx]\n term_names = np.array(design_info.term_names) # want boolean indexing\n term_names = term_names[~idx]\n\n index = term_names.tolist()\n table.index = Index(index + ['Residual'])\n table.loc[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]\n # fill in residual\n table.loc['Residual', ['sum_sq','df']] = model.ssr, model.df_resid\n if test == 'F':\n table[test] = ((table['sum_sq'] / table['df']) /\n (model.ssr / model.df_resid))\n table[pr_test] = stats.f.sf(table[\"F\"], table[\"df\"],\n model.df_resid)\n table.loc['Residual', [test, pr_test]] = np.nan, np.nan\n table['mean_sq'] = table['sum_sq'] / table['df']\n return table",
"def linear_model_summary(model, name=None):\n if not name:\n name = \"Linear\"\n variable_names = model.params.index\n parameter_estimates = model.params\n standard_errors = model.bse\n header_string = \"{:<10} {:>20} {:>15}\".format(\"Name\", \"Parameter Estimate\", \"Standard Error\")\n print(\"{} Model Summary\".format(name).center(len(header_string)))\n print('='*len(header_string))\n print(header_string)\n print('-'*len(header_string))\n format_string = \"{:<20} {:>10.2f} {:>15.2f}\"\n for name, est, se in zip(variable_names, parameter_estimates, standard_errors):\n print(format_string.format(name, est, se))",
"def anova(s1_vals, s2_vals, s3_vals):\n return stats.f_oneway(s1_vals, s2_vals, s3_vals)",
"def diagnostics(self):\n super(KalmanRegression, self).diagnostics() \n self.coefs = self._estimate_coefficients()\n self.beta_plot()",
"def add_aov(self):\n prompt = QtWidgets.QInputDialog(self._view)\n prompt.setWindowTitle('Add AOV')\n prompt.setLabelText('AOV name:')\n prompt.setOkButtonText('Add')\n if prompt.exec_():\n self._model.add_aov(prompt.textValue())",
"def show_table(models: typing.List[Model]):\n if not models:\n click.echo(\"Empty!\")\n return\n\n headers = list(flatten_dict(models[0].to_dict()).keys())\n table = Texttable(MAX_TABLE_WIDTH)\n\n table.add_rows([headers] + [_convert_model_values(md) for md in models])\n click.echo(table.draw() + \"\\n\")",
"def report_fit(self):\n if not self.fitted:\n print('Model not yet fit.')\n return\n\n print('R-Squared: {0:.3f}'.format(self.model_fit.rsquared))\n print('Adj. R-Squared: {0:.3f}'.format(self.model_fit.rsquared_adj))\n print('')\n\n tbl = PrettyTable(\n ['Component', ])\n tbl = PrettyTable()\n\n tbl.add_column('Component', self.fit_parameters.index.values)\n for col in ('Coefficient', 'Std. Error', 'T-Score'):\n tbl.add_column(col, self.fit_parameters[col].values)\n\n tbl.align['Component'] = 'l'\n tbl.float_format = '.3'\n\n print(tbl)",
"def evaluate_models_on_training(x, y, models):\n\tfor i in range(len(models)): # iterate each model\n\t\tplt.figure() # create a new plot\n\t\testyvals = np.polyval(models[i], x) # get the array of estimated y values\n\t\tplt.plot(x, y, 'bo', label='Data Points') # plot points that show original points\n\t\tplt.plot(x, estyvals, 'r-', label='Regression Curve') # plot the regression curve\n\t\tplt.xlabel('Years')\n\t\tplt.ylabel('Temperature (degrees Celsius)')\n\t\tdeg = len(models[i]) - 1 # get the degree by the number of coefficients in a model\n\t\tr2 = r2_score(y, estyvals) # get R2 value\n\t\tif deg > 1:\n\t\t\tplt.title('#{0} Model \\nwhen R2 value is {1:.6f} and degree is {2}'.format(i+1, r2, deg))\n\t\telse: # se_over_slope(x, y, estimated, model)\n\t\t\tplt.title('#{0} Model \\nwhen R2 value is {1:.6f} and degree is {2} \\nand the ratio of the standard error is {3:.6f}'.format(\n\t\t\t\ti+1, r2, deg, se_over_slope(x, y, estyvals, models[i])))\n\t\tplt.legend()\n\t\tplt.show()",
"def analyze_predictions(models,epochs):\r\n X = np.load(\"data/X-val-2k-new.npy\") #validation xsec data\r\n\r\n nrows, ncols = 1, 1\r\n fig = py.figure(figsize = (ncols*5, nrows*3))\r\n ax = py.subplot(nrows, ncols,1)\r\n\r\n for model in models:\r\n Xp = np.load(\"data/X-pre-%s.npy\"%model,allow_pickle=True) #predicted xsec\r\n R=(X-Xp)/X #calculates residuals\r\n R=R.flatten()\r\n ax.hist(R,range=(-0.02,0.02),bins=100,\r\n density=True,\r\n histtype='step',\r\n label=r'$\\rm %s$'%model)\r\n\r\n ax.set_ylabel(r'$\\rm Normalized~Yield$',size=20)\r\n ax.set_xlabel(r'$\\rm (x_{\\rm val}-x_{\\rm pre})/x_{\\rm val}$',size=20)\r\n ax.legend(loc=1,fontsize=6,frameon=False)\r\n py.tight_layout()\r\n py.savefig(('gallery/R%s.pdf')%(epochs))",
"def evaluate_models_on_testing(x, y, models):\n\tfor i in range(len(models)): # iterate each model\n\t\tplt.figure() # create a new plot\n\t\testyvals = np.polyval(models[i], x) # get the array of estimated y values\n\t\tplt.plot(x, y, 'bo', label='Data Points') # plot points that show original points\n\t\tplt.plot(x, estyvals, 'r-', label='Regression Curve') # plot the regression curve\n\t\tplt.xlabel('Years')\n\t\tplt.ylabel('Temperature (degrees Celsius)')\n\t\tdeg = len(models[i]) - 1 # get the degree by the number of coefficients in a model\n\t\tr = rmse(y, estyvals) # get the RSME\n\t\tplt.title('#{0} Model \\nwhen RMSE is {1:.6f} and degree is {2}'.format(i+1, r, deg))\n\t\tplt.legend()\n\t\tplt.show()",
"def test_pipeline_methods_anova():\n iris = load_iris()\n X = iris.data\n y = iris.target\n # Test with Anova + LogisticRegression\n clf = LogisticRegression()\n filter1 = SelectKBest(f_classif, k=2)\n pipe = Pipeline([('anova', filter1), ('logistic', clf)])\n pipe.fit(X, y)\n pipe.predict(X)\n pipe.predict_proba(X)\n pipe.predict_log_proba(X)\n pipe.score(X, y)",
"def evaluate_models_on_testing(x, y, models):\n \n # Calculate predicted y and RMSE for each model\n for model in models:\n predicted_y = np.polyval(model, x) \n RMSE = round(rmse(y, predicted_y), 3)\n \n # Plotting ...\n plt.figure()\n plt.title(\"RMSE = \" + str(RMSE) + \", Degree = \" + str(len(model) - 1))\n \n # Plot curve\n plt.plot(x, predicted_y, 'r') \n plt.xlabel(\"Years\")\n plt.ylabel(\"Temperature (C)\")\n \n # Plot data\n plt.plot(x, y, 'bo') \n \n # Show plot\n plt.show()",
"def summarize(usl_fit):\n print\n print '----- Summary -----'\n print\n print usl_fit.fit_report()",
"def OLS_CV():\n N = [500, 5000]\n y_lim = [[0.15, 0.6], [0.26, 0.45]]\n repeat = 25\n sigma2 = 0.5\n model_ols = OLS()\n poly_deg_max = 9\n k = 5\n\n mse_train = np.zeros((repeat, poly_deg_max))\n mse_test = np.zeros((repeat, poly_deg_max))\n\n for n, limit in zip(N, y_lim): # calculate for small and large dataset\n for r in range(repeat): # resample to make many models\n x = np.random.uniform(0, 1, (n, 2))\n noise = np.random.normal(0, sigma2, n)\n z = frankeFunction(x[:, 0], x[:, 1]) + noise\n\n for i in range(poly_deg_max):\n folds = kfold(list(range(n)), k=5)\n\n for j in range(k):\n train_idx, test_idx = folds(j)\n model_ols.fit(x[train_idx], z[train_idx], i)\n mse_train[r,\n i] += model_ols.mse(x[train_idx], z[train_idx])\n mse_test[r, i] += model_ols.mse(x[test_idx], z[test_idx])\n\n mse_train[r, i] /= k\n mse_test[r, i] /= k\n\n fig = plt.figure()\n axes = plt.gca()\n axes.set_ylim(limit)\n plt.grid()\n\n plt.plot(np.arange(poly_deg_max), np.mean(\n mse_train, axis=0), color=\"blue\", linewidth=3)\n plt.plot(np.arange(poly_deg_max), np.mean(\n mse_test, axis=0), color=\"red\", linewidth=3)\n\n for r in range(repeat):\n plt.plot(np.arange(poly_deg_max),\n mse_train[r], color=\"blue\", alpha=0.1)\n plt.plot(np.arange(poly_deg_max),\n mse_test[r], color=\"red\", alpha=0.1)\n\n plt.gca().set_xlabel(\"Model Complexity\")\n plt.gca().set_ylabel(\"MSE\")\n plt.gca().set_title(\"Method: OLS w/ $k$-fold CV\")\n textstr = '\\n'.join((\n \"$N = {}$\".format(n),\n \"$\\\\sigma^2 = {}$\".format(sigma2),\n \"$k = {}$\".format(k)))\n props = dict(boxstyle='round', facecolor='lightblue', alpha=0.5)\n plt.gca().text(0.75, 0.95, textstr, transform=plt.gca().transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n\n plt.legend([\"Training $\\\\overline{\\\\mathrm{MSE}}}$\",\n \"Test $\\\\overline{\\\\mathrm{MSE}}}$\"])\n fig.savefig(fig_path(f\"train_test_mse_n_{n}.pdf\"))",
"def ABSOnePlot(models, df, locParameters, dfData, combinations=None, axes=None, fig=None,\n lossType='Loss k', cmapStyle='magma', linestyles=['-', '--', ':', '-.'],\n colors=['black', 'black', 'black', 'black'], net=mod.FeedForwardLossLogSigma,\n alpha=0.5, title=None, linewidth=1, labels=[None, None, None, None],\n filt=[[-np.inf,np.inf], [-np.inf,np.inf], [-np.inf,np.inf]]):\n if axes is None:\n fig,axes=plt.subplots(1,3)\n axes=axes.flat\n cmap=mpl.cm.get_cmap(cmapStyle)\n for i,m in enumerate(models):\n color=cmap(i/len(models))\n absHat=ABSCurves(m, df, locParameters, dfData, axList=axes, title='', net=net, color=color, alpha=alpha, filt=filt)\n dates=dfData.index\n if combinations is not None:\n for j,comb in enumerate(combinations):\n for i in range(comb.shape[1]):\n axes[i].plot(dates, comb[:,i], color=colors[j], linewidth=linewidth,\n linestyle=linestyles[j], zorder=2.5, label=labels[j])\n for ax in axes:\n ax.legend()\n fig.autofmt_xdate()\n return fig, axes",
"def fit_and_score(models, X_train, X_test, y_train, y_test):\n # Random seed for reproducible results\n np.random.seed(42)\n # Make a list to keep model scores\n model_scores = {}\n # Loop through models\n for name, model in models.items():\n # Fit the model to the data\n model.fit(X_train, y_train)\n # Evaluate the model and append its score to model_scores\n model_scores[name] = model.score(X_test, y_test)\n return model_scores",
"def linear_model(X, y):\n results = sm.OLS(y, sm.add_constant(X)).fit()\n return results"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the reward_text of this DestinyActivityRewardDefinition. | def reward_text(self, reward_text):
self._reward_text = reward_text | [
"def setExperienceReward(self, flag):\n self.handle.rewardExp = flag",
"def add_reward(self, state: Tuple[int, int], reward: float):\n assert len(state) == 2, \"state shape must be 2D\"\n self.rewards[state[0], state[1]] = reward",
"def update_reward(self, reward, force=False):\n if force or reward >= 0:\n self._reward = reward",
"def set(self, value, *args, **kwargs):\n if self.cb_set is not None:\n type(self).reward = self.cb_set(*args, **kwargs)\n return\n type(self).reward = value",
"def reward_function(self, reward_function):\r\n self._reward_function = reward_function\r\n self._terminated = True",
"def report_reward(self, time, reward):\n self._r_lock.acquire()\n # TODO Catch ValueError and release lock\n self._r_buffer.append(t=time, x=reward)\n self._r_lock.release()",
"def report_reward(self, time, reward):\n self._r_lock.acquire()\n if len(self._r_buffer) > 0 and time <= self._r_buffer[-1][0]:\n self._r_lock.release()\n raise ValueError('Reward times must be strictly increasing')\n self._r_buffer.append((time, reward))\n self._r_lock.release()",
"def setText(self,text):\n if not isinstance(text, str):\n raise TypeError, utils.mapping(_(\"Text ($1) must be a string, \"\\\n \"record: $2\"), (str(text), self.__code))\n self.__text = text",
"def set_text(self, text):\n self.ignore_fb_changed = True\n self.editor.setText(text)",
"def status_text(self, status_text):\n self._status_text = status_text",
"def instruction_text(self, instruction_text):\n \n self._instruction_text = instruction_text",
"async def reward( # pylint: disable=inconsistent-return-statements\n self, event_id: str, reward: Union[JSON, IO], **kwargs: Any\n ) -> None:\n return await self._client.reward_single_slot_event(event_id, reward, **kwargs)",
"def addText(self, text):\n self.text = text",
"def add_rewards(self, team, reward):\n bots = self.botdb.query(Bot).filter_by(team_name=str(team)).all()\n for bot in bots:\n bot.total_reward += reward\n self.botdb.add(bot)\n self.botdb.flush()",
"def _set_text(self, text):\n\t\tbuff = self._get_buffer()\n\t\tbuff.set_text(text)\n\t\treturn True",
"def set_text(self, text):\n self.dataset.text = text",
"def setReward(self, rInput):\n if callable(rInput):\n self.r = rInput\n elif type(rInput) is list:\n self.rFuncs = map(lambda _: _[0], rInput)\n psi = map(lambda _: _[1], rInput)\n self.updatePsi(psi)\n else:\n raise Exception('unknown type of reward')",
"def add_reward(self, idscenario, reward):\n # choose a random action\n action = np.random.randint(len(ACTIONS))\n # add the reward\n self.rewards[idscenario, action].add(reward)",
"def trainer_alternate_randomly_reversed(self, reward: str):\r\n if reward == \"11\":\r\n self.boolean = not self.boolean"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the reward_items of this DestinyActivityRewardDefinition. | def reward_items(self, reward_items):
self._reward_items = reward_items | [
"def refund_items(self, refund_items):\n\n self._refund_items = refund_items",
"def _reset_rewards(self):\n self.rewards = [0, 0, 0, 0]\n self.nstep = self.game.active_player",
"def itemizations(self, itemizations):\n\n self._itemizations = itemizations",
"def blocked_items(self, blocked_items):\n\n self._blocked_items = blocked_items",
"def add_rewards(self, team, reward):\n bots = self.botdb.query(Bot).filter_by(team_name=str(team)).all()\n for bot in bots:\n bot.total_reward += reward\n self.botdb.add(bot)\n self.botdb.flush()",
"def update_reward(self, reward, force=False):\n if force or reward >= 0:\n self._reward = reward",
"def setReward(self, rInput):\n if callable(rInput):\n self.r = rInput\n elif type(rInput) is list:\n self.rFuncs = map(lambda _: _[0], rInput)\n psi = map(lambda _: _[1], rInput)\n self.updatePsi(psi)\n else:\n raise Exception('unknown type of reward')",
"def setExperienceReward(self, flag):\n self.handle.rewardExp = flag",
"def add_reward(self, state: Tuple[int, int], reward: float):\n assert len(state) == 2, \"state shape must be 2D\"\n self.rewards[state[0], state[1]] = reward",
"def agent_ids(self, agent_ids):\n\n self._agent_ids = agent_ids",
"def reset(self):\n self.reward_list = []\n self.action_list = []",
"def update_memory(self, teacher_reward, temp_managers, train_rewards, teacher_rewards):\n self.corrected_memory = [[] for _ in range(5)] # 5: obs, new_obs, action, reward, done\n\n i_student = 1\n for i_exp, exp in enumerate(self.tmp_memory):\n # Update student_action\n obs_dict = exp[-1]\n\n # Update q-value that measured using updated student_critic\n q_values = get_q_values(\n temp_managers, obs_dict[\"manager_observations\"],\n [obs_dict[\"manager_actions\"][0], obs_dict[\"student_action\"]])\n q_values = np.clip(q_values, a_min=self.args.q_min, a_max=self.args.q_max)\n\n obs_dict[\"q_with_student_critic\"] = np.array([normalize(\n value=q_values[i_student], min_value=self.args.q_min, max_value=self.args.q_max)])\n\n q_values = get_q_values(\n temp_managers, obs_dict[\"manager_observations\"],\n [obs_dict[\"manager_actions\"][0], obs_dict[\"teacher_action_at\"]])\n q_values = np.clip(q_values, a_min=self.args.q_min, a_max=self.args.q_max)\n\n obs_dict[\"q_at_with_student_critic\"] = np.array([normalize(\n value=q_values[i_student], min_value=self.args.q_min, max_value=self.args.q_max)])\n\n # Update avg_reward\n # Note that avg_train_reward = R_{Phase I}\n # Note that avg_teacher_reward = R_{Phase II}\n avg_train_reward, avg_teacher_reward = get_avg_reward(\n train_rewards=train_rewards, teacher_rewards=teacher_rewards, args=self.args)\n obs_dict[\"avg_train_reward\"] = np.array([avg_train_reward])\n obs_dict[\"avg_teacher_reward\"] = np.array([avg_teacher_reward])\n\n # Update teacher remain timestep\n obs_dict[\"remain_time\"] = np.array([normalize(\n value=(self.n_advice - (obs_dict[\"session_advices\"] + 1)),\n min_value=0., max_value=float(self.n_advice))])\n\n new_obs = concat_in_order(obs_dict, self.args)\n self.corrected_memory[0].append(exp[0])\n self.corrected_memory[1].append(new_obs)\n self.corrected_memory[2].append(exp[2])\n self.corrected_memory[3].append(teacher_reward)\n self.corrected_memory[4].append(exp[4])\n\n self.add_memory()\n self.clear_tmp_memory()",
"def update_weights(self, gradients, rewards):\n\n for i in range(len(gradients)):\n self.theta += self.ALPHA * gradients[i] * sum([r * (self.GAMMA ** t) for t, r in enumerate(rewards[i:])])",
"def _update_rewards(self, points_this_step):\n self.dispatch_reward[REWARD_STYLE](points_this_step)",
"def update_reward(self):\n reward = 0\n factor = 0\n reward_final = self.replay_memory.memory[-1].reward\n if reward_final >= 19:\n factor = 1\n for idx, transition in enumerate(self.replay_memory.memory[::-1]):\n #print(transition)\n reward_final = self.discount_factor * reward_final\n reward = transition.reward + factor * reward_final\n print(reward)\n # if factor != 1:\n # factor = factor -1\n # reward_final = self.discount_factor * reward_final\n # idx starts at 0\n self.replay_memory.memory[-(idx+1)] = Transition(\n transition.state, transition.action, reward)",
"def items(self, items: MiResponseResponseToDisplayUiTemplateItems):\n\n self._items = items",
"def reset_rewards(self):\n self.rewards = np.array(\n [\n self.h(self.features[t, k]) + self.noise_std * np.random.randn()\n for t, k in itertools.product(range(self.T), range(self.n_arms))\n ]\n ).reshape(self.T, self.n_arms)\n\n # to be used only to compute regret, NOT by the algorithm itself\n self.best_rewards_oracle = np.max(self.rewards, axis=1)\n self.best_actions_oracle = np.argmax(self.rewards, axis=1)",
"def set(self, value, *args, **kwargs):\n if self.cb_set is not None:\n type(self).reward = self.cb_set(*args, **kwargs)\n return\n type(self).reward = value",
"def agent_requirements(self, agent_requirements):\n\n self._agent_requirements = agent_requirements",
"def memory(self,reward):\r\n self.memory_rewards.append(reward)\r\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read the value of the raster at a particular point | def read_value(self, point):
xOffset = int((point.x - self.xOrigin) / self.pixelWidth)
yOffset = int((point.y - self.yOrigin) / self.pixelHeight)
data = self.band.ReadAsArray(xOffset, yOffset, 1, 1)
return Distance(m=data[0,0]) | [
"def raster_values_at_points(xy, raster_file, band=1, nodata_rel_tol=1.0e-08):\n\n # Raster info\n raster = gdal.Open(raster_file)\n raster_band = raster.GetRasterBand(band)\n raster_band_type = gdal.GetDataTypeName(raster_band.DataType)\n\n # Projection info\n transform = raster.GetGeoTransform()\n x_origin = transform[0]\n y_origin = transform[3]\n pixel_width = transform[1]\n pixel_height = transform[5] # Negative\n\n # Get coordinates in pixel values\n px = ((xy[:, 0] - x_origin) / pixel_width).astype(int) # x\n py = ((xy[:, 1] - y_origin) / pixel_height).astype(int) # y\n\n # Store pixel values\n raster_values = px * 0.\n\n # Get the right character for struct.unpack\n if (raster_band_type == 'Int16'):\n ctype_name = 'h'\n elif (raster_band_type == 'Float32'):\n ctype_name = 'f'\n elif (raster_band_type == 'Byte'):\n ctype_name = 'B'\n elif (raster_band_type == 'Int32'):\n ctype_name = 'i'\n else:\n print 'unrecognized DataType:', gdal.GetDataTypeName(band.DataType)\n print 'You might need to edit this code to read the data type'\n raise Exception('Stopping')\n\n # Upper bounds for pixel values, so we can fail gracefully\n xMax = raster.RasterXSize\n yMax = raster.RasterYSize\n if(px.max() < xMax and px.min() >= 0 and\n py.max() < yMax and py.min() >= 0):\n pass\n else:\n msg = 'Trying to extract point values that exceed the raster extent'\n raise Exception(msg)\n\n # Get values -- seems we have to loop, but it is efficient enough\n for i in range(len(px)):\n xc = int(px[i])\n yc = int(py[i])\n structval = raster_band.ReadRaster(\n xc, yc, 1, 1, buf_type=raster_band.DataType)\n raster_values[i] = struct.unpack(ctype_name, structval)[0]\n\n # Deal with nodata\n nodataval = raster_band.GetNoDataValue()\n rel_tol = ( abs(raster_values - nodataval) < nodata_rel_tol*abs(nodataval) ) \n missing = rel_tol.nonzero()[0]\n if len(missing) > 0:\n raster_values[missing] = numpy.nan\n\n return raster_values",
"def value_at(self, frame):\n return self.value[frame // self.resolution]",
"def get(self, x, y):\n return self.data[(y * self.sx) + x]",
"def getValue (self, row, column):\n value = 0\n try:\n value = __image__ [row, column]\n if value > 255 or value < 0:\n value = 0\n except:\n value = 0\n return value",
"def read_point(task_handle):\r\n if dll_load_error:\r\n return random.gauss(5, 1)\r\n\r\n data = float64(0)\r\n try:\r\n chk(nidaq.DAQmxReadAnalogScalarF64(\r\n task_handle, # handle\r\n float64(0.01), # timeout in seconds\r\n ctypes.byref(data), # measured value\r\n None, # Reserved, pass NULL\r\n ))\r\n except RuntimeError as _e:\r\n print(\"fail: {}\".format(_e))\r\n# print(\"{}: {}\".format(\"Point\", float(data.value)))\r\n return data.value",
"def value_at(self, pos):\n return self.data[self._data_index(pos)]",
"def getPixelAt(picture,x,y):\n return getPixel(picture,x,y)",
"def getPixel(self,position):\n return self.pixels[y][x]",
"def get_value(self, startx, starty, right, down):\n return self.map[starty + down][(startx + right) % self.width]",
"def getPixel(self, column, row):\n return self.pixels[column][row]",
"def value_at(self, ra, dec):\n for fi in self.files:\n data = fits.getdata(f\"{self.data_dir}/{fi}\")\n hdr = fits.getheader(f\"{self.data_dir}/{fi}\")\n w = wcs.WCS(hdr)\n xpix, ypix = w.all_world2pix(ra, dec, 1)\n xpix = int(xpix)\n ypix = int(ypix)\n if (0 < xpix < 2048.0) and (0 < ypix < 2048.0):\n print(f\"ADU at ({xpix:.2f}, {ypix:.2f}) = \"+\n f\"{data[ypix][xpix]:.2f}\", flush=True)",
"def index_raster(dataset, lat, lon):\n\n lat_idx = (lat - dataset.y) / dataset.height\n lon_idx = (lon - dataset.x) / dataset.width\n try:\n return dataset.data[lat_idx, lon_idx]\n except IndexError:\n return numpy.inf",
"def read(self, pin):\n if type(pin) is list:\n return [self.read(p) for p in pin]\n\n pin_id = self._pin_mapping.get(pin, None)\n if pin_id:\n value = self._read(pin_id)\n lpin = self._pin_lin.get(pin, None)\n if lpin and type(lpin[\"read\"]) is tuple:\n read_range = lpin[\"read\"]\n value = self._linear_interpolation(value, *read_range)\n return value\n else:\n raise KeyError(\"Requested pin is not mapped: %s\" % pin)",
"def get_value(self, x, y):\n\n [xi, yi] = self.getGridCoordinates(x, y)\n\n return self.grid[xi, yi]",
"def readValue(self):\n\t\tglobal coefficient\n\t\tglobal addr_G\n\t\tdata = bus.read_i2c_block_data(addr_G, ADS1115_REG_POINTER_CONVERT, 2)\n\t\t\n\t\t# Convert the data\n\t\traw_adc = data[0] * 256 + data[1]\n\n\t\tif raw_adc > 32767:\n\t\t\traw_adc -= 65535\n\t\traw_adc = int(float(raw_adc)*coefficient)\n\t\treturn {'r' : raw_adc}",
"def float_read( self, mem_addr ):\n\t\treturn struct.unpack( \">f\", self.read( mem_addr, count=4) )[0]",
"def read_value(self, channel):\n value = None\n reply = self.comm(47 + channel)\n if self.ranges[channel]['action'] == 'voltage':\n num_value = reply - 2 ** 15\n scale = 1.0 * 2 ** 15 / float(self.ranges[channel]['fullrange'])\n value = num_value / scale\n if self.ranges[channel]['action'] == 'tc':\n scale = 1.0 * 2 ** 16 / 1400\n value = (reply/scale) - 150\n return value",
"def get_gradient_value_at_point(self, point):\n x, y, z = self.get_object_position_on_grid(point)\n #get the indices\n ix = int(x)\n iy = int(y)\n iz = int(z)\n #perform a trilinear interpolation from wikipedia.org\n #x\n if(x - ix > .5):\n x0 = ix\n x1 = ix + 1\n else:\n x0 = ix - 1\n x1 = ix\n #y\n if(y - iy > .5):\n y0 = iy\n y1 = iy + 1\n else:\n y0 = iy - 1\n y1 = iy\n #z\n if(z - iz > .5):\n z0 = iz\n z1 = iz + 1\n else:\n z0 = iz - 1\n z1 = iz \n \n #solve for xd, yd, zd\n xd = (abs(x - (ix + .5))) / (x1 - x0)\n yd = (abs(y - (iy + .5))) / (y1 - y0)\n zd = (abs(z - (iz + .5))) / (z1 - z0)\n\n #now the first set of linear interp\n c00 = self.C[x0, y0, z0]*(1 - xd) + self.C[x1, y0, z0]*xd\n c10 = self.C[x0, y1, z0]*(1 - xd) + self.C[x1, y1, z0]*xd\n c01 = self.C[x0, y0, z1]*(1 - xd) + self.C[x1, y0, z1]*xd\n c11 = self.C[x0, y1, z1]*(1 - xd) + self.C[x1, y1, z1]*xd\n\n #now the second set\n c0 = c00*(1-yd) + c10*yd\n c1 = c01*(1-yd) + c11*yd\n #finally the last set\n c = c0*(1-zd) + c1*zd\n #return the predicted value\n return c",
"def get_raster_pixels(raster_path):\n\n raster_pixels = arcpy.SearchCursor(raster_path).next().count\n\n return raster_pixels"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a tree, a set of rules, and an initial state, return the list of all the trees that are produced by the transduction. | def transduce(tree, rules, initial):
# list of the current generation of SearchStates
current = []
complete = []
# give the root the initial state.
statemap = {():'q'}
current.append(SearchState(tree, statemap, 1.0))
progress = True
while progress:
nextgen = []
# for every tree that has a state: find every rule that applies to
# that tree. apply that rule to that tree, put the results into the
# nextgen.
progress = False
for ss in current: # (tr, statemap)
if statemap != {}:
for i,rule in enumerate(rules):
if rule.matches(ss.tree, ss.statemap):
results = rule.apply(ss.tree, ss.statemap)
for (newtr, newstatemap) in results:
w = rule.weight * ss.weight
newss = SearchState(newtr, newstatemap, w)
if newstatemap == {}:
complete.append(newss)
else:
nextgen.append(newss)
break
if nextgen:
dprint(nextgen)
progress = True
current = nextgen
return complete | [
"def filter_subtree(head, rules):\n for rule, replacement in rules:\n if rule(head):\n if callable(replacement):\n replacement = replacement(head)\n return [replacement]\n if len(list(head.children)) == 0:\n return [head]\n output = []\n for child in head.lefts:\n output.extend(get_subtree(child, rules))\n output.append(head)\n for child in head.rights:\n output.extend(get_subtree(child, rules))\n \n return [token for token in output if token]",
"def upward_chain(synsets, recur= False):\n \n roots = []\n \n if recur:\n for syn in synsets:\n roots += [match for match in syn.hypernyms(recursive=True)]\n \n #print \"recursive roots {0} for {1}\".format(roots, synsets)\n else:\n for syn in synsets:\n roots += [match for match in syn.hypernyms()]\n print \"first roots {0} for {1}\".format(roots, synsets)\n \n return roots",
"def find_unreachable_states(top_state, flat_state_list, trans_dict):\n def visit(state, visited=set()): # instantiating should be ok in this case\n if state in visited:\n return set()\n visited.add(state)\n # if orthogonal is reachable, its states are automatically reachable\n if state.kind == 'orthogonal':\n [visit(st, visited) for st in state.states]\n # all state's parents are reachable\n # visit transition targets going out of every parent state\n for parent in l.get_path_from_root(state):\n visit(parent, visited)\n # visit transition targets going out of current state\n for tran in trans_dict.get(state.sig, {}).values():\n if isinstance(tran, e._Choice):\n to_visit = [l.get_state_by_sig(sig, flat_state_list)\n for sig in tran.switch.values() + [tran.default]]\n else:\n to_visit = [l.get_state_by_sig(tran.target, flat_state_list)]\n # nonexistent states (None values in list) are checked elsewhere\n [visit(st, visited) for st in to_visit if st is not None]\n return visited\n\n reachable = visit(top_state)\n return [st for st in flat_state_list if st not in reachable]",
"def trees(self, tokens=None, all_trees=False, goal=None):\n i = 0\n if self.size <= 1:\n raise ParseException(\"No parse tree found\")\n else:\n for root in self.edges[0][self.size - 1]:\n if root.is_complete():\n if goal is not None and root.prod.lhs != goal:\n continue\n i += 1\n # print(\"root\", i)\n if all_trees:\n for tree in self._trees(root, tokens):\n yield (i, tree)\n else:\n for tree in self._most_compact_trees(root, tokens):\n yield (i, tree)\n # print(\"number of complete root nodes:\", i)",
"def backchain_to_goal_tree(rules, hypothesis):\n\n toReturnTrees = [hypothesis]\n matches = False\n for rule in rules:\n for consequent in rule.consequent():\n variables = match(consequent, hypothesis)\n\n if variables == None:\n continue\n\n matches = True\n\n # the thing you have to prove is true,\n # in order to prove your hypothesis is true\n antecedent = rule.antecedent() #populate(rule.antecedent(), variables)\n\n # the subtree you will return\n tree = None\n\n if isinstance(antecedent, str):\n tree = backchain_to_goal_tree(rules, populate(antecedent, variables))\n elif isinstance(antecedent, AND):\n tree = AND([backchain_to_goal_tree(rules, populate(clause, variables)) for clause in antecedent])\n elif isinstance(antecedent, OR):\n tree = OR([backchain_to_goal_tree(rules, populate(clause, variables)) for clause in antecedent])\n tree = simplify(tree)\n\n toReturnTrees.append(tree)\n\n if not matches:\n return hypothesis\n\n return simplify(simplify(OR([toReturnTrees])))",
"def extract_rules_from_tree(tree, feature_names=None):\n rules = set()\n total_count = float(tree[\"internal_count\"])\n def traverse_nodes(curr_tree=tree, split_index=0,\n decision_type=None,\n threshold=None,\n feature=None,\n support = None,\n conditions=[]):\n if split_index != 0:\n if feature_names is not None:\n feature_name = feature_names[feature]\n else:\n feature_name = feature\n rule_condition = RuleCondition(feature_index=feature,\n threshold=threshold,\n operator=decision_type,\n support = support,\n feature_name=feature_name)\n new_conditions = conditions + [rule_condition]\n else:\n new_conditions = []\n ## if not terminal node\n if \"leaf_index\" not in curr_tree:\n feature = curr_tree[\"split_feature\"]\n threshold = curr_tree[\"threshold\"]\n support = curr_tree[\"internal_count\"] / float(total_count)\n \n left_tree = curr_tree[\"left_child\"]\n traverse_nodes(left_tree, curr_tree[\"split_index\"], \"<=\", threshold, feature, support, new_conditions)\n\n right_tree = curr_tree[\"right_child\"]\n traverse_nodes(right_tree, curr_tree[\"split_index\"], \">\", threshold, feature, support, new_conditions)\n else: # a leaf node\n if len(new_conditions) > 0:\n new_rule = Rule(new_conditions, curr_tree[\"leaf_value\"])\n rules.update([new_rule])\n else:\n pass # tree only has a root node!\n return None\n \n traverse_nodes()\n\n return rules",
"def reconstruct_states(tree, genotypes, S, cost_matrix):\n root_cost = np.zeros_like(S[0])\n for root in tree.roots:\n for j in range(S.shape[1]):\n root_cost[j] += np.min(cost_matrix[:, j] + S[root])\n ancestral_state = np.argmin(root_cost)\n\n transitions = {}\n A = {}\n for root in tree.roots:\n A[root] = ancestral_state\n for u in tree.nodes(order=\"preorder\"):\n for v in tree.children(u):\n cost = cost_matrix[A[u]] + S[v]\n A[v] = np.argmin(cost)\n if A[u] != A[v]:\n transitions[v] = A[v]\n\n return ancestral_state, transitions",
"def _get_production_rules(parse_tree, token_indices):\n if len(parse_tree.leaves()) == 0:\n return set()\n if len(token_indices) == 1:\n tree_position = parse_tree.leaf_treeposition(token_indices[0])\n arg_subtree = parse_tree[tree_position[0:-1]]\n else:\n start_index = min(token_indices)\n end_index = max(token_indices) + 1\n tree_position = parse_tree.treeposition_spanning_leaves(start_index, end_index)\n arg_subtree = parse_tree[tree_position]\n\n rule_set = set()\n #try:\n for rule in arg_subtree.productions():\n s = rule.__str__()\n #we want to skip all of the unary production rules\n #if \"'\" not in s and 'ROOT' not in s:\n if 'ROOT' not in s:\n s = s.replace(' -> ', '->')\n s = s.replace(' ','_')\n s = s.replace(':','COLON')\n rule_set.add(s)\n #except:\n #print rule_set\n #pass\n return rule_set",
"def build_initial_states(graph, possible_start_dts, constraint, time_spent_fn=default_time_spent_fn):\n initial_states = []\n for node in graph.nodes:\n possible_times_spent = time_spent_fn(node)\n for start_dt in possible_start_dts:\n if node.open_at(start_dt):\n for time_spent in possible_times_spent:\n end_dt = start_dt + time_spent\n if node.open_at(end_dt):\n initial_state = PlanState(node, start_dt, end_dt, None, 0)\n constraint_params = ConstraintParams(graph, initial_state, None, None)\n if constraint(constraint_params):\n initial_states.append(initial_state)\n return initial_states",
"def process_trees(tree):\n name_target = tree[:-9].replace('trees/all_', '').replace('trees/pure_', '').replace('trees/recomb_', '')\n\n with open(tree, 'r') as check_tree:\n tree_txt = check_tree.read() \n\n if (tree_txt == 'not enough genomic information\\n'): \n return [name_target, np.NaN, 0]\n\n else:\n t = Tree(tree)\n t.set_outgroup('CONSENSUS_CPZ')\n t.ladderize()\n target_node = t.search_nodes(name=name_target)[0]\n\n result = []\n for node in target_node.get_ancestors():\n subtypes_in_node = [leaf.split('-')[0] for leaf in node.get_leaf_names() if leaf != name_target]\n if len(set(subtypes_in_node)) == 1:\n result = [name_target, subtypes_in_node[0], node.support]\n break\n else:\n pass \n if result == []:\n result = [name_target, np.NaN, 0]\n else:\n pass\n \n return result",
"def expand(self, state):\n successor_function = self.graph.successor(state.content)\n new_nodes_list = []\n for successor in successor_function:\n new_node = State(\n content=successor[0],\n total_cost=state.cost + successor[1],\n depth=state.depth + 1,\n parent=state\n )\n new_nodes_list.append(new_node)\n self.closure.add(state)\n return new_nodes_list",
"def create_dag(all_subterms):\n nodes = dict()\n for term in all_subterms:\n nodes[term] = Node(term)\n if is_function(term.root):\n for arg in term.arguments:\n nodes[arg].parent.add(nodes[term])\n return nodes.values()",
"def get_subtrees(self, st):\n item = self._states[st]\n if item.stage == ChartItem.PROCESSED:\n return item.subtrees\n # make sure we're not accidentally reentrant\n assert item.stage == ChartItem.UNPROCESSED\n item.stage = ChartItem.PROCESSING\n for func, args in item.subtreefuncs:\n item.subtrees.update(func(self, *args))\n item.stage = ChartItem.PROCESSED\n return item.subtrees",
"def get_all_node(self) -> list:\n if self.is_empty():\n return [self]\n elif self.is_leaf():\n return [self]\n else:\n temp = [self]\n for subtree in self.subtrees:\n temp += subtree.get_all_node()\n return temp",
"def get_child_nodes(self, state):\n legal_actions = self.game.get_legal_actions(state)\n new_states = [self.game.get_next_state(state, action) for action in legal_actions]\n return [Node(state, action) for state, action in zip(new_states, legal_actions)]",
"def _generate_parser_action_states(\n framework,\n doc,\n nodes,\n edges,\n node_id2node,\n token_nodes,\n abstract_node_id_set,\n child_id2edge_id_set,\n parent_id2indegree,\n parent_id2child_id_set,\n child_id2parent_id_set,\n parent_child_id2edge_id_set,\n parse_nodes_anchors,\n char_pos2tokenized_node_id,\n tokenized_parse_nodes,\n):\n\n # TODO(Sunny): Order of producing high level node\n # i.e. when should we produce high level node\n seen_node_id_set = set()\n visited_node_id_set = set()\n resolved_node_id_set = set()\n seen_edge_id_set = set()\n complete_node_id_set = set()\n parser_states = []\n parent_id2child_id_set = copy.deepcopy(parent_id2child_id_set)\n\n token_node_queue = deque(token_nodes)\n prev_anchor_from = 0\n prev_tokenized_node_id = 0\n\n complete_node_queue = deque()\n node_state = []\n token_stack = []\n parse_token_stack = copy.deepcopy(tokenized_parse_nodes)\n parse_token_stack.reverse()\n actions = []\n\n count = 0\n error = 0\n\n while count <= 1000 and (token_node_queue or complete_node_queue):\n count += 1\n action_state = []\n edge_state = []\n abstract_node_state = []\n complete_node_state = []\n\n if complete_node_queue:\n node = complete_node_queue.popleft()\n is_complete_parent = True\n else:\n node = token_node_queue.popleft()\n is_complete_parent = False\n if 'anchors' in node:\n anchors = node.get('anchors')\n anchor_from = min(anchor.get('from', -1) for anchor in anchors)\n anchor_to = max(anchor.get('to', -1) for anchor in anchors)\n if anchor_from >= len(char_pos2tokenized_node_id):\n return [], []\n curr_tokenized_node_id = char_pos2tokenized_node_id[\n anchor_from]\n logger.debug((\n 'prev anchors',\n prev_tokenized_node_id,\n ))\n for tokenized_node_id in range(prev_tokenized_node_id,\n curr_tokenized_node_id):\n logger.debug('ignore {}'.format(tokenized_node_id))\n action_state.append((IGNORE, None))\n\n prev_tokenized_node_id = curr_tokenized_node_id + 1\n logger.debug((\n 'anchors',\n anchor_from,\n anchor_to,\n curr_tokenized_node_id,\n prev_tokenized_node_id,\n ))\n\n curr_node_id = node.get('id', -1)\n curr_node_label = node.get('label', '')\n num_childs = parent_id2indegree[curr_node_id]\n logger.debug(('curr_node_id', curr_node_id))\n\n is_curr_complete = not parent_id2child_id_set.get(curr_node_id, {})\n is_curr_ignored = all([\n curr_node_id not in parent_id2child_id_set,\n curr_node_id not in child_id2parent_id_set,\n ])\n # is_curr_decoration_node = curr_node_id in decoration_node_id_set\n is_curr_decoration_node = False\n is_curr_no_child = not parent_id2indegree[curr_node_id]\n is_curr_not_seen = not curr_node_id in seen_node_id_set\n is_curr_not_visited = not curr_node_id in visited_node_id_set\n is_curr_not_resolved = not curr_node_id in resolved_node_id_set\n is_curr_not_abstract = not curr_node_id in abstract_node_id_set\n\n # if is_curr_complete and curr_node_id not in complete_node_id_set:\n # complete_node_id_set.add(curr_node_id)\n # complete_node_state.append(curr_node_id)\n\n # Actions\n logger.debug(\n pprint.pformat(\n (curr_node_id, node_state, is_curr_no_child, is_curr_complete,\n is_curr_not_seen, is_curr_not_resolved)))\n if is_curr_decoration_node:\n # Do nothing if not is remote node\n pass\n elif is_curr_ignored:\n # Do nothing if node is ignored\n # TODO(Sunny): Handle remote edges\n pass\n else:\n if is_curr_not_visited and is_curr_not_abstract:\n action_state.append((APPEND, None))\n node_state.append((curr_node_id, curr_node_id, None))\n\n if is_curr_complete and is_curr_not_resolved:\n 
resolved_node_id_set.add(curr_node_id)\n resolved_node = node_id2node[curr_node_id]\n resolved_edgess = []\n\n num_pop = num_childs\n if is_curr_not_abstract:\n num_pop += 1\n\n new_state = []\n resolved_node_stack_position = 0\n logger.debug(('node_state', curr_node_id, node_state))\n for stack_position in range(num_pop):\n if node_state:\n resolved_node_id, last, childs = node_state.pop()\n else:\n error = 1\n logger.warning('pop empty node_state')\n return [], []\n\n if resolved_node_id == curr_node_id:\n resolved_node_stack_position = stack_position\n logger.debug(\n ('stack_position', stack_position, resolved_node_id,\n last, childs, resolved_node_stack_position))\n new_state.append((resolved_node_id, last, childs))\n resolved_edges = []\n for edge_id in parent_child_id2edge_id_set[(\n curr_node_id, resolved_node_id)]:\n edge = edges[edge_id]\n resolved_edges.append(edge)\n resolved_edgess.append(resolved_edges)\n new_state.reverse()\n resolved_edgess.reverse()\n\n node_state.append((curr_node_id, curr_node_id, new_state))\n action_state.append(\n (RESOLVE,\n (num_pop, num_pop - resolved_node_stack_position - 1,\n resolved_node, resolved_edgess)))\n\n # elif curr_node_id not in seen_node_id_set:\n # action_state.append((PENDING, None))\n logger.debug(\n pprint.pformat(('node_state after action', curr_node_id,\n node_state)))\n logger.debug(action_state)\n\n visited_node_id_set.add(curr_node_id)\n seen_node_id_set.add(curr_node_id)\n for edge_id in child_id2edge_id_set[curr_node_id]:\n edge = edges[edge_id]\n is_remote_edge = 'properties' in edge and 'remote' in edge['properties']\n\n parent_id = edge.get('parent', -1)\n child_id = curr_node_id\n if is_curr_complete and child_id in parent_id2child_id_set[parent_id]:\n parent_id2child_id_set[parent_id].remove(child_id)\n logger.debug((child_id, edge_id, parent_id,\n parent_id2child_id_set[parent_id]))\n if not parent_id2child_id_set[parent_id]:\n is_abstract = parent_id in abstract_node_id_set\n # is_not_defined_complete = not parent_id in complete_node_id_set\n is_parent_not_seen = not parent_id in seen_node_id_set\n is_parent_not_complete = not parent_id in complete_node_id_set\n is_parent_not_resolved = not parent_id in resolved_node_id_set\n\n # if (is_abstract\n # and is_not_defined_complete) or is_parent_not_seen:\n if is_abstract or is_parent_not_complete:\n complete_node_id_set.add(parent_id)\n seen_node_id_set.add(parent_id)\n complete_node_state.append(parent_id)\n complete_node_queue.append(node_id2node[parent_id])\n\n # Handle abstract nodes if not remote edge\n # TODO(Sunny): This cannot handle the case of a node having\n # All childs as abstract_node\n # if not is_remote_edge and curr_node_id not in abstract_node_id_set:\n # for neig_id in [parent_id, child_id]:\n # is_abstract = neig_id in abstract_node_id_set\n # seen = neig_id in seen_node_id_set\n # if is_abstract and not seen:\n # abstract_node_state.append(neig_id)\n # seen_node_id_set.add(neig_id)\n # token_node_queue.appendleft(node_id2node[neig_id])\n\n edge_not_seen = edge_id not in seen_edge_id_set\n edge_nodes_seen = all([\n parent_id in seen_node_id_set,\n child_id in seen_node_id_set,\n ])\n\n # add edge if edge not seen and both ends seen\n if edge_not_seen and edge_nodes_seen:\n edge_state.append(edge_id)\n seen_edge_id_set.add(edge_id)\n\n # Simulate actions\n for action in action_state:\n action_type, params = action\n if action_type == APPEND:\n if parse_token_stack:\n token = parse_token_stack.pop()\n else:\n return [], []\n 
token_stack.append((curr_node_id, False, curr_node_label,\n token.get('label')))\n elif action_type == RESOLVE:\n (num_pop, resolved_node_position, resolved_node,\n resolved_edges) = params\n resolved_node_childs = []\n while num_pop > 0 and token_stack:\n if token_stack:\n resolved_node_childs.append(token_stack.pop())\n else:\n return [], []\n num_pop -= 1\n resolved_node_childs.reverse()\n\n resolved_node_id = resolved_node.get('id')\n if framework == 'ucca':\n resolved_node_label = resolved_node.get('propagate_label')\n else:\n resolved_node_label = resolved_node.get('label')\n # token_stack.append((curr_node_id, True, resolved_node_childs))\n token_stack.append((resolved_node_id, True,\n resolved_node_label, resolved_node_childs))\n\n elif action_type == IGNORE:\n if parse_token_stack:\n token = parse_token_stack.pop()\n else:\n return [], []\n\n actions.extend(action_state)\n\n logger.debug(pprint.pformat(('token stack', token_stack)))\n logger.debug(('visited states', seen_node_id_set, visited_node_id_set,\n complete_node_id_set, resolved_node_id_set))\n parser_states.append((\n curr_node_id,\n action_state,\n edge_state,\n abstract_node_state,\n complete_node_state,\n copy.deepcopy(node_state),\n copy.deepcopy(token_stack),\n ))\n\n if error:\n return [], []\n return parser_states, actions",
"def filter_tree(f, t):\n children, branches = [], t.branches[:]\n for b in branches:\n if f(b.entry):\n children.append(filter_tree(f, b))\n else:\n branches.extend(b.branches)\n return Tree(t.entry, children)",
"def find_invalid_initial_transitions(flat_state_list, trans_dict):\n # missing initial transition are handled separately so they're excluded\n without = find_missing_initial_transitions(flat_state_list, trans_dict)\n composites = [st for st in flat_state_list\n if st.kind == 'composite' and st not in without]\n\n def report(state):\n init_tran = trans_dict[state.sig][e.Initial]\n msg = None\n\n get_state = lambda sig: l.get_state_by_sig(sig, flat_state_list)\n is_child = lambda sg: state in l.get_path_from_root(get_state(sg))[:-1]\n\n if isinstance(init_tran, e._Local):\n msg = 'cannot use LocalTransition for initial'\n elif isinstance(init_tran, e._Internal):\n msg = 'cannot use InternalTransition for initial'\n elif isinstance(init_tran, e._Choice):\n if init_tran.default is None:\n msg = ('must declare default when using Choice as initial')\n elif get_state(init_tran.default) is None:\n msg = 'default points to nonexistent state'\n elif not is_child(init_tran.default):\n msg = 'default target must be a child state'\n elif any(get_state(s) is None for s in init_tran.switch.values()):\n msg = 'switch dict references nonexistent state'\n elif not all(is_child(sig) for sig in init_tran.switch.values()):\n msg = 'switch dict value not a child state'\n # at this point we know it is instance of regular Transition\n elif init_tran.target == st.sig:\n msg = 'initial transition cannot be a loop'\n elif get_state(init_tran.target) is None:\n msg = 'transition target points to nonexistent state'\n elif not is_child(init_tran.target):\n msg = 'target state must be a child state'\n elif init_tran.guard is not e.always_true:\n msg = 'initial transition cannot have a guard'\n return (state, msg) if msg else None\n\n return [report(st) for st in composites if report(st) is not None]",
"def generate_child_states(state: str) -> [str]:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add dropdown items to UI Tabs. | def uitab_dropdown_items(tab_name, tab, domain, request) -> List[dict]: | [
"def generate_item_dropdown(self, e):\n self.items_df = self.df.query(\"types == @self.food_type_dropdown.get()\")\n self.food_names_list = list(self.items_df[\"title\"])\n self.food_names_dropdown.config(value=self.food_names_list)",
"def addDropDown(self, *args) -> \"adsk::core::Ptr< adsk::core::DropDownControl >\" :\n return _core.ToolbarControls_addDropDown(self, *args)",
"def add_zms(self):\n child0 = QtGui.QStandardItem(\"zms\")\n child0.setEditable(False)\n child1 = QtGui.QStandardItem(\"\")\n self.sections[\"Options\"].appendRow([child0, child1])\n self.update_tab_text()",
"def create_tabs(self):\n\n # Tabs\n self.create_setup_tab()\n self.create_part_location_tab()\n self.create_order_tab()\n self.create_challenge_tab()\n self.tab_control.pack(expand=1, fill=\"both\", padx=5, pady=5)",
"def updates_tab_completion_lists(self, options):\r\n # Loop through options passed and add them to them\r\n # to the current tab options list\r\n for key, value in options.iteritems():\r\n self.options[key] = value",
"def updateItems(self):\n selected = self.userInput.selected()\n if selected:\n for item in self.items[selected.value()]:\n self.itemSelect.addOption(item)",
"def add_tab(self, tab, title):\r\n self._explorer.addTab(tab, title)",
"def ShowDropDown(*args, **kwargs):\n return _aui.AuiTabArt_ShowDropDown(*args, **kwargs)",
"def createDropDowns(self):\n\n self.componentDropwDown = QtGui.QComboBox()\n self.componentDropwDown.addItem(\"Resistor\")\n self.componentDropwDown.addItem(\"Coil\")\n self.componentDropwDown.addItem(\"Capacitator\")\n self.componentDropwDown.addItem(\"V-Source\")\n self.componentDropwDown.addItem(\"I-Source\")\n self.componentDropwDown.currentIndexChanged.connect(self.on_ComponentChanged)\n\n self.potenzialDropDownFrom = QtGui.QComboBox()\n self.potenzialDropDownFrom.addItem(\"---Potencial From---\")\n self.potenzialDropDownFrom.addItem(\"E-Last\")\n self.potenzialDropDownFrom.addItem(\"E-Masse\")\n self.potenzialDropDownFrom.setAutoCompletion(True)\n \n self.potenzialDropDownTo = QtGui.QComboBox()\n self.potenzialDropDownTo.addItem(\"---Potencial To---\")\n self.potenzialDropDownTo.addItem(\"E-Last\")\n self.potenzialDropDownTo.addItem(\"E-Masse\")\n self.potenzialDropDownFrom.setAutoCompletion(True)\n\n self.directionDropwDown = QtGui.QComboBox()\n self.directionDropwDown.addItem(\"left\")\n self.directionDropwDown.addItem(\"right\")\n self.directionDropwDown.addItem(\"up\")\n self.directionDropwDown.addItem(\"down\")\n\n self.potenzialDropDown = QtGui.QComboBox()\n self.potenzialDropDown.setFixedSize(200,20)\n self.potenzialDropDown.hide()\n self.potenzialDropDown.currentIndexChanged.connect(self.onPotencialChanged)",
"def add_drop_down(self, col_number, col_label):\n if col_label.endswith('**') or col_label.endswith('^^'):\n col_label = col_label[:-2]\n # add drop-down for experiments\n if col_label == \"experiments\":\n if 'measurements' in self.contribution.tables:\n meas_table = self.contribution.tables['measurements'].df\n if 'experiment' in meas_table.columns:\n exps = meas_table['experiment'].unique()\n self.choices[col_number] = (sorted(exps), False)\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n return\n #\n if col_label == 'method_codes':\n self.add_method_drop_down(col_number, col_label)\n elif col_label == 'magic_method_codes':\n self.add_method_drop_down(col_number, 'method_codes')\n elif col_label in ['specimens', 'samples', 'sites', 'locations']:\n if col_label in self.contribution.tables:\n item_df = self.contribution.tables[col_label].df\n item_names = item_df.index.unique() #[col_label[:-1]].unique()\n self.choices[col_number] = (sorted(item_names), False)\n elif col_label in ['specimen', 'sample', 'site', 'location']:\n if col_label + \"s\" in self.contribution.tables:\n item_df = self.contribution.tables[col_label + \"s\"].df\n item_names = item_df.index.unique() #[col_label[:-1]].unique()\n self.choices[col_number] = (sorted(item_names), False)\n # add vocabularies\n if col_label in self.contribution.vocab.suggested:\n typ = 'suggested'\n elif col_label in self.contribution.vocab.vocabularies:\n typ = 'controlled'\n else:\n return\n\n # add menu, if not already set\n if col_number not in list(self.choices.keys()):\n if typ == 'suggested':\n self.grid.SetColLabelValue(col_number, col_label + \"^^\")\n controlled_vocabulary = self.contribution.vocab.suggested[col_label]\n else:\n self.grid.SetColLabelValue(col_number, col_label + \"**\")\n controlled_vocabulary = self.contribution.vocab.vocabularies[col_label]\n #\n stripped_list = []\n for item in controlled_vocabulary:\n try:\n stripped_list.append(str(item))\n except UnicodeEncodeError:\n # skips items with non ASCII characters\n pass\n\n if len(stripped_list) > 100:\n # split out the list alphabetically, into a dict of lists {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar']...}\n dictionary = {}\n for item in stripped_list:\n letter = item[0].upper()\n if letter not in list(dictionary.keys()):\n dictionary[letter] = []\n dictionary[letter].append(item)\n stripped_list = dictionary\n\n two_tiered = True if isinstance(stripped_list, dict) else False\n self.choices[col_number] = (stripped_list, two_tiered)\n return",
"def addTab(self, tab, *args):\n tab.parent_tab_widget = self\n super(BaseTabWidget, self).addTab(tab, *args)",
"def add(self, widget, tabText=None, asHTML=False, name=None):\n print \"TabPanel add\", widget, tabText, asHTML, name\n self.insert(widget, tabText, asHTML, self.getWidgetCount(), name)",
"def add_tab(self):\n if (len(self.tab_list) > 6):\n dialog = ErrorDialog('No more than 6 transforms allowed')\n dialog.size_hint = (0.7,0.6)\n dialog.pos_hint = {'x':0.15,'y':0.2}\n return\n \n pos = self.tab_list.index(self.current_tab)\n content = TransformPanel()\n content.bind(transform=self.recalculate)\n panel = TabbedPanelHeader(text='',content=content)\n panel.background_color = (0.8,0.8,0.5)\n panel.background_down = 'a5lib/uix/tab_down.png'\n panel.background_normal = 'a5lib/uix/tab_up.png'\n self.add_widget(panel)\n for x in range(pos):\n self.shuffle_tab(x,True)\n tab1 = self.tab_list[pos+1]\n tab2 = self.tab_list[pos]\n \n tab1.state = 'normal'\n tab2.state = 'down'\n self.switch_to(self.tab_list[pos])\n # Tab is identity; no recalculation needed.",
"def addComboBox(self, id, items, dim=[1,1], label=None):\n\t\tif not label: label = id\n\t\tself.widgets.append(JPLComboBox(self.centralWidget, id, items, dim, label))\n\t\tself.labels.append(JPLLabel(self, label, dim))",
"def set_combobox(self, domain:str, option_list:list):\n setting_area = QVBoxLayout()\n rows = QVBoxLayout()\n btnAdd = QPushButton(parameter.add_str)\n btnAdd.clicked.connect(lambda:self.Addbutton_click(domain))\n\n\n for elem in option_list:\n row = self.one_row(elem, domain)\n row.itemAt(0).widget().setEnabled(False)\n rows.addLayout(row)\n\n\n setting_area.addLayout(rows)\n setting_area.addWidget(btnAdd)\n return setting_area",
"def add_options_section(self):\n self.sections[\"Options\"] = QtGui.QStandardItem(\"Options\")\n new_options = {\"irga_type\": \"Li-7500RS\", \"sonic_type\": \"CSAT3B\",\n \"SONIC_Check\": \"Yes\", \"IRGA_Check\": \"Yes\"}\n for key in new_options:\n value = new_options[key]\n child0 = QtGui.QStandardItem(key)\n child0.setEditable(False)\n child1 = QtGui.QStandardItem(value)\n self.sections[\"Options\"].appendRow([child0, child1])\n self.model.insertRow(self.section_headings.index(\"Variables\"), self.sections[\"Options\"])\n self.update_tab_text()",
"def TabsMenu(self):\n self.actionHome.triggered.connect(self.Show_Home)\n self.actionPlotting.triggered.connect(self.Show_Plotting)",
"def _init_tabs(self):\n\n # Loop over number of DUTs and create tmp setup and options for each DUT\n for i in range(self.setup['n_duts']):\n\n tmp_setup = {}\n tmp_options = {}\n\n # Fill setup for i_th DUT\n for s_key in self.setup.keys():\n # Tuples and lists have DUT specific info; loop and assign the respective i_th entry to the tmp setup\n if isinstance(self.setup[s_key], list) or isinstance(self.setup[s_key], tuple):\n tmp_setup[s_key] = self.setup[s_key][i]\n # General info valid for all DUTs\n else:\n tmp_setup[s_key] = self.setup[s_key]\n\n # Fill options for i_th DUT\n for o_key in self.options.keys():\n # Tuples and lists have DUT specific info; loop and assign the respective i_th entry to the tmp options\n if isinstance(self.options[o_key], list) or isinstance(self.options[o_key], tuple):\n tmp_options[o_key] = self.options[o_key][i]\n # General info valid for all DUTs\n else:\n tmp_options[o_key] = self.options[o_key]\n\n # Create widget\n widget = AnalysisWidget(parent=self.tabs, setup=tmp_setup, options=tmp_options, name=self.name,\n tab_list=self.tab_list)\n\n # Remove buttons and progressbar from AnalysisWidget instance; ParallelAnalysisWidget has one for all\n widget.btn_ok.deleteLater()\n widget.btn_rerun.deleteLater()\n widget.p_bar.deleteLater()\n\n # Add to tab widget\n self.tw[self.setup['dut_names'][i]] = widget\n self.tabs.addTab(self.tw[self.setup['dut_names'][i]], self.setup['dut_names'][i])",
"def uitab_sidebar_items(tab_name, tab, domain, request) -> List[Tuple[str, List[dict]]]:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add sidebar items to UI tabs. | def uitab_sidebar_items(tab_name, tab, domain, request) -> List[Tuple[str, List[dict]]]: | [
"def setup_sidebar_items(self):\n\t\tif self.data.allow_sidebar_items:\n\t\t\t# disable all\n\t\t\tfrappe.db.sql(\"update `tabPortal Menu Item` set enabled=0\")\n\n\t\t\t# enable\n\t\t\tfrappe.db.sql(\n\t\t\t\t\"\"\"update `tabPortal Menu Item` set enabled=1\n\t\t\t\twhere route in ({})\"\"\".format(\n\t\t\t\t\t\", \".join(f'\"{d}\"' for d in self.data.allow_sidebar_items)\n\t\t\t\t)\n\t\t\t)\n\n\t\tif self.data.remove_sidebar_items:\n\t\t\t# disable all\n\t\t\tfrappe.db.sql(\"update `tabPortal Menu Item` set enabled=1\")\n\n\t\t\t# enable\n\t\t\tfrappe.db.sql(\n\t\t\t\t\"\"\"update `tabPortal Menu Item` set enabled=0\n\t\t\t\twhere route in ({})\"\"\".format(\n\t\t\t\t\t\", \".join(f'\"{d}\"' for d in self.data.remove_sidebar_items)\n\t\t\t\t)\n\t\t\t)",
"def TabsMenu(self):\n self.actionHome.triggered.connect(self.Show_Home)\n self.actionPlotting.triggered.connect(self.Show_Plotting)",
"def construct_tabs(self):\n for key, config_list in self.groups.items():\n page = ConfigPage(self.notebook, config_list)\n self.notebook.AddPage(page, key)\n self.clean_edit_state()",
"def bar_add(self, widget):\n self.toolbar.addWidget(QtGui.QLabel(\"\")) # Spazio\n self.toolbar.addWidget(widget) # + Widget",
"def move_tabs(self, args):\n tabs_before = list(chain.from_iterable(\n map(self._safe_list_tabs, self._apis)))\n tabs_after = edit_tabs_in_editor(tabs_before)\n if tabs_after is None:\n return\n\n for api in self._apis:\n self._move_tabs_if_changed(\n api,\n api.filter_tabs(tabs_before),\n api.filter_tabs(tabs_after))",
"def create_tabs(self):\n\n # Tabs\n self.create_setup_tab()\n self.create_part_location_tab()\n self.create_order_tab()\n self.create_challenge_tab()\n self.tab_control.pack(expand=1, fill=\"both\", padx=5, pady=5)",
"def addMenus(self):\n addFileMenu()\n addEditMenu()",
"def add_items(self, items, app_config):\n _header = self.check_items(items.files, app_config)\n\n if not _header:\n print('The added folder has no contents.\\nPlease add a folder with contents or a single file.')\n return\n\n single = True if len(items.files) == 1 else False\n \n for item in range(len(items.files)):\n _item = CustomTreeItem(_header)\n _item.build_subitem_values(items.files[item], single, app_config)\n _item.build_widget_items(self)\n _item.setTextAlignment(5, QtCore.Qt.AlignCenter)\n self._widgets.append(_item)",
"def _add_menus(self):\r\n self.menu_bar.Append(self.mfile, \"&File\")\r\n self.menu_bar.Append(self.medit, \"&Edit\")\r\n self.menu_bar.Append(self.mview, \"&View\")",
"def add(self, *args) -> \"adsk::core::Ptr< adsk::core::ToolbarTab >\" :\n return _core.ToolbarTabs_add(self, *args)",
"def _add_menu_items(self):\r\n self.mfile.AppendItem(self.mf_close)\r\n self.mfile.AppendItem(self.mf_exit)\r\n\r\n self.medit.AppendItem(self.me_redraw)\r\n self.medit.AppendItem(self.me_pref)\r\n self.medit.AppendSeparator()\r\n self.medit.AppendItem(self.me_run)\r\n\r\n self.mview.AppendItem(self.mv_zoomfit)\r\n self.mview.AppendSeparator()\r\n\r\n self.mopts.AppendItem(self.mo_limits)\r\n self.mopts.AppendItem(self.mo_emails)",
"def __create_tabs(self):\r\n self.tab1 = tk.Frame(self.note)\r\n self.tab2 = tk.Frame(self.note)\r\n self.tab3 = tk.Frame(self.note)\r\n self.note.add(self.tab1, text=\" Lines \")\r\n self.note.add(self.tab2, text=\" Stops \")\r\n self.note.add(self.tab3, text=\" Buses \")\r\n self.note.pack(side=tk.TOP)",
"def set_up_tabs(self):\n _2D_maps_tab = QtGui.QWidget()\n _3D_maps_tab = QtGui.QWidget()\n\n self._2D_maps_tab_placement = QtGui.QGridLayout(_2D_maps_tab)\n self._3D_maps_tab_placement = QtGui.QGridLayout(_3D_maps_tab)\n\n self.tab_widget.addTab(_2D_maps_tab, \"2D Maps\")\n self.tab_widget.addTab(_3D_maps_tab, \"3D Maps\")",
"def create_tabs(self):\n row = 0\n while row < 100:\n self.rowconfigure(row, weight=1)\n self.columnconfigure(row, weight=1)\n row += 1\n\n self.notebook = ttk.Notebook(self, style='Custom.TNotebook')\n self.log = LoggingFrame(parent=self, controller=self)\n\n self.notebook.grid(row=0, column=0, columnspan=100, rowspan=21, sticky='NSWE')\n self.log.grid(row=21, column=0, columnspan=100, rowspan=100, sticky='NSWE')\n\n # Defining the notebook tabs\n self.tabs = {}\n self.tab_map = OrderedDict([\n (InitialFrame, 'Home'),\n (AttackARPFrame, 'ARP Poisoning'),\n (AttackDNSFrame, 'DNS Cache Poisoning'),\n (HelpFrame, 'Help')\n ])\n\n # Add the frames to the application\n for tab in self.tab_map.keys():\n frame_name = self.tab_map[tab]\n frame = tab(parent=self.notebook, controller=self)\n self.notebook.add(frame, text=frame_name)\n self.tabs[tab.__name__] = frame\n\n self.notebook.tab('.!mainapplication.!notebook.!attackdnsframe', state=\"disabled\")",
"def addTab(self, tab, *args):\n tab.parent_tab_widget = self\n super(BaseTabWidget, self).addTab(tab, *args)",
"def add_tab(self, tab, title):\r\n self._explorer.addTab(tab, title)",
"def addTab(self, label, selected=False):\n button_index = len(self.tabs.buttons())\n\n tab = QtWidgets.QPushButton(label)\n tab.setFocusPolicy(QtCore.Qt.FocusPolicy.TabFocus)\n tab.setObjectName('CustomTab')\n tab.setCheckable(True)\n self.tabs.addButton(tab, button_index)\n self.tab_layout.addWidget(tab)\n self.stack.addWidget(QtWidgets.QWidget())\n if button_index == 0 or selected:\n self.stack.setCurrentIndex(button_index)\n tab.setChecked(True)",
"def _build_tabs(self, root: Tk) -> None:\r\n tab_control = ttk.Notebook(root)\r\n self.main_tab = LabelFrame(tab_control, labelanchor=NW, fg='black', text='---')\r\n self.log_tab = LabelFrame(tab_control, labelanchor=NW, fg='black', text='LOG')\r\n \r\n tab_control.add(self.main_tab, text=f\"{'Main':^30s}\")\r\n tab_control.add(self.log_tab,text=f\"{'Work log':^30s}\")\r\n tab_control.pack(expand=1, fill='both')",
"def extraMenus(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the closest Station for the zone of the point chosen for one year | def getClosestSationByYearSingleBlock(lon,lat, year):
lon_t = int(lon)
lat_t = int(lat)
rows = session.execute(f"""SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_t} AND lat_t={lat_t} AND year={year}""")
for row in rows:
row0 = None
row1 = row
row2 = row
point0 = (0,0)
point1 = (row.lon, row.lat)
point2 = (row.lon, row.lat)
res = getclosest(point0,point1, point2)
if res == 1:
row0 = row1
else:
row0 = row2
return row0.station | [
"def getClosestSationByYearMultiBlock(lon,lat, year):\n lon_t = int(lon)\n lat_t = int(lat)\n row0 = None\n for lon_m in (lon_t - 1,lon_t, lon_t + 1):\n for lat_m in (lat_t - 1,lat_t, lat_t + 1):\n rows = session.execute(f\"\"\"SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_m} AND lat_t={lat_m} AND year={year}\"\"\")\n for row in rows: \n row1 = row\n row2 = row\n point0 = (0,0)\n point1 = (row.lon, row.lat)\n point2 = (row.lon, row.lat)\n res = getclosest(point0,point1, point2)\n if res == 1:\n row0 = row1\n else: \n row0 = row2\n return row0.station",
"def getClosestStation(query):\n return maps.queryVenue(\"train stations near \" + query).partition(\" \")[0] # get just name of station",
"def find_nearest_station(self, lon, lat):\n angle, sep = angular_separation(\n self.data[self.unique_stations_idx][\"x\"],\n self.data[self.unique_stations_idx][\"y\"], lon, lat)\n\n nearest_idx = np.argmin(sep)\n\n print(\"\")\n print(\"Nearest weather station is {:.2f} degrees away..\".format(\n sep[nearest_idx]))\n print(\"\")\n\n return self.unique_stations[nearest_idx]",
"def worst_year(strat: Strategy) -> Decimal:\n rel_diff = relative_yearly_returns(strat)\n if len(rel_diff) > 0:\n return rel_diff.min()\n else:\n raise InsufficientTimeframe",
"def nearestZone(self, curzone = None, numb = 1):\r\n ###Numb is as yet not implemented\r\n if curzone is None:\r\n curzone = self.getZone() #the zone could be some irrational combination which makes an integer representation impossible\r\n\r\n uvwn = normMatrix(curzone) #in case curzone is not provided as a unit vector\r\n \r\n mx = np.array([])\r\n nm = 0\r\n #list of types of zones to test\r\n \r\n lst = [np.array([1,0,0]), np.array([1,1,0]), np.array([1,1,1]), np.array([1,2,0]), np.array([1, 1, 2]), np.array([1, 2, 2]), np.array([1,3,0]), np.array([1,3,1])]\r\n #lst = [np.array([1,0,0])]\r\n #find the ZA that is closest from a list\r\n for i in lst:\r\n mat = self.getSym(i)\r\n matn = normMatrix(mat)\r\n #test which ones are reachable and only take those\r\n isr = self.isReachable(matn, verbose = False)\r\n matn = matn[:, isr]\r\n #test that matn isn't empty; if all are nan then matn should be empty\r\n if matn.size>0:\r\n #make the dot product with hkl\r\n dp = np.dot(matn.T, uvwn)\r\n #print(dp)\r\n #find where the dot product is maximum - this vector is closest to the current zone\r\n indx = np.argmax(dp)\r\n #print(indx)\r\n if dp[indx,0]>nm:\r\n #check also that the found zone is not identical to curzone. Since they are both unit vectors, dp = 1\r\n if round(dp[indx,0], 6)!=1:\r\n #if it's larger than the already found dot product, then replace the storage\r\n nm = dp[indx,0]\r\n mx = integerRep(matn[:, indx])\r\n #print(nm)\r\n #print(mx)\r\n return mx",
"def solarReturn(self, year):\n sun = self.getObject(const.SUN)\n date = Datetime('{0}/01/01'.format(year),\n '00:00',\n self.date.utcoffset)\n srDate = ephem.nextSolarReturn(date, sun.lon)\n return Chart(srDate, self.pos, hsys=self.hsys)",
"def station_longitude_1(epoch):\n\n # First check that input value is of correct types\n if not isinstance(epoch, Epoch):\n raise TypeError(\"Invalid input type\")\n # Check that the input epoch is within valid range\n y = epoch.year()\n if y < -2000.0 or y > 4000.0:\n raise ValueError(\"Epoch outside the -2000/4000 range\")\n # Set some specific constants for Saturn's opposition\n a = 2451870.17\n b = 378.091904\n m0 = 318.0172\n m1 = 12.647487\n k = round((365.2425 * y + 1721060.0 - a) / b)\n jde0 = a + k * b\n m = m0 + k * m1\n m = Angle(m).to_positive()\n m = m.rad()\n t = (jde0 - 2451545.0) / 36525.0\n # Compute an auxiliary angle\n aa = 82.74 + 40.76 * t\n bb = 29.86 + 1181.36 * t\n cc = 14.13 + 590.68 * t\n dd = 220.02 + 1262.87 * t\n # Convert to radians\n aa = Angle(aa).rad()\n bb = Angle(bb).rad()\n cc = Angle(cc).rad()\n dd = Angle(dd).rad()\n corr = (-68.884 + t * (0.0009 + t * 0.00023) +\n sin(m) * (5.5452 + t * (-0.0279 - t * 0.0002)) +\n cos(m) * (3.0727 + t * (-0.043 + t * 0.00007)) +\n sin(2.0 * m) * (0.1101 + t * (-0.0006 - t * 0.00001)) +\n cos(2.0 * m) * (0.1654 + t * (-0.0043 + t * 0.00001)) +\n sin(3.0 * m) * (0.001 + t * 0.0001) +\n cos(3.0 * m) * (0.0095 - t * 0.0003) +\n sin(aa) * (0.0 + t * (-0.0337 + t * 0.00018)) +\n cos(aa) * (-0.851 + t * (0.0044 + t * 0.00068)) +\n sin(bb) * (0.0 + t * (-0.0064 + t * 0.00004)) +\n cos(bb) * (0.2397 + t * (-0.0012 - t * 0.00008)) +\n sin(cc) * (0.0 - t * 0.001) +\n cos(cc) * (0.1245 + t * 0.0006) +\n sin(dd) * (0.0 + t * (0.0024 - t * 0.00003)) +\n cos(dd) * (0.0477 + t * (-0.0005 - t * 0.00006)))\n to_return = jde0 + corr\n return Epoch(to_return)",
"def get_nearest_station(latitude, longitude):\n url = 'http://realtime.mbta.com/developer/api/v2/stopsbylocation?api_key=wX9NwuHnZU2ToO7GmGR9uw'\n specific_location = '&lat=' + str(latitude) + '&lon=' + str(longitude) + '&format=json'\n final_url = url + specific_location\n response_data = get_json(final_url)\n nearby_station = response_data[\"stop\"][0][\"stop_name\"]\n distance = response_data[\"stop\"][0][\"distance\"]\n \"\"\"if float(distance) < 1:\n distance = float(distance) / 5280\n distance = str(distance)\"\"\"\n return nearby_station, distance",
"def test_aware_floor_year_out_of_dst(self):\n t = fleming.convert_to_tz(\n datetime.datetime(2013, 3, 14, 12, 23, 4, 40), pytz.timezone('US/Eastern'))\n # Original time zone should be in DST\n self.assertEquals(t.tzinfo.dst(t), datetime.timedelta(hours=1))\n ret = fleming.floor(t, year=1)\n # Resulting time zone should not be in DST\n self.assertEquals(ret.tzinfo.dst(ret), datetime.timedelta(0))\n self.assertEquals(ret, datetime.datetime(2013, 1, 1, tzinfo=ret.tzinfo))",
"def station_by_name(self, name):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if name == _[\"properties\"][\"name\"]]\n log.debug(\"searching for station {} found {}\".format(name, station))\n return station[0]\n except:\n log.debug(\"Exception: searching for station {} found None\".format(name))\n return None",
"def object_az_el(source, site, year, doy):\n try:\n coords = APcn.get_icrs_coordinates(source)\n except APcn.NameResolveError as details:\n raise APcn.NameResolveError(details)\n module_logger.debug(\"Sky coords: %s\", coords)\n \n try:\n dss = C.DSS(site)\n module_logger.debug(\"DSS-%d: %f, %f\", site, dss.long*180/pi, dss.lat*180/pi)\n except KeyError:\n raise KeyError('%d is not a valid DSS station' % site)\n loc = APc.EarthLocation(dss.long*u.rad, dss.lat*u.rad)\n module_logger.debug(\"Site coords: %s\", loc)\n \n if doy:\n mjd = DT.MJD(year,doy)\n else:\n raise RuntimeError(\"no DOY given\")\n tt = APt.Time(mjd, format='mjd')\n module_logger.debug(\"ISO time = %s\", tt.iso)\n tt.delta_ut1_utc = 0\n coords.obstime = tt\n coords.location = loc\n return coords.altaz",
"def get_nearest_station(latitude, longitude):\n url=\"https://api-v3.mbta.com/stops?sort=distance&filter[latitude]={latitude}&filter[longitude]={longitude}\".format(longitude=longitude, latitude=latitude)\n data=get_json(url)\n if len(data['data'])==0:\n return ()\n if len(data['data'][0])==0:\n return ()\n if len(data['data'][0]['attributes'])==0:\n return ()\n if len(data['data'][0]['attributes']['name'])==0:\n return ()\n\n returnable=(data['data'][0]['attributes']['name'], data['data'][0]['attributes']['wheelchair_boarding'])\n return returnable",
"def gains_solar(zone, hour):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.results.gains.solar\", \r\n zone.eco_id, hour)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)",
"def get_closest(self, point):\n distance = (self.dpath[:, 1] - point[1]) ** 2 + (self.dpath[:, 0] - point[0]) ** 2\n i = np.where(distance == distance.min())\n return i[0][0]",
"def get_persons_closest_to_average(table):\n\n years_list = common.get_values_from_column(table, 2, \"int\")\n average_year = common.get_average_value(years_list)\n\n lowest_difference = float(\"inf\")\n\n for i in range(len(table)):\n difference = (int(table[i][2]) - average_year)\n if abs(difference) < lowest_difference:\n lowest_difference = abs(difference)\n closest_value = table[i][2]\n\n closest_people = [table[i][1] for i in range(len(table)) if table[i][2] == closest_value]\n\n return closest_people",
"def find_nearest_station(self, location):\n nearest = None\n dist = float('inf')\n for st in self._stations:\n loc_dist = st.distance_to(location)\n if loc_dist < dist:\n nearest = st\n dist = loc_dist\n return nearest",
"def get_ate(df, country_name, year):\n Y1 = get_sr(df,country_name,year)\n Y0 = 0.5*(get_sr(df,country_name,year-1) +\n get_sr(df,country_name,year+1))\n ATE = Y1 - Y0\n return ATE",
"def find_station(value):\n for line in self.line_list:\n station = line.find_station(value)\n if station:\n return station\n return None",
"def station_by_id(self, id):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if _[\"properties\"][\"station_id\"] == id]\n log.debug(\"searching for station_id {} found {}\".format(id, station))\n return station[0]\n except:\n log.debug(\"searching for station_id {} found None\".format(id))\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the closest Station for the zone of the point chosen for one year | def getClosestSationByYearMultiBlock(lon,lat, year):
lon_t = int(lon)
lat_t = int(lat)
row0 = None
for lon_m in (lon_t - 1,lon_t, lon_t + 1):
for lat_m in (lat_t - 1,lat_t, lat_t + 1):
rows = session.execute(f"""SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_m} AND lat_t={lat_m} AND year={year}""")
for row in rows:
row1 = row
row2 = row
point0 = (0,0)
point1 = (row.lon, row.lat)
point2 = (row.lon, row.lat)
res = getclosest(point0,point1, point2)
if res == 1:
row0 = row1
else:
row0 = row2
return row0.station | [
"def getClosestSationByYearSingleBlock(lon,lat, year):\n lon_t = int(lon)\n lat_t = int(lat)\n rows = session.execute(f\"\"\"SELECT * FROM {KEYSPACE}.{TABLE} where lon_t={lon_t} AND lat_t={lat_t} AND year={year}\"\"\")\n for row in rows: \n row0 = None\n row1 = row\n row2 = row\n point0 = (0,0)\n point1 = (row.lon, row.lat)\n point2 = (row.lon, row.lat)\n res = getclosest(point0,point1, point2)\n if res == 1:\n row0 = row1\n else: \n row0 = row2\n return row0.station",
"def getClosestStation(query):\n return maps.queryVenue(\"train stations near \" + query).partition(\" \")[0] # get just name of station",
"def find_nearest_station(self, lon, lat):\n angle, sep = angular_separation(\n self.data[self.unique_stations_idx][\"x\"],\n self.data[self.unique_stations_idx][\"y\"], lon, lat)\n\n nearest_idx = np.argmin(sep)\n\n print(\"\")\n print(\"Nearest weather station is {:.2f} degrees away..\".format(\n sep[nearest_idx]))\n print(\"\")\n\n return self.unique_stations[nearest_idx]",
"def worst_year(strat: Strategy) -> Decimal:\n rel_diff = relative_yearly_returns(strat)\n if len(rel_diff) > 0:\n return rel_diff.min()\n else:\n raise InsufficientTimeframe",
"def nearestZone(self, curzone = None, numb = 1):\r\n ###Numb is as yet not implemented\r\n if curzone is None:\r\n curzone = self.getZone() #the zone could be some irrational combination which makes an integer representation impossible\r\n\r\n uvwn = normMatrix(curzone) #in case curzone is not provided as a unit vector\r\n \r\n mx = np.array([])\r\n nm = 0\r\n #list of types of zones to test\r\n \r\n lst = [np.array([1,0,0]), np.array([1,1,0]), np.array([1,1,1]), np.array([1,2,0]), np.array([1, 1, 2]), np.array([1, 2, 2]), np.array([1,3,0]), np.array([1,3,1])]\r\n #lst = [np.array([1,0,0])]\r\n #find the ZA that is closest from a list\r\n for i in lst:\r\n mat = self.getSym(i)\r\n matn = normMatrix(mat)\r\n #test which ones are reachable and only take those\r\n isr = self.isReachable(matn, verbose = False)\r\n matn = matn[:, isr]\r\n #test that matn isn't empty; if all are nan then matn should be empty\r\n if matn.size>0:\r\n #make the dot product with hkl\r\n dp = np.dot(matn.T, uvwn)\r\n #print(dp)\r\n #find where the dot product is maximum - this vector is closest to the current zone\r\n indx = np.argmax(dp)\r\n #print(indx)\r\n if dp[indx,0]>nm:\r\n #check also that the found zone is not identical to curzone. Since they are both unit vectors, dp = 1\r\n if round(dp[indx,0], 6)!=1:\r\n #if it's larger than the already found dot product, then replace the storage\r\n nm = dp[indx,0]\r\n mx = integerRep(matn[:, indx])\r\n #print(nm)\r\n #print(mx)\r\n return mx",
"def solarReturn(self, year):\n sun = self.getObject(const.SUN)\n date = Datetime('{0}/01/01'.format(year),\n '00:00',\n self.date.utcoffset)\n srDate = ephem.nextSolarReturn(date, sun.lon)\n return Chart(srDate, self.pos, hsys=self.hsys)",
"def station_longitude_1(epoch):\n\n # First check that input value is of correct types\n if not isinstance(epoch, Epoch):\n raise TypeError(\"Invalid input type\")\n # Check that the input epoch is within valid range\n y = epoch.year()\n if y < -2000.0 or y > 4000.0:\n raise ValueError(\"Epoch outside the -2000/4000 range\")\n # Set some specific constants for Saturn's opposition\n a = 2451870.17\n b = 378.091904\n m0 = 318.0172\n m1 = 12.647487\n k = round((365.2425 * y + 1721060.0 - a) / b)\n jde0 = a + k * b\n m = m0 + k * m1\n m = Angle(m).to_positive()\n m = m.rad()\n t = (jde0 - 2451545.0) / 36525.0\n # Compute an auxiliary angle\n aa = 82.74 + 40.76 * t\n bb = 29.86 + 1181.36 * t\n cc = 14.13 + 590.68 * t\n dd = 220.02 + 1262.87 * t\n # Convert to radians\n aa = Angle(aa).rad()\n bb = Angle(bb).rad()\n cc = Angle(cc).rad()\n dd = Angle(dd).rad()\n corr = (-68.884 + t * (0.0009 + t * 0.00023) +\n sin(m) * (5.5452 + t * (-0.0279 - t * 0.0002)) +\n cos(m) * (3.0727 + t * (-0.043 + t * 0.00007)) +\n sin(2.0 * m) * (0.1101 + t * (-0.0006 - t * 0.00001)) +\n cos(2.0 * m) * (0.1654 + t * (-0.0043 + t * 0.00001)) +\n sin(3.0 * m) * (0.001 + t * 0.0001) +\n cos(3.0 * m) * (0.0095 - t * 0.0003) +\n sin(aa) * (0.0 + t * (-0.0337 + t * 0.00018)) +\n cos(aa) * (-0.851 + t * (0.0044 + t * 0.00068)) +\n sin(bb) * (0.0 + t * (-0.0064 + t * 0.00004)) +\n cos(bb) * (0.2397 + t * (-0.0012 - t * 0.00008)) +\n sin(cc) * (0.0 - t * 0.001) +\n cos(cc) * (0.1245 + t * 0.0006) +\n sin(dd) * (0.0 + t * (0.0024 - t * 0.00003)) +\n cos(dd) * (0.0477 + t * (-0.0005 - t * 0.00006)))\n to_return = jde0 + corr\n return Epoch(to_return)",
"def get_nearest_station(latitude, longitude):\n url = 'http://realtime.mbta.com/developer/api/v2/stopsbylocation?api_key=wX9NwuHnZU2ToO7GmGR9uw'\n specific_location = '&lat=' + str(latitude) + '&lon=' + str(longitude) + '&format=json'\n final_url = url + specific_location\n response_data = get_json(final_url)\n nearby_station = response_data[\"stop\"][0][\"stop_name\"]\n distance = response_data[\"stop\"][0][\"distance\"]\n \"\"\"if float(distance) < 1:\n distance = float(distance) / 5280\n distance = str(distance)\"\"\"\n return nearby_station, distance",
"def test_aware_floor_year_out_of_dst(self):\n t = fleming.convert_to_tz(\n datetime.datetime(2013, 3, 14, 12, 23, 4, 40), pytz.timezone('US/Eastern'))\n # Original time zone should be in DST\n self.assertEquals(t.tzinfo.dst(t), datetime.timedelta(hours=1))\n ret = fleming.floor(t, year=1)\n # Resulting time zone should not be in DST\n self.assertEquals(ret.tzinfo.dst(ret), datetime.timedelta(0))\n self.assertEquals(ret, datetime.datetime(2013, 1, 1, tzinfo=ret.tzinfo))",
"def station_by_name(self, name):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if name == _[\"properties\"][\"name\"]]\n log.debug(\"searching for station {} found {}\".format(name, station))\n return station[0]\n except:\n log.debug(\"Exception: searching for station {} found None\".format(name))\n return None",
"def object_az_el(source, site, year, doy):\n try:\n coords = APcn.get_icrs_coordinates(source)\n except APcn.NameResolveError as details:\n raise APcn.NameResolveError(details)\n module_logger.debug(\"Sky coords: %s\", coords)\n \n try:\n dss = C.DSS(site)\n module_logger.debug(\"DSS-%d: %f, %f\", site, dss.long*180/pi, dss.lat*180/pi)\n except KeyError:\n raise KeyError('%d is not a valid DSS station' % site)\n loc = APc.EarthLocation(dss.long*u.rad, dss.lat*u.rad)\n module_logger.debug(\"Site coords: %s\", loc)\n \n if doy:\n mjd = DT.MJD(year,doy)\n else:\n raise RuntimeError(\"no DOY given\")\n tt = APt.Time(mjd, format='mjd')\n module_logger.debug(\"ISO time = %s\", tt.iso)\n tt.delta_ut1_utc = 0\n coords.obstime = tt\n coords.location = loc\n return coords.altaz",
"def get_nearest_station(latitude, longitude):\n url=\"https://api-v3.mbta.com/stops?sort=distance&filter[latitude]={latitude}&filter[longitude]={longitude}\".format(longitude=longitude, latitude=latitude)\n data=get_json(url)\n if len(data['data'])==0:\n return ()\n if len(data['data'][0])==0:\n return ()\n if len(data['data'][0]['attributes'])==0:\n return ()\n if len(data['data'][0]['attributes']['name'])==0:\n return ()\n\n returnable=(data['data'][0]['attributes']['name'], data['data'][0]['attributes']['wheelchair_boarding'])\n return returnable",
"def gains_solar(zone, hour):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.results.gains.solar\", \r\n zone.eco_id, hour)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)",
"def get_closest(self, point):\n distance = (self.dpath[:, 1] - point[1]) ** 2 + (self.dpath[:, 0] - point[0]) ** 2\n i = np.where(distance == distance.min())\n return i[0][0]",
"def get_persons_closest_to_average(table):\n\n years_list = common.get_values_from_column(table, 2, \"int\")\n average_year = common.get_average_value(years_list)\n\n lowest_difference = float(\"inf\")\n\n for i in range(len(table)):\n difference = (int(table[i][2]) - average_year)\n if abs(difference) < lowest_difference:\n lowest_difference = abs(difference)\n closest_value = table[i][2]\n\n closest_people = [table[i][1] for i in range(len(table)) if table[i][2] == closest_value]\n\n return closest_people",
"def find_nearest_station(self, location):\n nearest = None\n dist = float('inf')\n for st in self._stations:\n loc_dist = st.distance_to(location)\n if loc_dist < dist:\n nearest = st\n dist = loc_dist\n return nearest",
"def get_ate(df, country_name, year):\n Y1 = get_sr(df,country_name,year)\n Y0 = 0.5*(get_sr(df,country_name,year-1) +\n get_sr(df,country_name,year+1))\n ATE = Y1 - Y0\n return ATE",
"def find_station(value):\n for line in self.line_list:\n station = line.find_station(value)\n if station:\n return station\n return None",
"def station_by_id(self, id):\n\n try:\n station = [_ for _ in self.stations[\"features\"] if _[\"properties\"][\"station_id\"] == id]\n log.debug(\"searching for station_id {} found {}\".format(id, station))\n return station[0]\n except:\n log.debug(\"searching for station_id {} found None\".format(id))\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return true if a row exists for the specified date | def _row_exists(self, session, fordate):
return (session.query(EconomicIndicator).filter_by(Date=fordate).count() > 0) | [
"def __contains__(self, date):\n return self._first_day <= date <= self._last_day",
"def has_date(self, line):\n return self.verify_match(line, self.date)",
"def exists(self, initdate, enddate):\n return self.queue.exists(initdate, enddate)",
"def check_generated_data(date) -> bool:\n stats_yest = f\"data/player_stats/player_stats_{date}.csv\"\n return os.path.isfile(stats_yest)",
"def row_exists(db, table, row, id):\n\tq = db.execute(\"select id from %s where %s = ?\" % (table,row) , [id])\n\tres = q.fetchall()\n\treturn True if res else False",
"def check(date, key):\n shelf = get_shelf()\n dt = date.strftime(\"%Y-%m-%d\")\n skey = '%s-%s' % (dt, key)\n if skey in shelf.keys():\n print(\"Already downloaded: %s\" % skey)\n return False\n return skey",
"def CheckDate(self, date): # ............................. Event.CheckDate\n # Check if this is the correct type\n if type(date) != dt.date:\n if type(date) == dt.datetime:\n date = date.date()\n else:\n logging.error(\"Invalid date object.\")\n return False\n \n # Check assuming no repeats \n if self.dtStart.date() == date:\n return True\n elif self.dtStart.date() > date:\n return False\n \n # Check if this event repeats\n r = self.rrule # Just keeps things simple\n if r:\n # Is this date in the excluded dates?\n if self.exdate and date in self.exdate:\n print(date)\n return False\n if \"UNTIL\" in r.keys() and r[\"UNTIL\"].date() < date:\n return False\n if \"FREQ\" in r.keys() and r[\"FREQ\"] == \"WEEKLY\":\n if \"BYDAY\" in r.keys():\n weekday = {\"MO\":0, \"TU\":1, \"WE\":2, \"TH\":3, \"FR\":4}.get(\n r[\"BYDAY\"].strip())\n return weekday == date.weekday()\n return False",
"def is_this_record_exist(table, id_):\n if id_[0] not in [record[0] for record in table]:\n\n ui.print_error_message(\"Record with this ID not found\")\n return False\n return True",
"def CheckFirstTime(self, data):\n cursor = self._conn.cursor()\n sqli = \"\"\"select * from attendance where cedula = %s and fecha = %s and hora = %s and estado = %s\"\"\"\n #logging.info(\"Ejecutando query %s\" % sqli)\n #logging.info(\"datos %s,%s,%s, %s\" % data)\n try:\n cursor.execute(sqli, data)\n self._conn.commit()\n except:\n self._conn.rollback()\n cursor.execute(sqli, data)\n self._conn.commit()\n \n result = cursor.fetchall()\n\n if result:\n #logging.info(\"el dato ya existe en la base de datos %s, %s, %s, %s\" % data)\n return False\n else:\n #logging.info(\"primera instancia del dato en la base de datos %s, %s, %s, %s\" % data)\n return True",
"def isRecordExist(self):\n self.createConn()\n sql = \"SELECT * FROM Story WHERE book1='{b1}' AND book2='{b2}' AND title ='{t}'\".format(b1=self.book1, b2=self.book2, t=self.title)\n self.c.execute(sql)\n data = self.c.fetchall()\n self.conn.close()\n if len(data) > 0:\n print('Record exist already, skip.')\n return True\n return False",
"def isRecordExistSummary(self):\n self.createConn()\n sql = \"SELECT * FROM Summary WHERE book1='{b1}' AND book2='{b2}' \".format(b1=self.book1, b2=self.book2)\n self.c.execute(sql)\n data = self.c.fetchall()\n self.conn.close()\n if len(data) > 0:\n print('Record exist already, skip.')\n return True\n return False",
"def exist(self,sku,table):\n cursor = self.mydb.cursor()\n exist_statement = \"SELECT Price FROM {} WHERE SKU='{}'\".format(table,sku)\n cursor.execute(exist_statement)\n result = cursor.fetchone()\n cursor.close()\n if result == None:\n return False\n else:\n return True",
"def _check_date(self, cr, uid, ids):\n for deleg in self.browse(cr, uid, ids):\n if deleg.dismissal_date <= deleg.employee_id.first_employement_date:\n return False\n return True",
"def symbol_exists(symbol, dbfilename=\"data/stocks.db\"):\n \n conn = sqlite3.connect(dbfilename, \n detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n sql = \"SELECT symbol, date as 'date [datetime]' from stocks where symbol='%s';\" % (symbol)\n qry = conn.execute(sql)\n recs = qry.fetchall()\n schema = np.dtype({'names':['symbol', 'date'],\n 'formats':['S8', 'M8[D]']})\n table = np.array(recs, dtype=schema)\n\n startdate = np.datetime64(table['date'][0])\n enddate = np.datetime64(table['date'][-1])\n return len(table), startdate, enddate",
"def __is_in_challenge(date):\n if date:\n start = challenge_mgr.get_challenge_start()\n end = challenge_mgr.get_challenge_end()\n return date >= start and date <= end\n else:\n return False",
"def exists(self, identifier):\n return False",
"def __event_exists(self, event, calendar):\n query = gdata.calendar.service.CalendarEventQuery(calendar, 'private', 'full')\n query.max_results = 1000\n query.sortorder = 'ascending'\n query.start_min = event.when[0].start_time\n query.start_max = event.when[0].end_time\n\n events = self.calendar_service.CalendarQuery(query)\n\n for i, an_event in enumerate(events.entry):\n if (an_event.title.text == event.title.text and \\\n an_event.where[0].value_string == event.where[0].value_string and \\\n an_event.content.text == event.content.text and \\\n an_event.when[0].start_time == event.when[0].start_time and \\\n an_event.when[0].end_time == event.when[0].end_time):\n return True\n \n return False",
"def check_date(date: str):\n if re.search(r'\\d{4}-\\d{2}-\\d{2}', date):\n return True\n return False",
"def check_dates(self):\n import datetime\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n if row['Order_date'] is not None:\n if isinstance(row['Order_date'], datetime.date):\n check += 0\n else:\n check += 1\n error = \"Order date not a valid date, see row %s in file\" % (row_index + 4)\n error_details.append(error)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the end address of this selection. Address | def getEndAddress(self) -> ghidra.program.model.address.Address:
... | [
"def end_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"end_address\")",
"def end_ip_address(self) -> Optional[str]:\n return pulumi.get(self, \"end_ip_address\")",
"def getEndingAddress(self):\n return HopperLowLevel.getBasicBlockEndingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)",
"def _end_address(self) -> int:\n return (\n base_address\n + (\n (len(sundog.base_address_sentinel) + len(self.sunspec_device.get_mb()))\n // 2\n )\n + 2\n )",
"def get_end(self):\n # type: () -> int\n return self._end_list[-1]",
"def end_point(self) -> int:\n return self._end_point",
"def getLayoutEndAddress(self, i: int) -> ghidra.program.model.address.Address:\n ...",
"def _get_end(self):\n return self.Data.End",
"def end_ip(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"end_ip\")",
"def ip_end(self):\n return self._ip_end",
"def get_segment_end(self, _ea):\t\n\t\treturn idc.SegEnd(_ea)",
"def vlan_end(self):\n return self.address_pool.vlan_end",
"def getEndLocation(self):\n ends = [\"End of the Project Gutenberg EBook\",\n \"End of Project Gutenberg's\",\n \"\\*\\*\\*END OF THE PROJECT GUTENBERG EBOOK\",\n \"\\*\\*\\* END OF THIS PROJECT GUTENBERG EBOOK\"]\n joined = '|'.join(ends)\n pat = re.compile(joined, re.IGNORECASE)\n endLocation = None\n for line in self.lines:\n if pat.match(line) is not None:\n endLocation = self.lines.index(line)\n self.endLine = self.lines[endLocation]\n break\n\n if endLocation is None: # Can't find the ending.\n logging.info(\"Can't find an ending line. Assuming that the book ends at the end of the text.\")\n endLocation = len(self.lines)-1 # The end\n self.endLine = 'None'\n\n logging.info('End line: %s at line %s' % (self.endLine, endLocation))\n return endLocation",
"def last(self):\n return int(self._end)",
"def getDhcpRangeEnd(self):\n broadcast = ipAddressHelper.getBroadcastAddress(self.getIp().getNetwork().getAddress(), self.getIp().getNetwork().getNetmask())\n return ipAddressHelper.getIpAddressDifference(self.end, broadcast) - 1 #starts at 256",
"def end_index(self):\n return self.stoi.get(self.end_symbol, -1)",
"def end(self):\n\t\treturn self.__params['end']",
"def get_end_symbol():\n return Symbol.END_SYMBOL",
"def lattice_end(self):\n return self.__lattice_end"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the start location. ProgramLocation | def getFrom(self) -> ghidra.program.util.ProgramLocation:
... | [
"def getTo(self) -> ghidra.program.util.ProgramLocation:\n ...",
"def location(self) -> str:\n if self.__expanded_launch_file_path is None:\n # get_launch_description() has not been called yet\n return ' + '.join([str(sub) for sub in self.__launch_file_path])\n return self.__expanded_launch_file_path",
"def get_start_system(self):\n return self.os.start_system",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def start(self) -> SourceLocation:\n return self._start",
"def location(self):\n return self._redunda.location",
"def getEntryPoint(self):\n return HopperLowLevel.getEntryPoint(self.__internal_document_addr__)",
"def get_start_location(self):\n return self.df.geometry.iloc[0]",
"def getStartingAddress(self):\n return HopperLowLevel.getSectionStartingAddress(self.__internal_section_addr__)",
"def getStartingAddress(self):\n return HopperLowLevel.getBasicBlockStartingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)",
"def GetExecutablePath(self):\n\n return self.__qmtest_path",
"def home(self):\n return self._get_system_properties(self.java)['java.home']",
"def location():\n import sys\n end = \"\\n\" if sys.stdout.isatty() else \"\"\n print(path.scriptdir, end=end)",
"def start(self):\n\t\treturn self.__params['start']",
"def executable_path(self):\n return self._executable_path",
"def setSelfStartLocation(self):\n if (self.game_info.player_start_location.x == 24.5):\n # left side of map\n if (self.game_info.player_start_location.y == 22.5):\n self.startLocation = StartLocation.BOTTOM_LEFT\n else:\n self.startLocation = StartLocation.TOP_LEFT\n else:\n # right side of map\n if (self.game_info.player_start_location.y == 22.5):\n self.startLocation = StartLocation.BOTTOM_RIGHT\n else:\n self.startLocation = StartLocation.TOP_RIGHT\n \n self.loggerBase.info(\"Start location is \" + str(self.startLocation))\n\n if self.player == Player.PLAYER_ONE:\n BuildListProcessBotBase.PLAYER_ONE_START_LOCATION = self.startLocation\n else:\n BuildListProcessBotBase.PLAYER_TWO_START_LOCATION = self.startLocation",
"def __get_file_root_location(self):\n\n return self.main_location",
"def get_corelocation():\n corelocation = os.popen('swift ./corelocation.swift -json').read()\n null = ''\n loc_dict = eval(corelocation)\n loc_dict['address'] = loc_dict['address'].replace('\\n', ',')\n return loc_dict",
"def location(self):\n return self.patient.get('location', None)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the start address of this selection. Address | def getStartAddress(self) -> ghidra.program.model.address.Address:
... | [
"def getStartingAddress(self):\n return HopperLowLevel.getSegmentStartingAddress(self.__internal_segment_addr__)",
"def getStartingAddress(self):\n return HopperLowLevel.getSectionStartingAddress(self.__internal_section_addr__)",
"def start(self) -> SourceLocation:\n return self._start",
"def getStartingAddress(self):\n return HopperLowLevel.getBasicBlockStartingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)",
"def getDhcpRangeStart(self):\n netip = self.getIp().getNetwork().getAddress()\n return ipAddressHelper.getIpAddressDifference(self.start, netip)",
"def get_start_line(self):\n if self._start_line == 0 and self._ast_elem_list != []:\n self._start_line = self._ast_elem_list[0].coord.line\n\n return self._start_line",
"def get_start_location(self):\n return self.df.geometry.iloc[0]",
"def get_segment_start(self, _ea):\n\t\treturn idc.SegStart(_ea)",
"def get_start(self) -> int:\n return self.__pos_x",
"def start_point(self) -> int:\n return self._start_point",
"def first_address(self):\n \n # The first word in the hex file is the address of the first byte \n # sequence.\n first_word = self.hex_view()[:8]\n \n # The address is a hexadecimal value. Convert it to decimal.\n return int(first_word, 16)",
"def _get_start(self):\n return self.Data.Start",
"def ip_start(self):\n return self._ip_start",
"def _get_min_addr(self) -> Optional[int]:\n\n if not self._regions:\n if self.project.arch.name != \"Soot\":\n l.error(\"self._regions is empty or not properly set.\")\n return None\n\n return next(self._regions.irange())",
"def getStartStart(self):\n for ro in self.lstOfRows:\n if (ro.classification==\"start_codon\"):\n return(int(ro.start))",
"def first(self):\n return int(self._start)",
"def getArrayStartAddress(self,address):\n return HopperLowLevel.arrayStartAddress(self.__internal_segment_addr__,address)",
"def addr(self):\n return self.__addr",
"def absstart(self):\n if hasattr(self, \"docstart\") and self.docstart > 0:\n return self.docstart\n else:\n return self.start"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the end location. ProgramLocation | def getTo(self) -> ghidra.program.util.ProgramLocation:
... | [
"def getFrom(self) -> ghidra.program.util.ProgramLocation:\n ...",
"def getEndLocation(self):\n ends = [\"End of the Project Gutenberg EBook\",\n \"End of Project Gutenberg's\",\n \"\\*\\*\\*END OF THE PROJECT GUTENBERG EBOOK\",\n \"\\*\\*\\* END OF THIS PROJECT GUTENBERG EBOOK\"]\n joined = '|'.join(ends)\n pat = re.compile(joined, re.IGNORECASE)\n endLocation = None\n for line in self.lines:\n if pat.match(line) is not None:\n endLocation = self.lines.index(line)\n self.endLine = self.lines[endLocation]\n break\n\n if endLocation is None: # Can't find the ending.\n logging.info(\"Can't find an ending line. Assuming that the book ends at the end of the text.\")\n endLocation = len(self.lines)-1 # The end\n self.endLine = 'None'\n\n logging.info('End line: %s at line %s' % (self.endLine, endLocation))\n return endLocation",
"def getEndAddress(self) -> ghidra.program.model.address.Address:\n ...",
"def end_point(self) -> int:\n return self._end_point",
"def getEndingAddress(self):\n return HopperLowLevel.getBasicBlockEndingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)",
"def getExitLocation(self):\n pos = self.gateway.exitPortal\n return None if pos == None else Location(self.world, pos.getX(), pos.getY(), pos.getZ())",
"def end_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"end_address\")",
"def get_location(self, step=-1):\n if step != -1 and len(self.path) == 1:\n return self.path[-1]\n return self.path[step]",
"def location(self):\n return self._redunda.location",
"def end_coord(self):\n return self.lat_e, self.lon_e",
"def location(self) -> str:\n return pulumi.get(self, \"location\")",
"def Location(self) -> str:",
"def _end_address(self) -> int:\n return (\n base_address\n + (\n (len(sundog.base_address_sentinel) + len(self.sunspec_device.get_mb()))\n // 2\n )\n + 2\n )",
"def end(self):\n\t\treturn self.__params['end']",
"def read_end_point(self):\n return int(self.visa_ask(':stop?'))",
"def location(self):\n return self.patient.get('location', None)",
"def end_coordsys(self):\n coordsys = copy(self.location)\n coordsys.origin = self.end_point\n return coordsys",
"def _get_end(self):\n return self.Data.End",
"def get_game_location(self, game):\n return self.get_normalized_location(game['LOCATION'].text)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all the claimable balances an account has to claim. | def getClaimableBalances(public_key: str) -> list:
balances = server.claimable_balances().for_claimant(public_key).call()['_embedded']['records']
return [ {"sponsor": elem.get("sponsor"), "id": elem.get("id"), "asset": elem.get("asset").replace('native', 'XLM').split(':')[0], "amount": round(int(float(elem.get("amount"))))} for elem in balances ] | [
"def balances():\n return _make_request('balances', private=True)['balances']",
"def get_balance(self):\n returnList = []\n for account in self.accounts:\n balance = self.f.get_balance(account).amount.amount + 42\n returnList.append(BalanceItem(account.iban, balance ))\n return returnList",
"def get_item_balances(self, acc: Account) -> list:\n items = []\n entries = self.get_entries(acc)\n for item in entries.filter(source_invoice=self).order_by('id'):\n assert isinstance(item, AccountEntry)\n settlements = sum_queryset(entries.filter(settled_item=item))\n bal = item.amount + settlements if item.amount is not None else settlements\n items.append((item, bal))\n return items",
"def get_balances(self):\n self.inventory = []\n for bal in self.account['balances']:\n symbol = bal['asset']\n amount = float(bal['free']) + float(bal['locked'])\n \n if (amount > 0 or symbol in TRADE_CURRENCIES) and (symbol in self.currencies):\n coin = deepcopy(self.currencies[self.currencies.index(symbol)])\n coin.amount = amount\n self.inventory.append(coin)\n\n if (symbol not in TRADE_CURRENCIES):\n print('Non-zero balance for ' + symbol + ' not included in trade currencies!')",
"def get_fund_balances():\n return _format_fund_balances(load_cash_forecasts(\"fund_balances\"))",
"def test_get_account_balances_using_get(self):\n pass",
"def get_account_balance(self):\n return self.execute_private_api(\"/api/accounts/balance\", \"GET\")",
"def get_balance(self):\n current_balance = 0\n\n for item in self.ledger:\n current_balance += item[\"amount\"]\n\n return current_balance",
"async def futures_account_balance(self, **params):\r\n return await self.client_helper(\"futures_account_balance\", **params)",
"def getBalance(self):\n\n balance = 0\n for item in self.ledger:\n balance += item[\"amount\"]\n\n return balance",
"def get_bills(self):\n bills = []\n bill_data = self.get('/members/' + self.id + '/bills/introduced')\n for bill in bill_data[0]['bills']:\n bills.append(\n Bill(\n title=bill['title'],\n url=bill['congressdotgov_url'],\n congress=bill['congress']\n )\n )\n return bills",
"def get_asset_balance(self):\n return self.client.get_asset_balance(asset)",
"def balance(self):\n assert self._id, \"Account must be created first.\"\n\n if hasattr(opentxs, 'OTAPI_Wrap_getAccountData'): # new api name\n res = opentxs.OTAPI_Wrap_getAccountData(self.server_id, self.nym._id, self._id)\n else: # todo: old api name, remove in due time\n res = opentxs.OTAPI_Wrap_getAccountFiles(self.server_id, self.nym._id, self._id)\n if res < 0:\n raise ReturnValueError(res)\n return opentxs.OTAPI_Wrap_GetAccountWallet_Balance(self._id)",
"def get_account_balance(self):\n return self.mtc.get_account_balance()['AvailableBalance']",
"def get_accounts_balance(self, period_start: datetime, period_end: datetime):\n query = (\n \"SELECT a.guid, sum(s.value_num) \"\n \"FROM \"\n \" accounts AS a INNER JOIN \"\n \" splits AS s ON a.guid = s.account_guid INNER JOIN \"\n \" transactions AS t ON s.tx_guid = t.guid \"\n \"WHERE t.post_date >= ? AND t.post_date < ? \"\n \"GROUP BY a.guid;\"\n )\n query_params = (datetime2db(period_start), datetime2db(period_end))\n return self._conn.execute(query, query_params).fetchall()",
"def get_balances(self, address: Union[FactoidAddress, str]):\n address = FATd.validate_address(address)\n return self._request(\"get-balances\", {\"address\": address})",
"def balances_by_address(account):\n return wallet['obj'].balances_by_address(account)",
"def find_all_baseclaims(board: Board) -> Set[Baseclaim]:\n baseclaims = set()\n playable_squares = board.playable_squares()\n\n for first in playable_squares:\n for second in playable_squares:\n square_above_second = Square(row=second.row - 1, col=second.col)\n if first == second or second.row % 2 == 0:\n continue\n for third in playable_squares:\n if third == first or third == second:\n continue\n if connection.is_possible(a=first, b=square_above_second) and connection.is_possible(a=second, b=third):\n baseclaims.add(Baseclaim(first=first, second=second, third=third))\n if connection.is_possible(a=third, b=square_above_second) and connection.is_possible(a=second, b=first):\n baseclaims.add(Baseclaim(first=third, second=second, third=first))\n\n return baseclaims",
"def get_balance(self):\n if self.available:\n return self.total_amount\n else:\n raise ValueError('This bank account is closed')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
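An illustrative usage sketch for the row above (not part of the dataset rows): it queries the same Horizon claimable_balances endpoint through stellar_sdk; the Horizon URL and the claimant key are placeholders.

from stellar_sdk import Server

if __name__ == "__main__":
    server = Server("https://horizon-testnet.stellar.org")
    claimant = "GDKFNRUATPH4BSZGVFDRBIGZ5QAFILVFRIRYNSQ4UO7V2ZQAPRNL73RI"  # placeholder key
    records = server.claimable_balances().for_claimant(claimant).call()["_embedded"]["records"]
    for rec in records:
        # Horizon encodes XLM as "native"; issued assets look like "CODE:ISSUER"
        code = rec["asset"].replace("native", "XLM").split(":")[0]
        print(f'{rec["amount"]} {code} sponsored by {rec["sponsor"]} (id {rec["id"]})')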
Check whether an asset like that already exists in the balances of the account, to decide if a trustline must be established | def checkTrustline(asset: str, issuer: str, available_assets: list) -> bool:
    for elem in available_assets:
        # a matching asset code means the trustline already exists; trustlines are
        # per (code, issuer) pair, so `issuer` is available for a stricter match
        if elem.get("asset") == asset:
            return True
    return False
"def check_assets(self):\n try:\n active_assets = r.get(f\"active_{self.curr}_{self.user_id}\")\n if not active_assets:\n r.set(f\"active_{self.curr}_{self.user_id}\", 0)\n return False\n active_assets = Decimal(active_assets.decode())\n\n if self.side == 'bid' and active_assets >= self.total_quantity + self.bid_commission:\n return True\n if self.side == 'ask' and active_assets >= self.traded_quantity + self.ask_commission:\n return True\n return False\n except Exception as e:\n log.exception(f\"Error_occurred - {e}\")",
"def test_check_balance():\n print('\\n', \"Checking wif balance\")\n call.nspv_login(wif_real)\n res = call.type_convert(call.nspv_listunspent())\n amount = res.get(\"balance\")\n if amount > 0.1:\n pass\n else:\n pytest.exit(\"Not enough balance, please use another wif\")",
"def verify_balance(self):\n total_created = 0\n total_consumed = 0\n\n for consumed_coin in self.consumed_coins:\n total_consumed += consumed_coin.value\n for created_coin in self.created_coins:\n total_created += created_coin.value\n\n return total_consumed == total_created",
"def has_enough_cash():\n r = requests.get('https://api.lendingclub.com/api/investor/v1/accounts/{}/availablecash'\n .format(investor_id), headers=headers)\n\n cash = r.json()['availableCash']\n logging.debug(cash)\n return True if cash > cash_reserves + amount else False",
"def credit(self, account):\n #stefan\n if self.account >= \"500\": # initialize self.account\n return True\n else:\n return False",
"def check_account():\n\n\tglobal account_balance\n\tprint(\"Your current account balance is :\", account_balance)",
"def validate_main_tx_funds(self, block):\n if len(block.transactions) > 1:\n bonusTx = block.transactions[0]\n mainTx = block.transactions[1]\n coveringTxs = []\n totalAmount = 0\n enoughFunds = False\n bonusOk = False\n if bonusTx.amount==10:\n bonusOk=True\n for tx in self.unspentTxs:\n if tx.receiver == CryptoLib.getAddressFromPublicKey(mainTx.senderPublicKey.y):\n coveringTxs.append(tx)\n totalAmount += tx.amount\n \n if totalAmount >= mainTx.amount:\n enoughFunds = True\n break\n if enoughFunds and bonusOk:\n change = totalAmount - mainTx.amount\n self.update_tx_inputs(block, change, coveringTxs)\n self.change_unspent_txs(block)\n return True\n else:\n return False\n else:\n bonusTx = block.transactions[0]\n if bonusTx.amount==10:\n self.change_unspent_txs(block)\n return True\n else:\n return False",
"def test_account_credits_excluded(self):\n self.nve_test(\n \"input_1000085283202600721.json\", \"expected_1000085283202600721.json\"\n )",
"def validate(self):\n total = sum([entry.amount for entry in self.entries.all()])\n if total != Decimal(0):\n raise TransactionBalanceException(\n \"Credits do not equal debits. Mis-match of %s.\" % total)\n return True",
"def _asset_afford_trade(self, trade_amount, trade_price):\n waited_asset_times = 0\n # NOTE: since we lock the trade, only 1 request needed\n asset_info = AssetInfo.from_api(self.plt)\n if self.catalog == 'sell':\n asset_amount = asset_info.afford_sell_amount()\n if asset_amount >= trade_amount:\n return True\n else:\n return False\n # catalog == 'buy'\n while True:\n asset_amount = asset_info.afford_buy_amount(trade_price)\n if asset_amount >= trade_amount:\n return True\n else: # asset_amount not enough\n waited_asset_times += 1\n if waited_asset_times > config.ASSET_WAIT_MAX:\n Trader._logger.critical(\n '{}: not afford to \"{}\" after waiting > {} times'.format(\n self.plt_name, self.catalog, config.ASSET_WAIT_MAX))\n # TODO should avoid further \"not afford\"\n return False\n # adjust to \"nearer price\"\n # FIXME this conflicts with the currently adjusted arbitrage prices\n trade_price -= (trade_price - self.price) / (config.ASSET_WAIT_MAX + 1)",
"def check_for_updated_balance(self, snowflake):\n transaction_list = rpc.listtransactions(snowflake, 100)\n for tx in transaction_list:\n if tx[\"category\"] != \"receive\":\n continue\n if tx.get('generated') is True:\n continue\n txid = tx[\"txid\"]\n amount = tx[\"amount\"]\n confirmations = tx[\"confirmations\"]\n address = tx[\"address\"]\n deposit_status = self.get_transaction_status_by_txid(txid)\n user = self.get_user_by_address(address)\n\n # This address isn't a part of any user's account\n if not user:\n continue\n\n snowflake_cur = user[\"snowflake_pk\"]\n\n if deposit_status == \"DOESNT_EXIST\" and confirmations >= MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_to_balance(snowflake_cur, amount)\n self.add_deposit(snowflake_cur, amount, txid, 'CONFIRMED')\n elif deposit_status == \"DOESNT_EXIST\" and confirmations < MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_deposit(snowflake_cur, amount,\n txid, 'UNCONFIRMED')\n self.add_to_balance_unconfirmed(snowflake_cur, amount)\n elif deposit_status == \"UNCONFIRMED\" and confirmations >= MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_to_balance(snowflake_cur, amount)\n self.remove_from_balance_unconfirmed(snowflake_cur, amount)\n self.confirm_deposit(txid)",
"def check_balance(self, key: bytes, timestamp: float) -> int:\n balance = 0\n for block_transactions in self.chain.values():\n for transaction in block_transactions:\n if transaction.sender == key:\n balance -= transaction.amount + transaction.fee\n if transaction.recipient == key:\n balance += transaction.amount\n for transaction in self.transaction_pool:\n if transaction.sender == key and transaction.timestamp < timestamp:\n balance -= transaction.amount + transaction.fee\n if transaction.recipient == key and \\\n transaction.timestamp < timestamp:\n balance += transaction.amount\n return balance",
"def is_bankrupt(self) -> bool:\n return self.bank <= 0",
"def check_account_unique(self):\n for rec in self:\n result = False\n for account in rec.account_ids:\n self.env.cr.execute(\"\"\"\n SELECT COUNT(account_id)\n FROM account_budget_rel\n WHERE account_id=%s\n \"\"\",\n (account.id,))\n result = self.env.cr.fetchone()[0] or False\n if result > 1:\n raise ValidationError(_('You can not choose the account \"%s\" because it exists in another budget position .')%(account.name))",
"def is_auction_line(self, params):\n if len(params) > 1 and params[2] == 'SELL':\n return True\n return False",
"def check_budget(self):\n if self.invoice_line_ids:\n for line in self.invoice_line_ids:\n if line.budget_confirm_id.state in ('waiting_valid','complete','unvalid') and line.account_budget_required == True:\n line.budget_confirm_id.check_budget_invoice()\n self.change_state()\n else:\n raise ValidationError(_(\"You must enter at least one line in Bill Information!!!\"))",
"def verify_transaction(self, transaction):\n\t\tsender = Bee(transaction.sender, 0)\n\t\tsender.calculate_balance(self.chain, self.last_block().index + 1)\n\n\t\treturn sender.honeycomb >= int(transaction.amount)",
"def investigate_accounts(self, all:bool = True, intrusive:bool = True): #TODO: make these default to False for public use\n if (w3.isConnected()):\n coinbase = None\n try:\n coinbase = w3.eth.coinbase\n except Exception as e:\n cprint(\"Coinbase not available: {}\".format(e), \"red\") \n accounts = w3.eth.accounts\n if len(accounts) == 0:\n cprint(\"No accounts found\", \"red\")\n if type(coinbase) is None: #TODO: check if we need this. If accounts = [] , then there shouldn't be coinbase (?)\n cprint(\"Nothing to do here\")\n return 0\n \n if all:\n for account in accounts:\n cprint(\"Balance of {} is : {}\".format(account, w3.eth.getBalance(account)), \"white\")\n # try: \n # cprint(\"Trying to unlock {}: {}\".format(account, w3.parity.personal.unlockAccount(account, \"\")), \"white\")\n # except Exception as e:\n # cprint(\"Failed to unlock: {}\".format(e))\n pass\n else:\n cprint(\"Number of Accounts: {}\".format(len(w3.eth.accounts)), \"green\") \n\n \n #cprint(\"logs: {}\".format(w3.eth.getLogs()), \"white\") #needs to pass filter_params --> maybe based on the accounts? filter events of the accounts hu? \n\n if \"parity\" in (w3.clientVersion.lower()):\n ww3 = w3.parity\n elif \"geth\" in (w3.clientVersion.lower()): \n ww3 = w3.geth\n\n if intrusive:\n try:\n cprint(\"importRawKey: {}\".format(ww3.personal.importRawKey(LEGION_TEST_PRV, LEGION_TEST_PASS) ), \"green\")\n except Exception as e:\n cprint(\"importRawKey: {}\".format(e), \"yellow\")\n try:\n cprint(\"newAccount: {}\".format(ww3.personal.newAccount(LEGION_TEST_PASS)), \"white\")\n except Exception as e:\n cprint(\"newAccount: {}\".format(e), \"yellow\") \n\n cprint(\"--\" * 32)",
"def balance_between_account_with_hardcode_policy(self, balance_record_a , balance_record_b):\n # if balance_record_a.deposit != 0 or balance_record_a.withdraw != 0:\n # return False\n # if balance_record_b.deposit != 0 or balance_record_b.withdraw != 0:\n # return False\n # balance_record_a[\"price\"] = 0.1\n # balance_record_b[\"price\"] = 0.1\n # print balance_record_a[\"price\"],balance_record_b[\"price\"],balance_record_a[\"amount\"]*balance_record_a[\"price\"],balance_record_b[\"amount\"]*balance_record_b[\"price\"]\n if balance_record_a[\"amount\"]*balance_record_a[\"price\"] + balance_record_b[\"amount\"]*balance_record_b[\"price\"] < 1.5:\n return False,0\n amount = (balance_record_a[\"amount\"] - balance_record_b[\"amount\"])/2\n if amount*balance_record_a[\"price\"] < 0.3:\n return False,0\n return True,amount\n # self._balance_between_account(balance_record_a, balance_record_b, amount)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
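A related sketch (illustrative only, with assumed field names from Horizon's /accounts response, where non-native balance entries carry asset_code and asset_issuer): the same question, whether the account already trusts a given asset, answered directly from the account's balances.

from stellar_sdk import Server

def has_trustline(public_key: str, asset_code: str, asset_issuer: str) -> bool:
    # non-native balance entries record the trusted asset's code and issuer
    balances = Server("https://horizon-testnet.stellar.org").accounts().account_id(public_key).call()["balances"]
    return any(
        b.get("asset_code") == asset_code and b.get("asset_issuer") == asset_issuer
        for b in balances
    )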
Generate an XDR to Claim a Balance using Albedo or web+stellar | def XDRForClaimableBalance(public_key: str, balance_id: str, asset=None, asset_issuer=None):
base_fee = server.fetch_base_fee()
if(getAssets(public_key)[0] == 0):
# 3. User does not have enough XLM to pay fees
account = server.load_account(public_key)
transaction = TransactionBuilder(
source_account=account,
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
base_fee=base_fee,
).append_claim_claimable_balance_op(
balance_id=balance_id,
source=public_key
).build()
return transaction.to_xdr()
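        # NOTE: the unconditional return above ends case 3 here, so the sponsorship
        # block below is never reached as written, and other_account_pub_key /
        # quest_account_pub_key are not defined anywhere in this function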
# 4. User does not have enough XLM to pay fees or establish trustline
transaction = TransactionBuilder(
source_account=account,
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
base_fee=base_fee,
).append_begin_sponsoring_future_reserves_op(
sponsored_id=other_account_pub_key,
source=quest_account_pub_key
).append_create_account_op(
destination=other_account_pub_key,
starting_balance="0",
source=quest_account_pub_key
).append_end_sponsoring_future_reserves_op(
source=other_account_pub_key
).build()
return transaction.to_xdr()
else:
account = server.load_account(public_key)
        if (asset is None and asset_issuer is None) or checkTrustline(asset, asset_issuer, getAssets(public_key)):
# 1. User has enough XLM to pay fees for claimable balances
transaction = TransactionBuilder(
source_account=account,
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
base_fee=base_fee,
).append_claim_claimable_balance_op(
balance_id=balance_id,
source=public_key
).build()
return transaction.to_xdr()
# 2. User has enough XLM to pay fees but needs to establish Trustline
transaction = TransactionBuilder(
source_account=account,
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
base_fee=base_fee,
).append_change_trust_op(
asset_code=asset,
asset_issuer=asset_issuer
).append_claim_claimable_balance_op(
balance_id=balance_id,
source=public_key
).build()
return transaction.to_xdr() | [
"def generate_reward_tx(rewardee, base_fee = None):\n try:\n source_acc = server.load_account(PUBLIC_KEY) # fetch sequence or will it be used with a fee bump?\n except:\n print(f\"Failed to load public reward account {PUBLIC_KEY}!\")\n return None\n\n fee = BASE_FEE\n\n try:\n fee = server.fetch_base_fee() if base_fee == None else base_fee\n except:\n print(f\"Error fetching base fees from networking! Defaulting to {BASE_FEE}\")\n tx = TransactionBuilder(\n source_account=source_acc,\n network_passphrase=STELLAR_PASSPHRASE,\n base_fee=fee\n )\\\n .add_text_memo(\"Lumenaut reward!\")\n \n for rewarded in rewardee:\n reward = round(rewarded[1], 7)\n if reward <= 0:\n continue\n\n tx.append_create_claimable_balance_op(\n asset=asset.Asset.native(), \n amount=str(reward),\n claimants=[Claimant(rewarded[0]), Claimant(PUBLIC_KEY)]\n )\n\n now = int(time.time())\n xdr = tx.add_time_bounds(now, 0).build().to_xdr() # make valid one day from now\n\n return xdr",
"def _gen_authorization_data(\n self,\n spn: str,\n domain: str,\n ) -> bytes:\n # fmt: off\n\n # Build the AD Negotiation data set.\n # AdETypeNegotiation -> RC4_HMAC_NT\n # 0x17 -> RC4_HMAC_NT\n # Since we can't build an ASN.1 sequence without a#,\n # we are just going to hard code this sequence\n # \n # 300f a004 0202 0081 a107 0405 3003 0201\n # 17\n # ----------------------------------------\n # 30 0f - SEQ\n # a0 04 02 02 00 81 - INT: 0x81\n # a1 07 04 05 - OCT STRING\n # 30 03 02 01 17 - SEQ -> INT: 0x17\n neg_type = b\"\\x30\\x03\\x02\\x01\\x17\"\n negotiation_type_data_seq = AuthorizationDataSequence()\n negotiation_type_data_seq[\"ad-type\"] = 0x81\n negotiation_type_data_seq[\"ad-data\"] = neg_type\n\n # Build the Restriction Types data set\n # \n # 303f a004 0202 008d a137 0435 3033 3031\n # a003 0201 00a1 2a04 2800 0000 0000 1000\n # 00f3 cd6a f91c c2b1 32fd fbf1 6349 7585\n # 5e62 4ba4 9675 639e 351a 919e 3028 b9e0\n # 00\n # ----------------------------------------\n # 30 3f - SEQ\n # a0 04 02 02 00 8d - INT: 0x8D\n # a1 37 04 35 - OCT STRING\n # 30 33 - SEQ\n # 30 31 - SEQ\n # a0 03 02 01 00 - INT: 0x00\n # a1 2a 04 28 - OCT STRING\n # 00 00 00 00 00 10 00 00... - re_data\n re_data = b\"\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\" + urandom(32)\n restriction_data = AuthorizationData()\n restriction_data[0][\"ad-type\"] = 0 # const\n restriction_data[0][\"ad-data\"] = re_data\n\n kerb_auth_data_token_restrictions = AuthorizationDataSequence()\n kerb_auth_data_token_restrictions[\"ad-type\"] = 0x8D # 141\n kerb_auth_data_token_restrictions[\"ad-data\"] = encoder.encode(restriction_data)\n\n # Build the KerbLocal data set\n # \n # 301a a004 0202 008e a112 0410 bc20 16eb\n # a5f8 8b2a df78 2b94 7456 bd72\n # ----------------------------------------\n # 30 1a - SEQ\n # a0 04 02 02 00 8e - INT: 0x8E\n # a1 12 04 10 - OCT STRING\n # bc 20 16 eb a5 f8 8b 2a... - urandom()\n kerb_local_data = AuthorizationDataSequence()\n kerb_local_data[\"ad-type\"] = 0x8E\n kerb_local_data[\"ad-data\"] = urandom(16)\n\n # Build the KerbApOptions data set\n # ChannelBindingSupported\n # \n # 300e a004 0202 008f a106 0404 0040 0000\n # ----------------------------------------\n # 30 0e - SEQ\n # a0 04 02 02 00 8f - INT: 0x8F\n # a1 06 04 04 - OCT STRING\n # 00 40 00 00 - \\x00\\x40\\x00\\x00\n binding_data = AuthorizationDataSequence()\n binding_data[\"ad-type\"] = 0x8F\n binding_data[\"ad-data\"] = b\"\\x00\\x40\\x00\\x00\"\n\n # Build the KerbServiceTarget data set\n # \n # 304a a004 0202 0090 a142 0440 6800 6f00\n # 7300 7400 2f00 7300 7400 7300 2e00 6300\n # 6f00 6d00 7000 6100 6e00 7900 2e00 6300\n # 6f00 6d00 4000 6300 6f00 6d00 7000 6100\n # 6e00 7900 2e00 6300 6f00 6d00\n # ----------------------------------------\n # 30 4a - SEQ\n # a0 04 02 02 00 90 - INT: 0x90\n # a1 42 04 40 68 - OCT STRING\n # 00 6f 00 73 00 74 00 2f 00... - spn@domain -> UTF-16LE (null padded)\n kerb_service_target_data = AuthorizationDataSequence()\n kerb_service_target_data[\"ad-type\"] = 0x90\n kerb_service_target_data[\"ad-data\"] = f\"{spn}@{domain}\".encode(\"utf-16le\")\n\n # Now, wrap the above data sets in a sequence (top down).\n # Since we can't build an ASN.1 sequence without a#, we\n # are just going to hard code the sequence and manually\n # calculate the data length.\n # \n # 3081 XX ....\n # ----------------------------------------\n # 30 81 -- SEQ\n # XX -- LEN\n # .. .. 
-- COMBINED DATA\n auth_data = (\n encoder.encode(negotiation_type_data_seq)\n + encoder.encode(kerb_auth_data_token_restrictions)\n + encoder.encode(kerb_local_data)\n + encoder.encode(binding_data)\n + encoder.encode(kerb_service_target_data)\n )\n auth_data_len = hex(len(auth_data))[2:] # Strip `0x`\n auth_data_len = unhexlify(auth_data_len) # Convert to `\\x`\n authorization_data = b\"\\x30\\x81\" + auth_data_len + auth_data\n\n # fmt: on\n return authorization_data",
"def get_test_account_balance_response():\n\treturn {\n\t\t\"ResultType\":0,\n\t\t\"ResultCode\":0,\n\t\t\"ResultDesc\":\"The service request has been accepted successfully.\",\n\t\t\"OriginatorConversationID\":\"10816-694520-2\",\n\t\t\"ConversationID\":\"AG_20200927_00007cdb1f9fb6494315\",\n\t\t\"TransactionID\":\"LGR0000000\",\n\t\t\"ResultParameters\":{\n\t\t\"ResultParameter\":[\n\t\t\t{\n\t\t\t\"Key\":\"ReceiptNo\",\n\t\t\t\"Value\":\"LGR919G2AV\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"Conversation ID\",\n\t\t\t\"Value\":\"AG_20170727_00004492b1b6d0078fbe\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"FinalisedTime\",\n\t\t\t\"Value\":20170727101415\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"Amount\",\n\t\t\t\"Value\":10\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"TransactionStatus\",\n\t\t\t\"Value\":\"Completed\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"ReasonType\",\n\t\t\t\"Value\":\"Salary Payment via API\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"TransactionReason\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"DebitPartyCharges\",\n\t\t\t\"Value\":\"Fee For B2C Payment|KES|33.00\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"DebitAccountType\",\n\t\t\t\"Value\":\"Utility Account\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"InitiatedTime\",\n\t\t\t\"Value\":20170727101415\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"Originator Conversation ID\",\n\t\t\t\"Value\":\"19455-773836-1\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"CreditPartyName\",\n\t\t\t\"Value\":\"254708374149 - John Doe\"\n\t\t\t},\n\t\t\t{\n\t\t\t\"Key\":\"DebitPartyName\",\n\t\t\t\"Value\":\"600134 - Safaricom157\"\n\t\t\t}\n\t\t]\n\t},\n\t\"ReferenceData\":{\n\t\"ReferenceItem\":{\n\t\t\"Key\":\"Occasion\",\n\t\t\"Value\":\"aaaa\"\n\t}\n\t}\n\t\t}",
"def Burn(self, eth_addr, amt, acctPkey=distributor_secretKey, memo=\"XLMBurn\"):\n print(\"Burning Token on Stellar ##########################\")\n distributor = Keypair.from_secret(acctPkey)\n fees = self.stellar_server.fetch_base_fee()\n src_acct = self.stellar_server.load_account(distributor.public_key)\n transaction_obj = TransactionBuilder(\n source_account=src_acct,\n network_passphrase=general_network_passphrase,\n base_fee=fees\n ).append_payment_op(\n self.asset_issuer, str(amt), self.asset_code, self.asset_issuer\n ).append_manage_data_op(data_name=eth_addr, data_value=eth_addr\n ).add_text_memo(memo\n ).build()\n\n transaction_obj.sign(distributor.secret)\n\n submit_tx = self.stellar_server.submit_transaction(transaction_obj)\n \n\n return submit_tx",
"def debit_bank_account(amount):\n\n # Create a merchantAuthenticationType object with authentication details\n # retrieved from the constants file\n merchantAuth = apicontractsv1.merchantAuthenticationType()\n merchantAuth.name = '9cjV8Jv88Fg'\n merchantAuth.transactionKey = '799TzN55727ZPvbv'\n\n # Create the payment data for a bank account\n bankAccount = apicontractsv1.bankAccountType()\n accountType = apicontractsv1.bankAccountTypeEnum\n bankAccount.accountType = accountType.checking\n bankAccount.routingNumber = \"121042882\"\n bankAccount.accountNumber = str(random.randint(10000,999999999999))\n bankAccount.nameOnAccount = \"John Doe\"\n\n # Add the payment data to a paymentType object\n payment = apicontractsv1.paymentType()\n payment.bankAccount = bankAccount\n\n # Create order information\n order = apicontractsv1.orderType()\n order.invoiceNumber = \"10101\"\n order.description = tracker.get_slot(\"order\")\n\n # Set the customer's Bill To address\n customerAddress = apicontractsv1.customerAddressType()\n customerAddress.firstName = \"Ankit\"\n customerAddress.lastName = \"Hans\"\n customerAddress.company = \"ABInbev\"\n customerAddress.address = \"14 Main Street\"\n customerAddress.city = \"Kaithal\"\n customerAddress.state = \"HR\"\n customerAddress.zip = \"21100\"\n customerAddress.country = \"India\"\n\n # Set the customer's identifying information\n customerData = apicontractsv1.customerDataType()\n customerData.type = \"individual\"\n customerData.id = \"99999456654\"\n customerData.email = \"ankithans1947@gmail.com\"\n\n # Add values for transaction settings\n duplicateWindowSetting = apicontractsv1.settingType()\n duplicateWindowSetting.settingName = \"duplicateWindow\"\n duplicateWindowSetting.settingValue = \"60\"\n settings = apicontractsv1.ArrayOfSetting()\n settings.setting.append(duplicateWindowSetting)\n\n \n\n\n # Create a transactionRequestType object and add the previous objects to it.\n transactionrequest = apicontractsv1.transactionRequestType()\n transactionrequest.transactionType = \"authCaptureTransaction\"\n transactionrequest.amount = amount\n transactionrequest.payment = payment\n transactionrequest.order = order\n transactionrequest.billTo = customerAddress\n transactionrequest.customer = customerData\n transactionrequest.transactionSettings = settings\n\n # Assemble the complete transaction request\n createtransactionrequest = apicontractsv1.createTransactionRequest()\n createtransactionrequest.merchantAuthentication = merchantAuth\n createtransactionrequest.refId = \"MerchantID-0001\"\n createtransactionrequest.transactionRequest = transactionrequest\n # Create the controller\n createtransactioncontroller = createTransactionController(\n createtransactionrequest)\n createtransactioncontroller.execute()\n\n response = createtransactioncontroller.getresponse()\n\n if response is not None:\n # Check to see if the API request was successfully received and acted upon\n if response.messages.resultCode == \"Ok\":\n # Since the API request was successful, look for a transaction response\n # and parse it to display the results of authorizing the card\n if hasattr(response.transactionResponse, 'messages') is True:\n print(\n 'Successfully created transaction with Transaction ID: %s'\n % response.transactionResponse.transId)\n print('Transaction Response Code: %s' %\n response.transactionResponse.responseCode)\n print('Message Code: %s' %\n response.transactionResponse.messages.message[0].code)\n print('Description: %s' % response.transactionResponse.\n 
messages.message[0].description)\n else:\n print('Failed Transaction.')\n if hasattr(response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(response.transactionResponse.\n errors.error[0].errorCode))\n print(\n 'Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n # Or, print errors if the API request wasn't successful\n else:\n print('Failed Transaction.')\n if hasattr(response, 'transactionResponse') is True and hasattr(\n response.transactionResponse, 'errors') is True:\n print('Error Code: %s' % str(\n response.transactionResponse.errors.error[0].errorCode))\n print('Error message: %s' %\n response.transactionResponse.errors.error[0].errorText)\n else:\n print('Error Code: %s' %\n response.messages.message[0]['code'].text)\n print('Error message: %s' %\n response.messages.message[0]['text'].text)\n else:\n print('Null Response.')\n \n dispatcher.utter_message(\n f\"Thanks for placing the order.\\nSuccessfully created transaction with Transaction ID: {str(random.randint(10000,999999999999))}\\nTransaction Response Code: 1\\nMessage Code: 1\\nDescription: This transaction has been approved.\\nWe have sent your e-receipt on your mail.\")\n return [AllSlotsReset()]",
"def make_acct():\n\n cc_names = [\"Sapphire Preferred\", \"Sapphire Reserve\", \"The Platinum Card\",\"The Gold Card\", \"British Airways Visa Signature\",\"Marriott Bonvoy Brilliant\"]\n \n cc_account = FCreditCardAccount(random.choice(cc_names), fake.random_int(min=0, max=150000), fake.date(), fake.date(), True)\n\n cc_account = cc_account.get_acct_attributes()\n \n return cc_account",
"def credit_card_charge(\n amount,\n chargeFee,\n token,\n expirationYear,\n expirationMonth,\n name,\n zipcode,\n address,\n city,\n state,\n phone,\n number,\n validationValue,\n clientReferenceData1,\n isRecurring,\n accountGroupCode,\n callbackId,\n save,\n convenienceFeeType,\n customerId,\n splitPayGroupId\n):\n URL = 'https://stgapiprocessone.oneincsystems.com/api/CreditCard/Charge'\n data = {\n \"Amount\": amount,\n \"ChargeFee\": chargeFee,\n \"Token\": token,\n \"CreditCard\": {\n \"ExpirationYear\": expirationYear,\n \"ExpirationMonth\": expirationMonth,\n \"Holder\": {\n \"Name\": name,\n \"Zip\": zipcode,\n \"Address\": address,\n \"City\": city,\n \"State\": state,\n \"Phone\": phone\n },\n \"Number\": number,\n \"ValidationValue\": validationValue\n },\n \"ClientReferenceData\": {\n \"ClientReferenceData1\": clientReferenceData1,\n },\n \"IsRecurring\": isRecurring,\n \"AccountGroupCode\": accountGroupCode,\n \"CallbackId\": callbackId,\n \"Save\": save,\n \"ConvenienceFeeType\": convenienceFeeType,\n \"CustomerId\": customerId,\n \"SplitPayGroupId\": splitPayGroupId,\n \"AuthenticationKey\": settings.AUTHENTICATIONKEY\n }\n response = requests.post(url=URL, json=data)\n print('CHAREG API START')\n #print(response.text)\n response_code_api_charge_credit_card(response)\n print('CHAREG API END')\n return response",
"def test_deposit_no_jwt(client, acc1_usd_deposit_transaction_factory):\n deposit = acc1_usd_deposit_transaction_factory()\n response = client.get(\n f\"/deposit?asset_code=USD&account={deposit.stellar_account}&memo=foo&memo_type=text\",\n follow=True,\n )\n content = json.loads(response.content)\n assert response.status_code == 400\n print(content)\n assert content == {\"error\": \"JWT must be passed as 'Authorization' header\", \"status_code\": 400}",
"def GET_request(action):\n\n # OAuth token of the user that requests will be made on behalf of\n\n\n # Login of the advertising agency client\n # Required parameter if requests are made on behalf of an advertising agency\n clientLogin = 'marketingdigital@zara.com'\n\n headers = {\n # OAuth token. The word Bearer must be used\n \"Authorization\": 'OAuth AQAAAABDFBfdAAcVB0yqdlcRyEzIu8BBs1TTLuE',\n # Login of the advertising agency client\n \"Client-Login\": clientLogin,\n # Language for response messages\n \"Accept-Language\": \"en\",\n # Mode for report generation\n \"processingMode\": \"auto\"\n # Format for monetary values in the report\n # \"returnMoneyInMicros\": \"false\",\n # Don't include the row with the report name and date range in the report\n # \"skipReportHeader\": \"true\",\n # Don't include the row with column names in the report\n # \"skipColumnHeader\": \"true\",\n # Don't include the row with the number of statistics rows in the report\n # \"skipReportSummary\": \"true\"\n }\n\n\n API_URL = 'https://api.webmaster.yandex.net/v4'\n\n\n\n retry_count = 0\n retry_max = 1\n\n try:\n resp = requests.get(API_URL + action, headers=headers)\n except Exception as message:\n if \"400\" or \"401\" in message:\n logging.error(f\"Could not retrieve html, authentication or token error: {message}\")\n sys.exit(1)\n elif retry_count < retry_max:\n print(f\"Retrying ... (count {retry_count})\")\n # sleep for fifteen minutes\n time.sleep(10)\n\n # increase the counter\n retry_count = retry_count + 1\n\n else:\n logging.error(f\"Could not retrieve response: {message}\")\n raise Exception(str(message))\n\n return resp.json()",
"def toDuckbill(self):\n a = Account(\n fname = self.fname,\n lname = self.lname,\n email = self.email,\n company = self.company,\n phone = self.phone,\n address1 = self.addr1,\n address2 = self.addr2,\n city = self.city,\n state = self.state,\n postal = self.postal,\n countryCD = self.country,\n account = self.username,\n nextDue = Date(\"today\")+30,\n brand = self.brand )\n \n s = Subscription(\n username = self.username,\n service = self.plan,\n rate = self.calcRate(),\n cycLen = self.cycLen,\n \n # thirty day free trial\n nextDue = Date(\"today\")+30)\n\n e = Event(\n event = \"note\",\n posted = Date(\"today\"),\n amount = 0,\n note = \"30 day free trial\")\n\n a.subscriptions << s\n a.events << e\n\n return a",
"def balanceOf(acct, tokenId):\n return Get(GetContext(), _concatkey(_concatkey(BALANCE_PREFIX, tokenId), acct))",
"def request_balance(self):\n req = \"x\" + json.dumps({\"identifier\": self.pubkey})\n replies = self.broadcast_request(req)\n return int(SPVClient._process_replies(replies))",
"def generate_payment_request_message(self):\n #REFACTOR\n cost = self.torApp.get_relay().get_cost()\n interval = self.bank.currentACoinInterval\n #generate ACoin request\n request = BankMessages.make_acoin_request(self.bank, interval, cost)\n request.id = self.currentRequestId\n self.requests[request.id] = request\n self.currentRequestId += 1\n return Basic.write_long(request.id) + request.msg + Globals.PRIVATE_KEY.sign(request.msg)",
"def card_foreign_payment(request):\n data = request.data\n user = request.user\n serializer = ForeignCardPaymentSerializer(data=data)\n if serializer.is_valid():\n pay_data = {\n 'cardno': serializer.validated_data.get('cardno'),\n 'cvv': serializer.validated_data.get('cvv'),\n 'expirymonth': serializer.validated_data.get('expirymonth'),\n 'expiryyear': serializer.validated_data.get('expiryyear'),\n 'country': serializer.validated_data.get('country', 'NG'),\n 'amount': str(serializer.validated_data.get('amount')),\n 'save_card': serializer.validated_data.get('save_card'),\n 'email': user.email,\n 'firstname': user.first_name,\n 'lastname': user.last_name,\n }\n purpose = str(serializer.validated_data.get('purpose'))\n property_id = serializer.validated_data.get('property_id')\n if purpose == 'Buying':\n get_object_or_404(Property, pk=property_id)\n init_resp = TransactionServices.initiate_card_payment(pay_data)\n if init_resp.get('status') == 'error':\n return Response(\n {'message': init_resp.get('message')},\n status=status.HTTP_400_BAD_REQUEST\n )\n auth_dict = {\n 'suggested_auth': 'NOAUTH_INTERNATIONAL',\n 'billingzip': serializer.validated_data.get('billingzip'),\n 'billingcity': serializer.validated_data.get('billingcity'),\n 'billingaddress': serializer.validated_data.get('billingaddress'),\n 'billingstate': serializer.validated_data.get('billingstate'),\n 'billingcountry': serializer.validated_data.get('billingcountry'),\n }\n pay_data['auth_dict'] = auth_dict\n pay_data['purpose'] = purpose\n pay_data['property_id'] = property_id\n auth_resp = TransactionServices.authenticate_card_payment(pay_data)\n if auth_resp.get('status') == 'success':\n return Response(\n {\n 'message': auth_resp['data']['authurl'],\n 'txRef': auth_resp.get('data').get('txRef')\n },\n status=status.HTTP_200_OK)\n else:\n return Response(\n {'message': auth_resp.get('message')},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def test_create_counterparty_bankdetails(self):\n pass",
"def test_deposit_authenticated_success(client, acc1_usd_deposit_transaction_factory):\n client_address = \"GDKFNRUATPH4BSZGVFDRBIGZ5QAFILVFRIRYNSQ4UO7V2ZQAPRNL73RI\"\n client_seed = \"SDKWSBERDHP3SXW5A3LXSI7FWMMO5H7HG33KNYBKWH2HYOXJG2DXQHQY\"\n deposit = acc1_usd_deposit_transaction_factory()\n\n # SEP 10.\n response = client.get(f\"/auth?account={client_address}\", follow=True)\n content = json.loads(response.content)\n\n envelope_xdr = content[\"transaction\"]\n envelope_object = TransactionEnvelope.from_xdr(envelope_xdr, network_passphrase=settings.STELLAR_NETWORK_PASSPHRASE)\n client_signing_key = Keypair.from_secret(client_seed)\n envelope_object.sign(client_signing_key)\n client_signed_envelope_xdr = envelope_object.to_xdr()\n\n response = client.post(\n \"/auth\",\n data={\"transaction\": client_signed_envelope_xdr},\n content_type=\"application/json\",\n )\n content = json.loads(response.content)\n encoded_jwt = content[\"token\"]\n assert encoded_jwt\n\n header = {\"HTTP_AUTHORIZATION\": f\"Bearer {encoded_jwt}\"}\n response = client.get(\n f\"/deposit?asset_code=USD&account={deposit.stellar_account}\",\n follow=True,\n **header,\n )\n content = json.loads(response.content)\n assert response.status_code == 403\n assert content[\"type\"] == \"interactive_customer_info_needed\"",
"def main(request, response):\n\n token = \"ArQvBL/jhDJ62HaUm/ak0dIUYDjZAfeCQTXwa92cOrHZbL7R+bhb3qrVO2pHWkgJPgvIzvLX5m3wfaUJfOKY0Q4AAABqeyJvcmlnaW4iOiAiaHR0cHM6Ly93d3cud2ViLXBsYXRmb3JtLnRlc3Q6ODQ0NCIsICJmZWF0dXJlIjogIk9yaWdpbklzb2xhdGlvbkhlYWRlciIsICJleHBpcnkiOiAyMDAwMDAwMDAwfQ==\"\n\n header_order = request.GET.first(\"headerOrder\")\n if header_order == \"otoi\":\n response.headers.set(\"Origin-Trial\", token)\n response.headers.set(\"Origin-Isolation\", \"?1\")\n elif header_order == \"oiot\":\n response.headers.set(\"Origin-Isolation\", \"?1\")\n response.headers.set(\"Origin-Trial\", token)\n else:\n raise AssertionError(\"Invalid headerOrder\")\n\n response.headers.set(\"Content-Type\", \"text/html\")\n\n return \"\"\"\n <!DOCTYPE html>\n <meta charset=\"utf-8\">\n <title>Helper page for origin isolation tests</title>\n\n <script type=\"module\">\n window.onmessage = e => {\n if (e.data.constructor === WebAssembly.Module) {\n parent.postMessage(\"WebAssembly.Module message received\", \"*\");\n } else if (e.data.command === \"set document.domain\") {\n document.domain = e.data.newDocumentDomain;\n parent.postMessage(\"document.domain is set\", \"*\");\n }\n };\n\n window.onmessageerror = () => {\n parent.postMessage(\"messageerror\", \"*\");\n };\n </script>\n \"\"\"",
"async def send_rev_reg_def(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n profile = context.profile\n outbound_handler = request[\"outbound_message_router\"]\n rev_reg_id = request.match_info[\"rev_reg_id\"]\n create_transaction_for_endorser = json.loads(\n request.query.get(\"create_transaction_for_endorser\", \"false\")\n )\n write_ledger = not create_transaction_for_endorser\n endorser_did = None\n connection_id = request.query.get(\"conn_id\")\n\n # check if we need to endorse\n if is_author_role(profile):\n # authors cannot write to the ledger\n write_ledger = False\n create_transaction_for_endorser = True\n if not connection_id:\n # author has not provided a connection id, so determine which to use\n connection_id = await get_endorser_connection_id(profile)\n if not connection_id:\n raise web.HTTPBadRequest(reason=\"No endorser connection found\")\n\n if not write_ledger:\n try:\n async with profile.session() as session:\n connection_record = await ConnRecord.retrieve_by_id(\n session, connection_id\n )\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n except BaseModelError as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n async with profile.session() as session:\n endorser_info = await connection_record.metadata_get(\n session, \"endorser_info\"\n )\n if not endorser_info:\n raise web.HTTPForbidden(\n reason=(\n \"Endorser Info is not set up in \"\n \"connection metadata for this connection record\"\n )\n )\n if \"endorser_did\" not in endorser_info.keys():\n raise web.HTTPForbidden(\n reason=(\n ' \"endorser_did\" is not set in \"endorser_info\"'\n \" in connection metadata for this connection record\"\n )\n )\n endorser_did = endorser_info[\"endorser_did\"]\n\n try:\n revoc = IndyRevocation(profile)\n rev_reg = await revoc.get_issuer_rev_reg_record(rev_reg_id)\n\n rev_reg_resp = await rev_reg.send_def(\n profile,\n write_ledger=write_ledger,\n endorser_did=endorser_did,\n )\n LOGGER.debug(\"published rev reg definition: %s\", rev_reg_id)\n except StorageNotFoundError as err:\n raise web.HTTPNotFound(reason=err.roll_up) from err\n except RevocationError as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n if not create_transaction_for_endorser:\n return web.json_response({\"sent\": rev_reg.serialize()})\n\n else:\n transaction_mgr = TransactionManager(profile)\n try:\n transaction = await transaction_mgr.create_record(\n messages_attach=rev_reg_resp[\"result\"], connection_id=connection_id\n )\n except StorageError as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n # if auto-request, send the request to the endorser\n if context.settings.get_value(\"endorser.auto_request\"):\n try:\n (\n transaction,\n transaction_request,\n ) = await transaction_mgr.create_request(\n transaction=transaction,\n # TODO see if we need to parameterize these params\n # expires_time=expires_time,\n # endorser_write_txn=endorser_write_txn,\n )\n except (StorageError, TransactionManagerError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n await outbound_handler(transaction_request, connection_id=connection_id)\n\n return web.json_response({\"txn\": transaction.serialize()})",
"def test_billing_recurring_charge(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
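A minimal sketch of the simplest path above (case 1: the claimant already trusts the asset, or it is XLM), assuming testnet Horizon; it returns the unsigned XDR that a wallet such as Albedo would then sign.

from stellar_sdk import Server, TransactionBuilder, Network

def claim_xdr(public_key: str, balance_id: str) -> str:
    server = Server("https://horizon-testnet.stellar.org")
    account = server.load_account(public_key)
    tx = (
        TransactionBuilder(
            source_account=account,
            network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
            base_fee=server.fetch_base_fee(),
        )
        .append_claim_claimable_balance_op(balance_id=balance_id, source=public_key)
        .set_timeout(300)
        .build()
    )
    return tx.to_xdr()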
read regions from a file. | def readFromFile(self, infile, ignore_strand=False):
self.mForwardRegions = {}
self.mReverseRegions = {}
self.mRegions = []
self.mIgnoreStrand = ignore_strand
n = 0
for line in infile:
if line[0] == "#":
continue
        (token, sbjct_token, sbjct_strand,
         sbjct_from, sbjct_to) = line[:-1].split("\t")[:5]
if ignore_strand:
key = sbjct_token
else:
key = "%s-%s" % (sbjct_token, sbjct_strand)
if key not in self.mForwardRegions:
self.mForwardRegions[key] = []
self.mReverseRegions[key] = []
self.mForwardRegions[key].append((int(sbjct_from), n))
self.mReverseRegions[key].append((int(sbjct_to), n))
self.mRegions.append((token, sbjct_from, sbjct_to))
n += 1
for k, v in self.mForwardRegions.items():
v.sort()
        # materialise as lists (map() is lazy in Python 3) so the positions can be
        # searched repeatedly, e.g. with bisect
        self.mForwardRegions[k] = ([x[0] for x in v],
                                   [x[1] for x in v])
for k, v in self.mReverseRegions.items():
v.sort()
        self.mReverseRegions[k] = ([x[0] for x in v],
                                   [x[1] for x in v])
"def read_regions_data(prefix):\n\n ret = []\n idx = {}\n columns = []\n for line in open(prefix+'_regions.txt'):\n line = line.strip()\n if line == '':\n continue\n\n if line.startswith('#'):\n line = line[1:]\n header = line.split()\n for i in range(len(header)):\n idx[header[i]] = i\n\n columns = ['RC', 'MEDCOV', 'MINCOV', 'MEDQCOV', 'MINQCOV', 'MAXFLMQ', 'MAXFLBQ', 'Pass_or_flag']\n if 'MEDCOV+' in header:\n columns += [\n 'RC+', 'MEDCOV+', 'MINCOV+', 'MEDQCOV+', 'MINQCOV+', 'MAXFLMQ+', 'MAXFLBQ+',\n 'RC-', 'MEDCOV-', 'MINCOV-', 'MEDQCOV-', 'MINQCOV-', 'MAXFLMQ-', 'MAXFLBQ-'\n ]\n continue\n\n cols = line.split('\\t')\n record = {'region': cols[0]}\n for c in columns:\n value = cols[idx[c]]\n if value == '.':\n value = '--'\n elif c.startswith('RC') or c.startswith('MIN'):\n value = int(value)\n elif c != 'Pass_or_flag':\n value = float(value)\n if c.startswith('MED') and value == int(value):\n value = int(value)\n record[c.lower()] = value\n ret.append(record)\n\n return ret",
"def import_regions(regions_file, format='ds9', logger=None):\n regs = regions.Regions.read(regions_file, format=format)\n\n return [astropy_region_to_ginga_canvas_object(r, logger=logger)\n for r in regs]",
"def fromtextfile(self, *args, **kwargs):\n return _regionmanager.regionmanager_fromtextfile(self, *args, **kwargs)",
"def parse_regions(self, regions_file):\n regions = Map()\n continents = []\n with open(regions_file) as f:\n data = json.loads(f.read())\n\n for continent in data:\n continents.append( Continent(continent['name'], json_dic=continent) )\n for region, neighbors in continents[-1].region_connections():\n regions.add_region(region, neighbors)\n\n return continents, regions",
"def read_txt_grains(fname):\n\n # Note: (21) fields named below with an underscore are not yet used\n #\n # Fields from grains.out header:\n \"\"\"grain ID completeness chi2\n xi[0] xi[1] xi[2]\n tVec_c[0] tVec_c[1] tVec_c[2]\n vInv_s[0] vInv_s[1] vInv_s[2] vInv_s[4]*sqrt(2) vInv_s[5]*sqrt(2) vInv_s[6]*sqrt(2)\n ln(V[0,0]) ln(V[1,1]) ln(V[2,2]) ln(V[1,2]) ln(V[0,2]) ln(V[0,1])\"\"\"\n\n # Use shortened names in construction of numpy data type.\n\n d = {'names': ('id', 'completeness', 'chisq',\n 'ori_0', 'ori_1', 'ori_2',\n 'cen_0', 'cen_1', 'cen_2',\n 'vi0', 'vi1', 'vi2', 'vi3', 'vi4', 'vi5',\n 'lnV00', 'lnV11', 'lnV22', 'lnV12', 'lnV02', 'lnV01'),\n 'formats': ('i4',) + 20*('f4',)}\n\n return np.loadtxt(fname, dtype=d)",
"def get_regions_from_genbank(genbank_file):\n target_regions = list()\n for item in SeqIO.parse(genbank_file, 'genbank'):\n chrom = item.id\n foundthings = False\n for ftype in ('gene', 'CDS'):\n if foundthings:\n continue\n for feature in item.features:\n if feature.type != ftype:\n continue\n if 'locus_tag' in feature.qualifiers:\n name = feature.qualifiers['locus_tag'][0]\n elif 'gene' in feature.qualifiers:\n name = feature.qualifiers['gene'][0]\n else:\n logging.error('No locus_tag or gene for {feature}.'.format(**vars()))\n template = 'BAILING OUT UNTIL MISSING FEATURE-NAME ISSUE IS RESOLVED'\n logging.error(template.format(**vars()))\n sys.exit(2)\n foundthings = True\n start = int(feature.location.start)\n end = int(feature.location.end)\n if feature.location.strand == 1:\n target_regions.append((name, chrom, start, end, '+'))\n elif feature.location.strand == -1:\n target_regions.append((name, chrom, start, end, '-'))\n else:\n # If we don't know what strand it's on, just claim all targets.\n target_regions.append((name, chrom, start, end, '+'))\n target_regions.append((name, chrom, start, end, '-'))\n logging.info(\n 'Found {0} target regions in genbank file.'.format(len(target_regions)))\n return target_regions",
"def load_regions(fname, doplot=False, verbose=False, **kw):\n print('reading geom from:', fname)\n with open(fname) as fobj:\n\n circles = []\n polygons = []\n\n for line in fobj:\n line = line.strip()\n\n if line[:6] == 'circle':\n circles.append(extract_circle(line))\n elif line[:7] == 'polygon':\n polygons.append(extract_polygon(line))\n else:\n continue\n\n if verbose:\n for circle in circles:\n print('circle:', circle)\n\n for polygon in polygons:\n print('polygon:', polygon)\n\n allgeom = circles + polygons\n\n if doplot:\n from .plotting import plotrand\n\n nside = 2**17\n smap = hs.HealSparseMap.make_empty(\n nside_coverage=32,\n nside_sparse=nside,\n dtype=np.int16,\n sentinel=0,\n )\n hs.realize_geom(allgeom, smap)\n\n nrand = 100000\n plt = plotrand(\n smap,\n nrand,\n randpix=False,\n by_val=True,\n show=False,\n title=os.path.basename(fname),\n )\n return plt\n\n return allgeom",
"def parse_target_regions(target_regions_file):\n logging.info('Parsing target region file.')\n target_regions = list()\n for x in open(target_regions_file):\n if x.startswith('#'):\n continue\n parts = x.strip().split('\\t')\n try:\n (name,chrom,start,end,strand) = parts\n except ValueError:\n trf = target_regions_file\n logging.error('Could not parse from {trf}: {x}'.format(**vars()))\n sys.exit(1)\n try:\n target_regions.append((name, chrom, int(start), int(end), strand))\n except ValueError:\n x = x.strip()\n logging.warning('Could not fully parse: {x}'.format(**vars()))\n continue\n logging.info(\n 'Found {0} target regions in region file.'.format(len(target_regions)))\n return target_regions",
"def read_file(filename):\n\n infile = open(filename, 'r')\n lines = infile.readlines()\n infile.close()\n \n return lines",
"def get_regions_dictionary(self):\n logging.debug(\"Loading Bed regions into a dict...\")\n reader = pybedtools.BedTool(self.bed)\n regions = {}\n for interval in reader:\n chromosome = interval.chrom\n start = int(interval.start)\n end = int(interval.end)\n if chromosome not in regions:\n regions[chromosome] = []\n regions[chromosome].append((start, end))\n return regions",
"def open_and_parse_file(file_name, range_of_square):\n try:\n with open(file_name, 'r') as tsv_file:\n list_of_objects = []\n for row in tsv_file:\n try:\n splitted_row = row.split(\"\\t\")\n data = filtering_by_coordinates(splitted_row,\n range_of_square[constants.fov_ra_min_index],\n range_of_square[constants.fov_ra_max_index],\n range_of_square[constants.fov_dec_min_index],\n range_of_square[constants.fov_dec_max_index])\n if data is not None:\n list_of_objects.append(data)\n except ValueError:\n pass\n except FileNotFoundError:\n raise Exception(f'path isn\\'t correct {file_name}')\n return list_of_objects",
"def load_ranges(filepath: str) -> List[RangePair]:\n return [(\n tuple(map(int, range_pair[0].split(\"-\"))),\n tuple(map(int, range_pair[1].split(\"-\")))\n ) for range_pair in read_lines_as_list(filepath=filepath, split=\",\")]",
"def gdal_readmap(file_name, file_format):\n # Open file for binary-reading\n mapFormat = gdal.GetDriverByName(file_format)\n mapFormat.Register()\n ds = gdal.Open(file_name)\n if ds is None:\n logging.warning('Could not open {:s} Shutting down').format(file_name)\n sys.exit(1)\n # Retrieve geoTransform info\n geotrans = ds.GetGeoTransform()\n originX = geotrans[0]\n originY = geotrans[3]\n resX = geotrans[1]\n resY = geotrans[5]\n cols = ds.RasterXSize\n rows = ds.RasterYSize\n x = np.linspace(originX+resX/2, originX+resX/2+resX*(cols-1), cols)\n y = np.linspace(originY+resY/2, originY+resY/2+resY*(rows-1), rows)\n # Retrieve raster\n RasterBand = ds.GetRasterBand(1) # there's only 1 band, starting from 1\n data = RasterBand.ReadAsArray(0, 0, cols, rows)\n fill_val = RasterBand.GetNoDataValue()\n RasterBand = None\n ds = None\n return x, y, data, fill_val",
"def regions(self):\n for protein, regions in self._regions.items():\n for start, end in regions:\n yield protein, start, end",
"def read_cities_from_file ( filename ):\n Cities= [] # List of Empty Cities\n with open(filename) as file:\n for line in file:\n R= line.split()\n Cities.append ( City( R[0], float(R[1]), float(R[2]) ) )\n return Cities",
"def read(infile):\n return open(infile, 'r').readlines()",
"def loadTEranges(TE_file_loc):\n with open(TE_file_loc) as TE_file:\n for line in TE_file:\n line_col = str.split(line)\n TE_ranges.setdefault(line_col[CHROM],[]).append((line_col[START],line_col[STOP]))\n\n TE_file.close()\n return",
"def __read_file(self):\r\n \r\n try:\r\n \r\n return gpd.read_file(self.path,encoding='utf-8')\r\n \r\n \r\n except FileNotFoundError as err:\r\n \r\n print(\"File could not be found,ensure you enter a valid geojson file\")\r\n \r\n raise err",
"def read_split_ranges(path):\n pattern = re.compile(r'^(\\d+)-(\\d+)')\n split_ranges = []\n with open(path, 'r') as file_in:\n for line in file_in:\n result = pattern.match(line)\n split_ranges.append((int(result.group(1)), int(result.group(2))))\n return split_ranges"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
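An illustrative companion to the index built above (an assumption about how the sorted (position, index) pairs are meant to be queried): bisect into the forward list to pull out the regions that start at or before a given coordinate.

import bisect

def regions_starting_before(forward_regions, regions, key, position):
    """Yield (token, start, end) tuples under `key` whose start is <= position."""
    if key not in forward_regions:
        return
    starts, indices = forward_regions[key]
    upto = bisect.bisect_right(starts, position)
    for i in indices[:upto]:
        yield regions[i]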
Updates angular velocities and linear accelerations via IMU measurement | def callback_imu(msg):
global omega, a, imu_callback_done
if not imu_callback_done:
imu_callback_done = True
omega = [-msg.angular_velocity.x, -msg.angular_velocity.y, -msg.angular_velocity.z]
a = [msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z] | [
"def update_imu(self, msg):\n\t\tself.sen.imu.acc_body = enu_to_ned(np.array([[msg.linear_acceleration.x], [msg.linear_acceleration.y], [msg.linear_acceleration.z]]))\n\t\tself.sen.imu.ang_vel = enu_to_ned(np.array([[msg.angular_velocity.x], [msg.angular_velocity.y], [msg.angular_velocity.z]]))",
"def update( self, msg ):\n\n if self._last_msg is None:\n self._last_msg = msg\n\n # imu.orientation is a normalized quaternion. euler_from_quaternion returns radians\n rpy = euler_from_quaternion( [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w] )\n\n velocity = msg.angular_velocity # should be rad/s\n\n accel = [0.]*3\n\n # compute delta-t\n dt = ( msg.header.stamp - self._last_msg.header.stamp ).to_sec()\n \n if dt > 0.:\n last_velocity = self._last_msg.angular_velocity\n accel[0]= ( velocity.x - last_velocity.x ) / dt\n accel[1]= ( velocity.y - last_velocity.y ) / dt\n accel[2]= ( velocity.z - last_velocity.z ) / dt\n\n self._last_msg = msg\n\n self.roll = rpy[0]*self.roll_alpha + velocity.x*self.roll_beta + accel[0]*self.roll_gamma\n self.pitch = rpy[1]*self.pitch_alpha + velocity.y*self.pitch_beta + accel[1]*self.pitch_gamma\n self.yaw = rpy[2]*self.yaw_alpha + velocity.z*self.yaw_beta + accel[2]*self.yaw_gamma",
"def imu_data_callback(self, data):\n self._current_quaternion = (data.orientation.x, data.orientation.y,\n data.orientation.z, data.orientation.w)\n self._current_angular = data.angular_velocity\n self._current_linear = data.linear_acceleration",
"def update_angular_velocity(self, msg):\n\t\tself.ekf.ang_vel = enu_to_ned(np.array([[msg.twist.angular.x], [msg.twist.angular.y], [msg.twist.angular.z]]))",
"def update(self, **kwargs):\n self.apply_velocity()",
"def _updateVelocity(self):\n\t\t# Find difference between two vectors\n\t\tdifferenceVector = [0, 0]\n\t\tdifferenceVector[0] = self.targetVelocity[0] - self.currentVelocity[0]\n\t\tdifferenceVector[1] = self.targetVelocity[1] - self.currentVelocity[1]\n\n\t\t# Exit if there's nothing to update to avoid extra calculations\n\t\tif(differenceVector[0] == 0 and differenceVector[1] == 0):\n\t\t\treturn\n\n\t\t# Find the hypotenuse of the difference vector\n\t\tdifferenceMagnitude = math.sqrt((differenceVector[0] ** 2) + (differenceVector[1] ** 2))\n\n\t\t# If hypotenuse <= maxAcceleration, set currentVelocity = targetVelocity\n\t\tif(differenceMagnitude <= self.maxAcceleration):\n\t\t\tself.currentVelocity[0] = self.targetVelocity[0]\n\t\t\tself.currentVelocity[1] = self.targetVelocity[1]\n\t\t\treturn\n\n\t\t# Else, divide the distance vector by the hypotenuse (to make unit vector), multiply by maxAcceleration, and add to currentVelocity\n\t\tdifferenceVector[0] = self.maxAcceleration * (differenceVector[0] / differenceMagnitude)\n\t\tdifferenceVector[1] = self.maxAcceleration * (differenceVector[1] / differenceMagnitude)\n\n\t\tself.currentVelocity[0] += differenceVector[0]\n\t\tself.currentVelocity[1] += differenceVector[1]\n\n\t\treturn",
"def update_mag(self, msg):\n\t\tself.sen.imu.mag = enu_to_ned(np.array([[msg.magnetic_field.x], [msg.magnetic_field.y], [msg.magnetic_field.z]]))",
"def update_model():\n global alpha_0, alpha_current, x_full, v_full, t\n #update alpha_0 values\n alpha_0 = np.arctan2((o[1]-x[1]),(o[0]-x[0]))\n alpha_current = np.arctan2(v[1,:],v[0,:])\n ind = v[0,:]==0\n ind[v[1,:]!=0]=False\n alpha_current[ind]=alpha_0[ind]\n #save information about positions of each individual\n x_full = np.dstack((x_full,x))\n v_full = np.dstack((v_full,v))\n #increment time\n t = t + time_step",
"def apply_velocity(self):\n for moon in self.moons:\n for axis, vel in moon['vel'].items():\n moon['pos'][axis] += vel",
"def calc_ang_acc(self, u, omega, I, L, b, k):\n # Calculate torque given control input and physical constants\n tau = self.calc_torque(u, L, b, k)\n\n # Calculate body frame angular acceleration using Euler's equation\n omegaddot = np.dot(np.linalg.inv(\n I), (tau - np.cross(omega, np.dot(I, omega))))\n\n return omegaddot",
"def angular_velocity(self):\r\n\r\n self.omega += self.angular_acceleration*self.dt\r\n return self.omega",
"def test_imu_sensor(self):\n # Create an engine: no controller and no internal dynamics\n engine = jiminy.Engine()\n setup_controller_and_engine(engine, self.robot)\n\n # Run simulation and extract log data\n x0 = np.array([0.1, 0.1])\n tf = 2.0\n time, gyro_jiminy, accel_jiminy = \\\n SimulateSimplePendulum._simulate_and_get_imu_data_evolution(\n engine, tf, x0, split=True)\n\n # Pendulum dynamics\n def dynamics(t: float, x: np.ndarray) -> np.ndarray:\n return np.stack(\n (x[..., 1], self.g / self.l * np.sin(x[..., 0])), axis=-1)\n\n # Integrate this non-linear dynamics\n x_rk_python = integrate_dynamics(time, x0, dynamics)\n\n # Compute sensor acceleration, i.e. acceleration in polar coordinates\n theta = x_rk_python[:, 0]\n dtheta = x_rk_python[:, 1]\n dtheta = x_rk_python[:, 1]\n\n # Acceleration: to resolve algebraic loop (current acceleration is\n # function of input which itself is function of sensor signal, sensor\n # data is computed using q_t, v_t, a_t\n ddtheta = dynamics(0.0, x_rk_python)[:, 1]\n\n expected_accel = np.stack([\n - self.l * ddtheta + self.g * np.sin(theta),\n np.zeros_like(theta),\n self.l * dtheta ** 2 - self.g * np.cos(theta)], axis=-1)\n expected_gyro = np.stack([\n np.zeros_like(theta),\n dtheta,\n np.zeros_like(theta)], axis=-1)\n\n # Compare sensor signal, ignoring first iterations that correspond to\n # system initialization\n self.assertTrue(np.allclose(\n expected_gyro[2:, :], gyro_jiminy[2:, :], atol=TOLERANCE))\n self.assertTrue(np.allclose(\n expected_accel[2:, :], accel_jiminy[2:, :], atol=TOLERANCE))",
"def update_velocity(self, msg):\n\t\tself.ekf.vel = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))",
"def cgm_update(self, eta, u, v, alpha):\n self.Y = (1 - eta) * self.Y + eta * u.dot(v.dot(self.Omega))\n self.W = (1 - eta) * self.W + eta * (self.Psi.dot(u)).dot(v)",
"def update(self):\r\n self.updateVelocities()\r\n self.updatePositions()",
"def update_relative_velocities(self, msg):\t\n\t\tself.sen.emu.rel_vel = ROS_list_to_np_array(msg.data)",
"def update_attitude(self, msg):\n\t\tros_q = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w]\n\t\tself.ekf.att_q = ROS_quaternion_to_SI_quaternion(ros_q)\n\t\tself.ekf.att_euler = enu_to_ned(np.asarray(euler_from_quaternion(ros_q)).reshape(3,1))",
"def angular_velocity(self):\n return 0.0",
"def update_physical_parameters(self):\n\t\tnet_forces = self.compute_all_net_forces()\n\t\tfor i in range(0, self.num_points):\n\t\t\tforce = net_forces[i, :]\n\t\t\tself.velocity_deltas[i, :] = self.acceleration(i, force) * self.time_step # force exerted changes velocity. Old val erased each time\n\t\tself.move_points(self.time_step) # all points take step in direction of velocity"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
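A usage sketch (assuming a ROS1 node and a sensor_msgs/Imu topic; the topic name /imu/data is a placeholder) wiring the callback above into a subscriber.

import rospy
from sensor_msgs.msg import Imu

# module-level state read elsewhere and updated once by callback_imu above
omega = [0.0, 0.0, 0.0]
a = [0.0, 0.0, 0.0]
imu_callback_done = False

if __name__ == "__main__":
    rospy.init_node("imu_listener")
    rospy.Subscriber("/imu/data", Imu, callback_imu, queue_size=1)
    rospy.spin()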
Publish the estimated position and orientation and prepare for the next cycle | def finish_loop(self):
pub = rospy.Publisher('/ekf_pose', PoseStamped, queue_size=10)
msg = PoseStamped()
msg.header.frame_id = '/map'
msg.header.stamp = rospy.Time().now()
msg.pose.position = Point(self.X_est[0, 0], self.X_est[1, 0], self.X_est[2, 0])
x, y, z, w = get_quaternion(self.X_est[3, 0], self.X_est[4, 0], self.X_est[5, 0])
msg.pose.orientation = Quaternion(x, y, z, w)
pub.publish(msg)
self.X_prev = self.X_est
self.P_prev = self.P_est | [
"def print_position(self) -> None:\n self.hkl_now = list(self.calculate_hkl_from_angles())\n self.pseudo_dict_to_update = self.get_pseudo_angles_from_motor_angles()\n print(\"\")\n print(\n \"HKL now = \",\n format_5_decimals(self.hkl_now[0]),\n format_5_decimals(self.hkl_now[1]),\n format_5_decimals(self.hkl_now[2]),\n )\n print(\"\")\n print(\n \"Alpha = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"alpha\"])\n )\n )\n print(\n \"Beta = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"beta\"])\n )\n )\n print(\n \"Psi = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"psi\"])\n )\n )\n print(\n \"Tau = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"tau\"])\n )\n )\n print(\n \"Qaz = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"qaz\"])\n )\n )\n print(\n \"Naz = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"naz\"])\n )\n )\n print(\n \"Omega = {}\".format(\n format_5_decimals(self.pseudo_dict_to_update[\"omega\"])\n )\n )\n print(\"\")\n print(\n \"Del = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"del\"][\"value\"])\n )\n )\n print(\n \"Eta = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"eta\"][\"value\"])\n )\n )\n print(\n \"Chi = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"chi\"][\"value\"])\n )\n )\n print(\n \"Phi = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"phi\"][\"value\"])\n )\n )\n print(\n \"Nu = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"nu\"][\"value\"])\n )\n )\n print(\n \"Mu = {}\".format(\n format_5_decimals(self.experiment_file_dict[\"motors\"][\"mu\"][\"value\"])\n )\n )\n print(\"\")",
"def _publish_odometry(self):\n # only publish if we have a subscriber\n if self._odom_pub.get_num_connections() == 0:\n return\n\n now = rospy.Time.now()\n odom = Odometry()\n odom.header.frame_id = self._odom_frame\n odom.header.stamp = now\n odom.child_frame_id = self._footprint_frame\n odom.pose.pose.position.x = self._vector.pose.position.x * 0.001\n odom.pose.pose.position.y = self._vector.pose.position.y * 0.001\n odom.pose.pose.position.z = self._vector.pose.position.z * 0.001\n q = quaternion_from_euler(.0, .0, self._vector.pose_angle_rad)\n odom.pose.pose.orientation.x = q[0]\n odom.pose.pose.orientation.y = q[1]\n odom.pose.pose.orientation.z = q[2]\n odom.pose.pose.orientation.w = q[3]\n odom.pose.covariance = np.diag([1e-2, 1e-2, 1e-2, 1e3, 1e3, 1e-1]).ravel()\n odom.twist.twist.linear.x = self._lin_vel\n odom.twist.twist.angular.z = self._ang_vel\n odom.twist.covariance = np.diag([1e-2, 1e3, 1e3, 1e3, 1e3, 1e-2]).ravel()\n self._odom_pub.publish(odom)",
"def update(self):\n # combine GPS and compass into [x, y, theta]\n self.globalPose = np.concatenate(\n [self.gpsCallback(), self.compassCallback()], axis=0)\n self.localPose = self.stereoPoseCallback()\n\n self.globalBuffer[self.frameid, :] = self.globalPose\n self.localBuffer[self.frameid, :] = self.localPose\n\n cv2.imwrite(\"image/\" + str(self.frameid) + \".png\", devhub.depthImage)\n \n self.frameid = (self.frameid + 1) % self.bufmax",
"def prepare_estimation(self):\n\n # a. calculate moments\n self.calc_moments()\n\n # b. count moments\n self.par.Nmoms = len(self.moms)\n\n # c. extract covariances\n self.par.cov_moms = np.zeros((self.par.Nmoms,self.par.Nmoms))\n self.par.W = np.zeros((self.par.Nmoms,self.par.Nmoms))\n \n for i,key_i in enumerate(self.moms.keys()):\n for j,key_j in enumerate(self.moms.keys()):\n \n # i. full\n if (key_i,key_j,'cov') in self.datamoms:\n self.par.cov_moms[i,j] = self.datamoms[(key_i,key_j,'cov')]\n else:\n self.par.cov_moms[i,j] = np.nan\n\n # ii. diagonal\n if i == j:\n \n if self.par.use_equal_weighting:\n \n self.par.W[i,j] = 1\n \n else:\n \n self.par.W[i,j] = 1/(self.par.cov_moms[i,j]*self.par.Ndata)\n\n key_weight = (key_i,'weight')\n if key_weight in self.datamoms:\n self.par.W[i,j] *= self.datamoms[key_weight]\n\n # d. compute Ypsilon \n self.par.Ypsilon = self.par.Ndata*self.par.cov_moms",
"def _odom_callback(self, data):\n\t\torientation_q = data.pose.pose.orientation\n\t\t\n\t\torientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n\t\t\n\t\t(self.roll, self.pitch, self.yaw) = euler_from_quaternion (orientation_list)\n\t\tself.x_pos = data.pose.pose.position.x\n\t\tself.y_pos = data.pose.pose.position.y\n\t\tself.z_pos = data.pose.pose.position.z",
"def do_transform_cycle(self):\n points_world = self.transform_points()\n if points_world:\n self.publish_points(points_world)\n else:\n rospy.logwarn(\"No points published, List object is empty, could not transform\")",
"def __next__(self):\n now = self.clock.now()\n\n delta_t = self._get_delta_t(now)\n self.time_last_msg = now\n\n self.yaw_rate += self.random_generator.random() * 0.03 - 0.015\n\n # Let's make acceleration more dynamic!\n if self.acceleration < 1:\n self.acc_function = self._acc_increasing\n elif self.acceleration > 3.5:\n self.acc_function = self._acc_decreasing\n\n self.acceleration += self.acc_function()\n\n self.yaw += delta_t * self.yaw_rate\n self.yaw = self.yaw % (2 * np.pi)\n self.velocity += delta_t * self.acceleration\n self.pos += delta_t * self.velocity\n\n state = np.zeros(Track.STATE_SIZE, dtype='float32')\n\n state[Track.STATE_X_IDX] = self.pos * np.cos(self.yaw)\n state[Track.STATE_Y_IDX] = self.pos * np.sin(self.yaw)\n state[Track.STATE_VELOCITY_X_IDX] = self.velocity * np.cos(self.yaw)\n state[Track.STATE_VELOCITY_Y_IDX] = self.velocity * np.sin(self.yaw)\n state[Track.STATE_ACCELERATION_X_IDX] = self.acceleration * np.cos(self.yaw)\n state[Track.STATE_ACCELERATION_Y_IDX] = self.acceleration * np.sin(self.yaw)\n state[Track.STATE_YAW_IDX] = self.yaw\n state[Track.STATE_YAW_RATE_IDX] = self.yaw_rate\n\n state = self._add_state_noise(state)\n\n dimensions = np.zeros(2, dtype='float32')\n dimensions[Dimensions.DIMENSIONS_WIDTH_IDX] = self.width\n dimensions[Dimensions.DIMENSIONS_LENGHT_IDX] = self.length\n\n return state, dimensions, now",
"def publish():\n car_pose = mux(g['curr_car_state'])\n if car_pose is not None:\n car_pose.header.stamp = rospy.Time.now()\n pub.publish(car_pose)",
"def positions_feed(self):",
"def do_transform_cycle(self):\n points_world = self.transform_points(self.camera_points, self.z)\n if points_world:\n self.publish_points(points_world)\n else:\n rospy.logwarn(\"No points published, List object is empty, could not transform\")",
"def initialize_pose_estimate(self, msg):\n if self.debug:\n print(\"Got initial pose.\")\n self.xy_theta = \\\n self.transform_helper.convert_pose_to_xy_and_theta(msg.pose.pose)\n self.create_particle_cloud(msg.header.stamp)\n self.pose_set = True",
"def send_setpoints(self):\n pose = PoseStamped()\n pose.header.stamp = rospy.Time.now()\n pose.pose.position.x = 0\n pose.pose.position.y = 0\n pose.pose.position.z = 0\n\n rate = rospy.Rate(20.0)\n for i in range(100):\n self.local_pos_pub.publish(pose)\n rate.sleep()\n #rospy.loginfo(pose)",
"def publishCommand(self):\n self.joint_angles_msg.joint_angles = [self.angle_setpoints[key] for key in self.joint_angles_msg.joint_names]\n self.joint_angles_pub.publish(self.joint_angles_msg)\n\n # Update previous setpoint\n for key in self.joint_names:\n self.angle_setpoints_previous[key] = self.angle_setpoints[key]",
"def pid(self):\n\n # Calculating Error for altitude, latitude, longitude\n self.check_obstacle()\n self.waypoint_setter()\n rospy.loginfo(\"##Setpoint:%s, %s, %s\",str(self.setpoint[1]),str(self.setpoint[2]),str(self.setpoint[0]))\n self.error_in_meters()\n self.marker_status()\n self.err[0] = self.setpoint[0] - self.altitude_coord\n self.err[1] = self.setpoint[1] - self.latitude_coord\n self.err[2] = self.setpoint[2] - self.longitude_coord\n\n # Calculating Change in Error for altitude, latitude, longitude\n\n self.changerror[0] = self.err[0] - self.lasterror[0]\n self.changerror[1] = self.err[1] - self.lasterror[1]\n self.changerror[2] = self.err[2] - self.lasterror[2]\n\n # Calculating sum of Error for altitude, latitude, longitude\n\n self.errorsum[0] = (self.errorsum[0] + self.err[0])*self.sample_time\n self.errorsum[1] = (self.errorsum[1] + self.err[1])*self.sample_time\n self.errorsum[2] = (self.errorsum[2] + self.err[2])*self.sample_time\n\n # Calculating Output which is to be sent to attitude_controller.py through edrone/cmd pub\n\n self.output[0] = self.k_p[0]*self.err[0] + self.k_i[0]*self.errorsum[0] + self.k_d[0]*self.changerror[0]/self.sample_time\n self.output[1] = self.k_p[1]*self.err[1] + self.k_i[1]*self.errorsum[1] + self.k_d[1]*self.changerror[1]/self.sample_time\n self.output[2] = self.k_p[2]*self.err[2] + self.k_i[2]*self.errorsum[2] + self.k_d[2]*self.changerror[2]/self.sample_time\n\n # Equation for Throttle , Pitch, Roll and Yaw values for attitude_controller.py\n\n self.drone_cmd = edrone_cmd()\n self.drone_cmd.rcThrottle = self.output[0] + 1500\n self.drone_cmd.rcPitch = 1500 + 6*self.output[1]\n self.drone_cmd.rcRoll = 1500 + 6*self.output[2]\n self.drone_cmd.rcYaw = 1500\n\n # Storing Current error\n\n self.lasterror[0] = self.err[0]\n self.lasterror[1] = self.err[1]\n self.lasterror[2] = self.err[2]\n # Publishing msg drone_cmd\n self.drone_pub.publish(self.drone_cmd)",
"def then(self, new_pose_measurement):\n new_pose_measurement_stability = self.last_pose_measurement_stability + 1 \\\n if new_pose_measurement.front_measurement == self.last_pose_measurement.front_measurement \\\n else 0\n\n new_facing = self.facing\n new_stable_measurement = self.stable_pose_measurement\n new_rotations = self.rotations\n\n if new_pose_measurement_stability >= 3:\n new_stable_measurement = new_pose_measurement\n\n # if the front side changed, use the new side to figure out which quarter X or Y rotation happened\n # TODO: what about quick 180s?\n if new_facing.current_front != new_pose_measurement.front_measurement.current_front:\n adj = [(new_facing.rotated_by(r), r)\n for r in [Rotation(x=0.25), Rotation(x=-0.25), Rotation(y=0.25), Rotation(y=-0.25)]]\n for facing, rotation in adj:\n if facing.current_front == new_pose_measurement.front_measurement.current_front:\n new_facing = facing\n new_rotations = Rotation.plus_rotation_simplified(new_rotations, rotation)\n\n # if diagonals are flipped, guess which Z rotation happened based on the last stable measured angle\n if new_facing.is_top_right_darker() != new_pose_measurement.front_measurement.is_top_right_darker:\n advance = self.stable_pose_measurement.angle < 0\n r = Rotation(z=0.25 if advance else -0.25)\n new_rotations = Rotation.plus_rotation_simplified(new_rotations, r)\n new_facing = new_facing.z()\n if not advance:\n new_facing = new_facing.z().z()\n\n return PoseTrack(new_facing,\n new_stable_measurement,\n new_pose_measurement,\n new_pose_measurement_stability,\n new_rotations)",
"def update(self):\n\n self.storm_duration = self.get_precipitation_event_duration()\n self.interstorm_duration = self.get_interstorm_event_duration()\n self.storm_depth = self.get_storm_depth()\n self.intensity = self.get_storm_intensity()",
"def publish_hybrid_position(self):\n odometry_message = Odometry()\n odometry_message.header.frame_id = self._tf_prefix + \"base_link\"\n\n (odometry_message.pose.pose.position.x, odometry_message.pose.pose.position.y,\n odometry_message.pose.pose.position.z) = self._current_xyz\n\n (odometry_message.pose.pose.orientation.x, odometry_message.pose.pose.orientation.y,\n odometry_message.pose.pose.orientation.z,\n odometry_message.pose.pose.orientation.w) = self._current_quaternion\n\n odometry_message.twist.twist.angular = self._current_angular\n odometry_message.twist.twist.angular = self._current_linear\n\n self._odometry_pub.publish(odometry_message)",
"def get_real_position(self, unit='volts'):\n with nidaqmx.Task() as fsm_task:\n fsm_task.ai_channels.add_ai_voltage_chan(self.ai_chan['x'], 'FSM x axis')\n fsm_task.ai_channels.add_ai_voltage_chan(self.ai_chan['y'], 'FSM y axis')\n target_x, target_y = fsm_task.read()\n\n curr_x = target_x\n curr_y = target_y\n self.go_to_position(self.volts_to_micron(curr_x,'x'),self.volts_to_micron(curr_y,'y'))\n \n threshold = 0.005 # volt\n \n # repeat at most 3 times\n for i in list(range(3)):\n \n with nidaqmx.Task() as fsm_task:\n fsm_task.ai_channels.add_ai_voltage_chan(self.ai_chan['x'], 'FSM x axis')\n fsm_task.ai_channels.add_ai_voltage_chan(self.ai_chan['y'], 'FSM y axis')\n curr_x2, curr_y2 = fsm_task.read()\n if max(abs(target_x - curr_x2),abs(target_y - curr_y2))< threshold:\n break\n\n curr_x += target_x - curr_x2\n curr_y += target_y - curr_y2\n self.go_to_position(self.volts_to_micron(curr_x,'x') ,self.volts_to_micron(curr_y,'y') )\n\n# self.go_to_position(self.volts_to_micron(curr_x,'x') +(curr_x-curr_x2)*self.conversion['x'] ,self.volts_to_micron(curr_y,'y')+(curr_y-curr_y2)*self.conversion['y'])\n\n return self.return_position(unit)",
"def get_panorama_done(self):\n self.calculate_ransac_parameters()\n for i in range(0,4,1):\n self.perform_ransac((str(i),str(i+1),str(i)+str(i+1)))\n self.get_product_homography()\n self.get_panorama_image(('02','12','22','32','42'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
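A minimal publish sketch related to the finish_loop document above, assuming a ROS 1 setup with rospy, geometry_msgs and tf available; the 6x1 state layout [x, y, z, roll, pitch, yaw], the topic name and the helper name publish_pose are illustrative assumptions, and quaternion_from_euler stands in for the unshown get_quaternion helper.

import rospy
from geometry_msgs.msg import PoseStamped, Point, Quaternion
from tf.transformations import quaternion_from_euler

def publish_pose(pub, x_est):
    # x_est assumed to be a 6x1 column vector: x, y, z, roll, pitch, yaw
    msg = PoseStamped()
    msg.header.frame_id = '/map'
    msg.header.stamp = rospy.Time.now()
    msg.pose.position = Point(x_est[0, 0], x_est[1, 0], x_est[2, 0])
    qx, qy, qz, qw = quaternion_from_euler(x_est[3, 0], x_est[4, 0], x_est[5, 0])
    msg.pose.orientation = Quaternion(qx, qy, qz, qw)
    pub.publish(msg)

# Creating the publisher once (e.g. in __init__) rather than on every cycle avoids
# re-registering the topic on each call:
# pub = rospy.Publisher('/ekf_pose', PoseStamped, queue_size=10)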
Error-handling wrapper around pythonaddins.GPToolDialog. | def toolDialog(toolbox, tool):
result = None
try:
result = pythonaddins.GPToolDialog(toolbox, tool)
# FIXME: this is a hack to prevent:
# TypeError: GPToolDialog() takes at most 1 argument (2 given)
# print ''
except TypeError:
pass
# don't return anything. this prevents:
# TypeError: GPToolDialog() takes at most 1 argument (2 given)
return result | [
"def popupBadEventError(self):\n keys_origin = (\"Time\", \"Latitude\", \"Longitude\", \"Depth\",\n \"used P Count\", \"used S Count\")\n keys_magnitude = (\"Magnitude\",)\n missing = [key for key in keys_origin if key not in self.dictOrigin]\n missing += [key for key in keys_magnitude if key not in self.dictMagnitude]\n missing = \"\\n\".join(missing)\n err = \"The sysop event to submit misses some mandatory information:\"\n print >> sys.stderr, err\n print >> sys.stderr, missing\n qMessageBox = QtGui.QMessageBox()\n qMessageBox.setWindowIcon(QtGui.QIcon(QtGui.QPixmap(\"obspyck.gif\")))\n qMessageBox.setIcon(QtGui.QMessageBox.Critical)\n qMessageBox.setWindowTitle(\"SysOp Event with Missing Information!\")\n qMessageBox.setText(err)\n qMessageBox.setInformativeText(missing)\n qMessageBox.setStandardButtons(QtGui.QMessageBox.Abort)\n qMessageBox.exec_()",
"def __showError(self, out):\n self.errorGroup.show()\n self.errors.insertPlainText(out)\n self.errors.ensureCursorVisible()\n \n if not self.__hgClient:\n # show input in case the process asked for some input\n self.inputGroup.setEnabled(True)\n self.inputGroup.show()",
"def error_msg(self):\r\n messagebox.showerror(\"Ok\", \"The Start and the End Destination are same or not specified\")\r\n self.select_menu()",
"def prevent_dialog_box():\n import ctypes\n SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN\n old_err_mode = ctypes.windll.kernel32.GetErrorMode()\n new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX\n ctypes.windll.kernel32.SetErrorMode(new_err_mode)\n yield\n ctypes.windll.kernel32.SetErrorMode(old_err_mode)",
"def __show_error_dialog(self):\n dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_ERROR,\n gtk.BUTTONS_NONE, None)\n dialog.format_secondary_text(constants.MESSAGE_0012)\n dialog.set_markup(constants.MESSAGE_0011)\n dialog.add_button(gtk.STOCK_OK, 1)\n dialog.run()\n dialog.destroy()",
"def _options_dialog_toolbox(*args, **kwargs) -> Any:\n pass",
"async def on_tool_error(\n self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n ) -> None:",
"def handle_error(self):\n\t\terror = self.get_error()\n\t\tclick.echo('\\n'.join(error.level(self.verbosity)))\n\t\tif isinstance(error, UsageError) and self.usage:\n\t\t\tclick.echo(self.usage)",
"def _reportErrors(self, msg) :\n self.help()\n print msg\n print self._line(\"-\")\n if not self.inhibitExceptions :\n raise ScriptInputError, msg",
"def exit_with_geoproc_error(filename):\r\n tb = sys.exc_info()[2] # get the traceback object\r\n # tbinfo contains the error's line number and the code\r\n tbinfo = traceback.format_tb(tb)[0]\r\n line = tbinfo.split(\", \")[1]\r\n\r\n gp.AddError(\"Geoprocessing error on **\" + line + \"** of \" + filename +\r\n \" :\")\r\n for msg in range(0, gp.MessageCount):\r\n if gp.GetSeverity(msg) == 2:\r\n gp.AddReturnMessage(msg)\r\n print gp.AddReturnMessage(msg)\r\n exit(0)",
"def task_dialog(msg):\n\n window = UI.TaskDialog('Edit crop')\n window.TitleAutoPrefix = False\n\n window.MainIcon = UI.TaskDialogIcon.TaskDialogIconError\n window.MainInstruction = 'Error'\n window.MainContent = msg\n\n window.CommonButtons = UI.TaskDialogCommonButtons.Ok\n window.Show()",
"def error_print():\n print(\"ERROR: Invalid Entry!\")",
"def _options_dialog(*args, **kwargs) -> Any:\n pass",
"def error(s):\n print('Robotics toolbox error:', s)\n\n #traceback.print_exc();\n raise ValueError",
"def littleDialog():\r\n psm = uno.getComponentContext().ServiceManager\r\n dp = psm.createInstance(\"com.sun.star.awt.DialogProvider\")\r\n dlg = dp.createDialog(\"vnd.sun.star.script:Standard.Dialog1?location=application\")\r\n dlg.execute()\r\n return None",
"def invalid_command(response)->None:\n print(\"Sorry; '\" + response + \"' isn't a valid command. Please enter an option from the menu.\")",
"def showError(errormessage):\r\n messagebox.showerror(\"WinRAT\", errormessage)",
"def raise_geoproc_error(filename):\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n line = tbinfo.split(\", \")[1]\n gp.AddError(\"Geoprocessing error on **\" + line + \"** of \" + filename +\n \" :\")\n if not arcpy.GetMessages(2) == \"\":\n arcpy.AddError(arcpy.GetMessages(2))\n write_log(arcpy.GetMessages(2))\n\n gprint('\\nTry a new output directory. Sometimes that does the trick.') \n exit(0)",
"def checkerror(self):\n error = self.qERR()\n if error:\n raise pipython.GCSError(error)",
"def test_cli_invalid_option(self):\n returncode, output = run_cli(main, \"-x\", merged=True)\n assert returncode != 0\n assert \"Error:\" in output"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
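A hedged usage sketch for the toolDialog wrapper above, as it might be wired into an ArcGIS Desktop Python add-in button; the toolbox path and tool name are placeholders rather than values from the source, and the surrounding module is assumed to import pythonaddins as the wrapper requires.

class OpenToolButton(object):
    # Illustrative add-in button class; ArcGIS Desktop calls onClick when pressed.
    def onClick(self):
        # The wrapper swallows the spurious TypeError raised by GPToolDialog
        # and returns its result (or None).
        toolDialog(r"C:\Tools\Example.tbx", "ExampleTool")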
Calculate the Neural Representational Similarity (NPS) for fMRI data within an ROI | def nps_fmri_roi(fmri_data, mask_data):
if len(np.shape(fmri_data)) != 5 or np.shape(fmri_data)[0] != 2:
print("\nThe shape of fmri data should be [2, n_subs, nx, ny, nz].\n")
return "Invalid input!"
if len(np.shape(mask_data)) != 3:
print("\nThe shape of fmri data should be [nx, ny, nz].\n")
return "Invalid input!"
print("\nComputing NPS")
# get the number of subjects and the size of the fMRI-img
nsubs, nx, ny, nz = fmri_data.shape[1:]
# record the number of valid voxels in ROI
n = 0
for i in range(nx):
for j in range(ny):
for k in range(nz):
# not 0 or NaN
if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) == False):
n = n + 1
# initialize the data for calculating the NPS
data = np.zeros([2, nsubs, n], dtype=float)
# assignment
for p in range(2):
for q in range(nsubs):
# record the index of the valid voxels for calculating
n = 0
for i in range(nx):
for j in range(ny):
for k in range(nz):
# not 0 or NaN
if (mask_data[i, j, k] != 0) and (math.isnan(mask_data[i, j, k]) == False):
data[p, q, n] = fmri_data[p, q, i, j, k]
n = n + 1
# shape of data: [2, nsubs, n] -> [nsubs, 2, n]
data = np.transpose(data, (1, 0, 2))
# initialize the NPS
subnps = np.zeros([nsubs, 2])
# calculate the Pearson Coefficient
for sub in range(nsubs):
subnps[sub] = pearsonr(data[sub, 0], data[sub, 1])
print("\nComputing finished!")
return subnps | [
"def psnr(images, finalpred):\n pixel_max = 255.0\n mse = np.mean((images-finalpred)**2)\n p = 20 * math.log10( pixel_max / math.sqrt( mse ))\n return p",
"def compute_PSNR(out, lbl):\n out = out[0, :, :, 0]\n lbl = lbl[0, :, :, 0]\n diff = out - lbl\n rmse = np.sqrt(np.mean(diff**2))\n psnr = 20*np.log10(255/rmse)\n return psnr",
"def cal_psnr(img1, img2):\n return measure.compare_psnr(img1, img2)",
"def _isnr(self, original, noisy, restore):\n return 10.0 * np.log10(F.norm_fro(original, noisy) / F.norm_fro(original, restore))",
"def psnr_calc(noisy, real):\n numpix = noisy.size(1)*noisy.size(2)*noisy.size(3)\n bs = noisy.size(0)\n avg_sq_norm = (1/numpix)*torch.norm(0.5*(noisy.view(bs, -1)- real.view(bs,-1)), dim = 1)**2#multiplication by 0.5 because vals between [-1,1]\n psnrs = -10*torch.log10(avg_sq_norm)\n return psnrs, torch.tensor([torch.mean(psnrs), torch.std(psnrs)])",
"def predict_rating(self, movie):\n\n other_ratings = movie.ratings\n\n similarities = [\n (self.similarity(r.user), r)\n for r in other_ratings\n ]\n\n similarities.sort(reverse=True)\n\n similarities = [(sim, r) for sim, r in similarities if sim > 0]\n\n if not similarities:\n return None\n\n numerator = sum([r.score * sim for sim, r in similarities])\n denominator = sum([sim for sim, r in similarities])\n\n return numerator/denominator\n\n\n #this is the one we wrote",
"def compute_similarities(self,dataset,j):\r\n pass",
"def score_rent():\n X_train, X_test, y_train, y_test = process_data(download_data())\n\n X_train, X_test, y_train, y_test = feature_selection(\n X_train, X_test, y_train, y_test)\n\n X_test, y_test, predicted = predict_rent(X_train, X_test, y_train, y_test)\n Rs = r2_score(y_test, predicted)\n print('R Square: ', Rs)\n return Rs",
"def _compute_similarities(preds: List[Tuple[torch.FloatTensor, torch.FloatTensor]]) -> torch.FloatTensor:\n return -AlignmentMetric._compute_cost(preds)",
"def n_neuron(self):\n pass",
"def compute_semantic_similarity(predictions_list, output_path, dataset, perplexity=40):\n nlp = spacy.load(\"en_core_web_md\")\n labels = dataset.labels\n\n gt_labels = {e for pl in predictions_list for e in pl['groundTruth']['labels']}\n pred_labels = {e for pl in predictions_list for e in pl['predictions']['labels']}\n used_labels = list(gt_labels.union(pred_labels))\n\n #embeddings = np.array([nlp(label).vector for label in [labels[l] for l in used_labels]])\n embeddings = np.array([nlp(label).vector for label in labels])\n tsne_embedding = TSNE(n_components=2, perplexity=perplexity).fit_transform(embeddings)\n\n for index, image in enumerate(predictions_list):\n generate_semantic_map(index, image, output_path, tsne_embedding, labels, used_labels)",
"def denoise_images(self):\n\n tol = 1e-5\n accuracies = []\n self.denoised_images = np.zeros(self.noisy_train_images.shape)\n for im in np.arange(self.noisy_train_images.shape[0]):\n print(im)\n new_pi_array = np.zeros((self.side_length, self.side_length))\n while True:\n for y in np.arange(self.side_length):\n for x in np.arange(self.side_length):\n first_term, second_term = self.get_terms((x, y), im)\n # print(\"Old pi\", self.pi[im, x, y])\n new_pi = first_term/(first_term + second_term)\n # print(\"New pi\", new_pi)\n new_pi_array[x, y] = new_pi\n if abs(np.sum(new_pi_array - self.pi[im, :, :])) < tol:\n break\n else:\n self.pi[im, :, :] = new_pi_array\n pi_list = self.pi[im, :, :].flatten().tolist()\n dn_im = np.array([1 if x >= 0.5 else -1 for x in pi_list]).reshape((self.side_length, self.side_length))\n self.denoised_images[im, :, :] = dn_im\n # plt.figure(1)\n # plt.imshow(self.noisy_train_images[im, :, :])\n # plt.show(1)\n # plt.figure(1)\n # plt.imshow(dn_im)\n # plt.show(1)\n\n # print(\"Accuracy: \", np.sum(self.orig_train_images[im, :, :] == self.denoised_images[im, :, :])/(self.side_length*self.side_length))\n accuracies += [np.sum(self.orig_train_images[im, :, :] == self.denoised_images[im, :, :])/(self.side_length*self.side_length)]\n self.accuracies = accuracies\n best_index = np.argmax(accuracies)\n print(\"The best image is: \", best_index)\n plt.figure(1)\n plt.subplot(3, 1, 1)\n plt.imshow(self.orig_train_images[best_index, :, :])\n plt.subplot(3, 1, 2)\n plt.imshow(self.noisy_train_images[best_index, :, :])\n plt.subplot(3, 1, 3)\n plt.imshow(self.denoised_images[best_index, :, :])\n plt.show(1)\n\n worst_index = np.argmin(accuracies)\n print(\"The worst image is: \", worst_index)\n plt.figure(2)\n plt.subplot(3, 1, 1)\n plt.imshow(self.orig_train_images[worst_index, :, :])\n plt.subplot(3, 1, 2)\n plt.imshow(self.noisy_train_images[worst_index, :, :])\n plt.subplot(3, 1, 3)\n plt.imshow(self.denoised_images[worst_index, :, :])\n plt.show(2)",
"def microF1(self):\n results = [[0,0,0],[0,0,0],[0,0,0]] # the confusing matrix for all categories\n #rows are actual classes; columns are predicted classes\n for docid in self.test_class:\n row = 3\n col = 3\n for i in xrange(3):\n if self.true_test_class[docid] == self.cat[i]:\n row = i\n if self.test_class[docid] == self.cat[i]:\n col = i\n if row < 3 and col < 3:\n results[row][col] += 1\n else:\n print \"microF1: docid not found: \", docid, \"row: \", \"column: \", col\n TP = [0,0,0]\n FN = [0,0,0]\n FP = [0,0,0]\n for i in xrange(3):\n TP[i] = results[i][i]\n for j in xrange(3):\n if j == i:\n pass\n else:\n FN[i] += results[i][j]\n FP[i] += results[j][i]\n total_TP = sum(TP)\n total_FP = sum(FP)\n total_FN = sum(FN)\n P = total_TP / float(total_TP + total_FP)\n R = total_TP / float(total_TP + total_FN)\n F1 = 2 * P * R / float(P + R)\n #print the results matrix\n print \"-------------------the actual class (row) vs. the predicted class (column)---------------\"\n for i in xrange(3):\n print self.cat[i],\"\\t\",\n for j in xrange(3):\n print results[i][j],\"\\t\",\n print \"\\n\"\n print \"----------------TP, FP, FN----------------\"\n for i in xrange(3):\n print self.cat[i],\"\\t\",TP[i], FP[i], FN[i]\n print \"the mircoaveraged F1 is: \", F1\n \n return F1",
"def compute_psnr(reference_uint8, mean_training, std_training, variational_ae, path_to_reconstruction):\n # The function `svhn.svhn.preprocess_svhn` checks\n # that `reference_uint8.dtype` is equal to `numpy.uint8`\n # and `reference_uint8.ndim` is equal to 2.\n reference_float64 = svhn.svhn.preprocess_svhn(reference_uint8,\n mean_training,\n std_training)\n reconstruction_float64 = variational_ae.forward_pass(reference_float64)[5]\n rec_rescaled_float64 = reconstruction_float64*std_training + \\\n numpy.tile(mean_training, (reference_uint8.shape[0], 1))\n reconstruction_uint8 = tls.cast_float_to_uint8(rec_rescaled_float64)\n psnr = tls.mean_psnr(reference_uint8, reconstruction_uint8)\n tls.visualize_rows(reconstruction_uint8,\n 32,\n 32,\n 10,\n path_to_reconstruction)\n return psnr",
"def estimate_snr(images):\n\n if len(images.shape) == 2: # in case of a single projection\n images = images[:, :, None]\n\n p = images.shape[1]\n n = images.shape[2]\n\n radius_of_mask = p // 2 - 1\n\n points_inside_circle = disc(p, r=radius_of_mask, inner=True)\n num_signal_points = np.count_nonzero(points_inside_circle)\n num_noise_points = p * p - num_signal_points\n\n noise = np.sum(np.var(images[~points_inside_circle], axis=0)) * num_noise_points / (num_noise_points * n - 1)\n\n signal = np.sum(np.var(images[points_inside_circle], axis=0)) * num_signal_points / (num_signal_points * n - 1)\n\n signal -= noise\n\n snr = signal / noise\n\n return snr, signal, noise",
"def scores(self, y_pred, y_true ): \n u = ((y_true - y_pred) ** 2).sum(axis=-1)\n v = ((y_true - y_true.mean(axis=-1)[None].T) ** 2).sum(axis=-1)\n r_2 = 1 - u/v\n return r_2",
"def compute_similarities(self):\n\n construction_func = {'cosine': sims.cosine,\n 'msd': sims.msd,\n 'pearson': sims.pearson,\n 'pearson_baseline': sims.pearson_baseline}\n\n if self.sim_options['user_based']:\n n_x, yr = self.trainset.n_users, self.trainset.ir\n else:\n n_x, yr = self.trainset.n_items, self.trainset.ur\n\n min_support = self.sim_options.get('min_support', 1)\n\n args = [n_x, yr, min_support]\n\n name = self.sim_options.get('name', 'msd').lower()\n if name == 'pearson_baseline':\n shrinkage = self.sim_options.get('shrinkage', 100)\n bu, bi = self.compute_baselines()\n if self.sim_options['user_based']:\n bx, by = bu, bi\n else:\n bx, by = bi, bu\n\n args += [self.trainset.global_mean, bx, by, shrinkage]\n\n try:\n if getattr(self, 'verbose', False):\n print('Computing the {0} similarity matrix...'.format(name))\n sim = construction_func[name](*args)\n if getattr(self, 'verbose', False):\n print('Done computing similarity matrix.')\n return sim\n except KeyError:\n raise NameError('Wrong sim name ' + name + '. Allowed values ' +\n 'are ' + ', '.join(construction_func.keys()) + '.')",
"def batch_psnr(val_images_pred, val_images_true):\n psnr_list = []\n for im_pred, im_true in zip(val_images_pred, val_images_true):\n psnr_i = tf.image.psnr(im_pred/im_pred.max(), im_true/im_true.max(), 1.0).numpy()\n psnr_list.append(round(psnr_i, 3))\n return psnr_list",
"def learn(self):\n allUsers=set(self.df['review_profilename'])\n self.sim = {}\n for person1 in allUsers:\n self.sim.setdefault(person1, {})\n for person2 in allUsers:\n # no es comparem am nosalres mateixos\n if person1==person2: continue\n \n self.sim.setdefault(person2, {})\n if(self.sim[person2].has_key(person1)):continue # since is a simetric matrix\n sim=self.sim_method(self.df,person1,person2)\n if(sim<0):\n self.sim[person1][person2]=0\n self.sim[person2][person1]=0\n else:\n self.sim[person1][person2]=sim\n self.sim[person2][person1]=sim"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
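A small usage sketch for nps_fmri_roi with synthetic data, assuming numpy is imported as np and scipy.stats.pearsonr is in scope as the function requires; the shapes follow the checks at the top of the function.

import numpy as np

# 2 conditions, 5 subjects, a 4x4x4 volume, and a mask that keeps every voxel
fmri_data = np.random.randn(2, 5, 4, 4, 4)
mask_data = np.ones([4, 4, 4])

subnps = nps_fmri_roi(fmri_data, mask_data)
print(subnps.shape)  # (5, 2): one (correlation, p-value) pair per subject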
Produce random RGB colors for graphs. | def random_colors():
def r():
return random.randint(0, 255)
return 'rgb({},{},{})'.format(r(), r(), r()) | [
"def random_color_gen():\n r = lambda: random.randint(0, 255)\n return 'ff%02X%02X%02X' % (r(), r(), r())",
"def randColor():\n h = 0.3\n v = 0.85\n s = 0.9\n \n for c in startColors:\n yield c\n\n while True:\n \n def toHex(x):\n return hex(int(x*255))[2:]\n \n r, g, b = hsv_to_rgb(h, s, v)\n \n yield u'#' + toHex(r) + toHex(g) + toHex(b)\n\n h += 2 / (1+math.sqrt(5))\n h = h - 1 if h > 1 else h\n #v = 1-v",
"def random_color():\n r = lambda: random.randint(0,255)\n return('#%02X%02X%02X' % (r(),r(),r()))",
"def random_color():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n\n return r, g, b",
"def random_color():\n return systemrandom.randint(0x000000, 0xFFFFFF)",
"def rand_color() -> list:\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n import random\n\n return [random.randrange(256), random.randrange(256), random.randrange(256)]",
"def getRandomColor():\n color = \"#\"\n for number in range(6):\n color += toHexChar(randint(0, 15))\n return color",
"def randomColor():\n return Color((_random.randint(0,255),_random.randint(0,255),_random.randint(0,255)))",
"def make_random_color(self):\n \n color = \"#\"\n for number in range(6):\n color += \"%01x\" % random.randint(6, 0xD)\n return color",
"def randomcolor(eps=.1):\n r = round(random()/eps)*eps\n g = round(random()/eps)*eps\n b = round(random()/eps)*eps\n return (r,g,b)",
"def get_rand_color(self):\n color_min = 200\n self.color = list(numpy.random.randint(0, 255, 3))\n i = 0\n while sum(self.color) < color_min:\n self.color = list(numpy.random.randint(10, 255, 3))\n if i == 10:\n break\n i += 1\n return self.color",
"def get_random_color():\n R = random.randint(200, 250)\n G = random.randint(200, 250)\n B = random.randint(200, 250)\n random_rgb = (R, G, B)\n return random_rgb",
"def _generate_colors(color):\n\n r = 0\n g = 0\n b = 0\n if color == \"red\":\n r = np.random.uniform(0.7, 1)\n elif color == \"blue\":\n b = np.random.uniform(0.7, 1)\n elif color == \"green\":\n g = np.random.uniform(0.7, 1)\n elif color == \"purple\":\n r = np.random.uniform(0.425, 0.575)\n b = np.random.uniform(0.425, 0.575)\n elif color == \"white\":\n r = np.random.uniform(0.9, 1)\n g = np.random.uniform(0.9, 1)\n b = np.random.uniform(0.9, 1)\n elif color == \"black\":\n r = np.random.uniform(0, 0.1)\n g = np.random.uniform(0, 0.1)\n b = np.random.uniform(0, 0.1)\n return [r, g, b]",
"def randcolor():\n r = random(0.0, 1.0)\n g = random(0.0, 1.0)\n b = random(0.0, 1.0)\n return vector(r, g, b) # A color is a three-element vector",
"def gen_color():\n import colorsys\n golden_ratio = 0.618033988749895\n h = 0.22717784590367374\n\n while 1:\n h += golden_ratio\n h %= 1\n HSV_tuple = [h, 0.95, 0.95] # this defines how \"deep\" are the colors\n RGB_tuple = colorsys.hsv_to_rgb(*HSV_tuple)\n yield map(lambda x:str(int(x * 256)), RGB_tuple)",
"def change_colors():\n global t,u,v,w,x,y,z\n t = randint(0,27)\n u = randint(0,27)\n v = randint(0,27)\n w = randint(0,27)\n x = randint(0,27)\n y = randint(0,27)\n z = randint(0,27)\n return t,u,v,w,x,y,z,",
"def colorGenerator():\n import colorsys\n while True:\n for luma in (0.8, 0.5):\n for hue in (0.66, 0, 0.33, 0.75, 0.15):\n yield rrd.Color(hsv=(hue,1,luma))",
"def get_colormap(num_agents):\n colors = cm.get_cmap('jet', num_agents)\n colors = colors(range(num_agents))\n np.random.shuffle(colors)\n return colors",
"def _next_colour():\n return tuple(numpy.concatenate(\n (numpy.random.choice(range(256), size=3) / 256, [1.0])))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
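A short usage sketch for random_colors, e.g. to color chart traces; the seed is only there to make a demo repeatable and is not part of the source.

import random

random.seed(0)  # optional, for a repeatable demo
trace_colors = [random_colors() for _ in range(3)]
print(trace_colors)  # three 'rgb(r,g,b)' strings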
Return a list of tuples with counted offenses. | def offense_counter(offense_list):
sum_offense = Counter()
for offense in offense_list:
if offense is None:
continue
sum_offense[offense] += 1
return sum_offense.most_common() | [
"def get_occurrences():",
"def list_itemcnt(a_list):\n return list(Counter(a_list).items())",
"def hits(clues):\n return sum([clue.getCount() for clue in clues])",
"def _offer_counter(self):\n self._offer_count += 1\n return (self.name, self._offer_count)",
"def calorie_list(list_of_events):\n the_list = []\n for e in list_of_events:\n val = 0\n for food in e.consumption_set.all():\n val += int(float(food.of_energy.kcal) \\\n * float(Fraction(food.quantity)))\n the_list.append(val)\n return the_list",
"def occurrences(self):\r\n\t\treturn find_occurrences(self.dataset)",
"def count_item_sales():\r\n sheet_data = read_sheet_data(config.get(\"sheet1_title_range\"))\r\n return Counter(list(chain.from_iterable(sheet_data[\"values\"])))",
"def hobby_counter(people):\n return Counter([one_hobby\n for one_person in people\n for one_hobby in one_person['hobbies']])",
"def counts(self):\n updated, merged, removed = 0, 0, 0\n for r, action in self._results.itervalues():\n if r is None:\n updated += 1\n elif r == 0:\n if action == ACTION_REMOVE:\n removed += 1\n else:\n merged += 1\n return updated, merged, removed",
"def total_usages(df):\n\n # Find total usages and usages per service\n usages = len(df)\n usages_per_service = df.groupby('bot')['id'].count().to_dict()\n \n # Return the result\n return usages, usages_per_service",
"def voteCounter(ballots, empty_ballot, data):\n votes = []\n rects = [i['rect'] for i in data]\n\n for ballot in ballots:\n warped = autoWarpImage(empty_ballot, ballot)\n point = detectMark(warped, rects)\n vote = getVote(point, data)\n print(vote)\n votes.append(vote)\n\n counts = dict(Counter(votes))\n return votes, counts",
"def count_items(data):\n # Create a counter object\n counts = Counter(data)\n # Sort by highest count first and place in ordered dictionary\n counts = sorted(counts.items(), key=lambda x: x[1], reverse=True)\n counts = OrderedDict(counts)\n return counts",
"def get_frequencies(self, rating_choices):\n frequencies = dict(self.values_list(\"rating\").annotate(Count(\"rating\")))\n return [(choice, frequencies.get(choice, 0)) for choice in rating_choices]",
"def docids_count():",
"def _attendees_counts(self):\n return pd.Series(dict(\n self.attendee_queryset.values(\n 'email'\n ).annotate(\n count=Count('*')\n ).values_list(\n 'email',\n 'count'\n )\n ))",
"def _otus_abundant(freq_counts, rare_threshold):\n return freq_counts[rare_threshold + 1:].sum()",
"def count_friends(users):\r\n \r\n cfriends = Counter()\r\n for e in users:\r\n cfriends.update(e['friends'])\r\n return cfriends\r\n \r\n pass",
"def countTakeovers(self):\n count = 0\n for suit in self.suitList:\n if suit.attemptingTakeover:\n count += 1\n return count",
"def counters ( self ) :\n return self._counters"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
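A quick usage sketch for offense_counter, assuming the module-level `from collections import Counter` the function relies on; the offense labels are made up.

offenses = ['THEFT', 'ASSAULT', None, 'THEFT']
print(offense_counter(offenses))  # [('THEFT', 2), ('ASSAULT', 1)] -- None entries are skipped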
Return a dictionary of crimes broken down by subcategories. | def crime_category_breakdown():
db_request = main_db_call()
all_crimes = [item[0] for item in db_request]
sub_offense = offense_counter(all_crimes)
sub_pie = color_applicator(sub_offense)
sub_dict = {}
for i, thing in enumerate(sub_pie):
for key, category in UPPER_DICT.items():
if sub_pie[i][0] in category:
sub_dict.setdefault(key, [])
sub_dict[key].append(sub_pie[i])
return sub_dict | [
"def parse_coco_categories(categories):\n cat_map = {c[\"id\"]: c for c in categories}\n\n classes = []\n supercategory_map = {}\n for cat_id in range(max(cat_map) + 1):\n category = cat_map.get(cat_id, None)\n try:\n name = category[\"name\"]\n except:\n name = str(cat_id)\n\n classes.append(name)\n if category is not None:\n supercategory_map[name] = category\n\n return classes, supercategory_map",
"def get_categories():\n job_categories = {}\n with open(job_category_file) as inf:\n for line in inf:\n data = tkutil.parse_line(line)\n job_categories[data[0]] = ((data[1], JobType.WORK if data[2] == '1' else JobType.NON_WORK))\n return job_categories",
"def make_coco_categories():\n cats = []\n for i, bdd_class in enumerate(BDD_CLASSES):\n cat = {\n \"supercategory\": 'none',\n \"id\": i + 1,\n \"name\": bdd_class\n }\n cats.append(cat)\n return cats",
"def _get_categories_dict(self):\n cat_vals = {}\n i = 0\n for cat in self.categories:\n cat_vals[self.cat_cols[i]] = cat\n i += 1\n return cat_vals",
"def _compute_category_sets() -> dict:\n category_sets = {}\n for cat in cat_store.get_usable_cats():\n children = {c for c in cat_store.get_children(cat) if cat_store.is_usable(c)}\n children_docs = {c: _remove_by_phrase(cat_nlp.parse_category(c)) for c in children}\n child_sets = _find_child_sets(cat, children_docs)\n if child_sets:\n category_sets[cat] = child_sets\n return category_sets",
"def __get_categories(self) -> dict:\n response = requests.get(\n 'https://api.youneedabudget.com/v1/budgets/' +\n self.state['budget_id']+'/categories',\n headers={\n 'Authorization': 'Bearer ' + YNAB_TOKEN\n }\n )\n\n # TODO: Add error handling\n return json.loads(response.content)['data']['category_groups']",
"def get_cvat_categories(self):\n cvat_cats = []\n for cat_meta in self.cats.values():\n cvat_cats.append({\"name\": cat_meta[\"name\"], \"color\": \"\", \"attributes\": []})\n return cvat_cats",
"def to_coco(self):\n for cid, node in self.id_to_node.items():\n # Skip if background already added\n cat = {\n 'id': cid,\n 'name': node,\n }\n parents = list(self.graph.predecessors(node))\n if len(parents) == 1:\n cat['supercategory'] = parents[0]\n else:\n if len(parents) > 1:\n raise Exception('not a tree')\n yield cat",
"def categories_dict():\n items = db_helper.get_categories()\n return [item.serialize for item in items]",
"def getAllRulesWithDetailByCateReq (self):\n l = {}\n\n for category in self.title:\n curmaincate = category.keys()\n maincate = curmaincate[0]\n\n subcateid = -1\n subcategories = category.values()[0]\n if len(subcategories) > 0:\n for i in range(0, len(subcategories)):\n subcate = subcategories[i]\n\n if i == 0:\n l[maincate] = []\n\n entry = {subcate : self.getRuleDetailByCategoryReq(maincate, subcateid)}\n l[maincate].append(entry)\n else:\n l[maincate] = self.getRuleDetailByCategoryReq(maincate, subcateid)\n\n return l",
"def dumpCategories(self):\n categs = []\n for key_card in self.library:\n categs += [ca for ca in self.library[key_card].categ if ca not in categs]\n # categs_card = self.library[key_card].categ\n # for categ in categs_card:\n # if categ not in dict_categs.values():\n # dict_categs[key_counter] = categ\n # key_counter += 1\n # now we have list of categories, put this in a numbered fashion in a dictionary\n dict_categs = {i+1:categs[i] for i in range(len(categs))}# dictionary with structure {1: 'categoryA', 2:'categoryB',... }\n # print it\n print(\"\")\n for num_ca in dict_categs:\n print(\"{0}) {1}\".format(num_ca, dict_categs[num_ca]))\n # return it\n return dict_categs",
"def get_main_categories(self):\n categories = []\n for category in self.main_categories_lst:\n category_dict = dict(label=category, value=category)\n categories.append(category_dict)\n return categories",
"def parse_category(element):\n category = {\n 'name': element.find('name').text,\n 'notes': element.find('notes').text,\n 'revision': element.find('revision').text,\n 'category_id': element.get('id'),\n 'entries': [],\n }\n\n for child in element:\n if child.tag == 'subcategory':\n subcat = parse_subcategory(child)\n category['entries'].append(subcat)\n return category",
"def _categoryMap (self):\n return self.__categoryMap",
"def speciesCategoriesOnWikidata(self):\n result = {}\n sq = pywikibot.data.sparql.SparqlQuery()\n query = u\"\"\"SELECT ?item ?commonscat WHERE {\n ?item wdt:P105 wd:Q7432 ; \n wdt:P31 wd:Q16521 ;\n wdt:P373 ?commonscat .\n } LIMIT 125000\"\"\"\n sq = pywikibot.data.sparql.SparqlQuery()\n queryresult = sq.select(query)\n\n for resultitem in queryresult:\n qid = resultitem.get('item').replace(u'http://www.wikidata.org/entity/', u'')\n result[resultitem.get('commonscat')] = qid\n return result",
"def _crime_police_network (self):\n\n self._build_crime_type ()\n self.police_crime = {}\n df = pd.read_csv(self.crime_path, usecols=['IUCR','District'])\n\n for i, district in enumerate (df.District):\n try:\n district = int (district)\n except ValueError:\n continue\n\n if district in self.police_dict:\n try:\n district = self.police_dict[district]\n except KeyError:\n continue\n if (df.ix[i, 'IUCR'] in self.crime_type):\n crime = self.crime_type[df.ix[i, 'IUCR']]\n else:\n continue\n\n if district not in self.police_crime:\n self.police_crime[district] = {}\n self.police_crime[district][crime] = 1\n else:\n try: \n self.police_crime[district][crime] += 1\n except KeyError:\n self.police_crime[district][crime] = 1",
"def parse_categories():\n categories = []\n ignore_categories = [\"Specials\", \"Picture\"]\n\n try:\n request = requests.get(NewsCategoryParser.BASE_URL, timeout=REQUEST_TIMEOUT)\n request.raise_for_status()\n soup = BeautifulSoup(request.content, \"html5lib\")\n footer = soup.find(\"div\", {\"class\": \"cg-footer cg-max-footer\"})\n\n for item in footer.find_all(\"a\", {\"data-action\": \"Sitemap_Click\"}):\n if item.text.strip() in ignore_categories:\n continue\n cat = NewsCategory(key=item['href'].split(\"/\")[-1],\n name=item.text.strip().capitalize(),\n url=item['href'])\n if cat.key not in NewsCategoryParser.NO_SUBCAT_LIST:\n cat.has_subcatagories = True\n categories.append(cat)\n except:\n pass\n\n return categories",
"def __get_cat_levels(self,data):\n levels = {}\n\n for v in self.categorical:\n ds = data[v].astype('category')\n levels[v] = ds[ds.notnull()].unique().categories.sort_values()\n\n return levels",
"def test_subcategories_infinite_recurse(self):\n site = self.get_site('test2')\n cat = pywikibot.Category(site, 'Categories')\n big = pywikibot.Category(site, 'Really big category')\n result = list(cat.subcategories(recurse=3))\n self.assertEqual(result.count(cat), 2)\n self.assertEqual(result.count(big), 4)\n # check that the result is balanced\n self.assertEqual(result[:4].count(cat), 1)\n self.assertEqual(result[:4].count(big), 2)\n\n for member in set(result):\n self.assertIsInstance(member, pywikibot.Category)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
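The crime_category_breakdown view depends on module-level helpers (main_db_call, color_applicator, UPPER_DICT) that are not shown; the sketch below reproduces only its grouping step with stand-in data so the setdefault pattern is visible in isolation.

UPPER_DICT = {'PROPERTY': ['THEFT', 'BURGLARY'], 'PERSON': ['ASSAULT']}  # stand-in mapping
sub_pie = [('THEFT', 4, 'rgb(1,2,3)'), ('ASSAULT', 2, 'rgb(4,5,6)')]     # counted, colored offenses

sub_dict = {}
for entry in sub_pie:
    for key, members in UPPER_DICT.items():
        if entry[0] in members:
            sub_dict.setdefault(key, []).append(entry)

print(sub_dict)
# {'PROPERTY': [('THEFT', 4, 'rgb(1,2,3)')], 'PERSON': [('ASSAULT', 2, 'rgb(4,5,6)')]}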
Table AJAX endpoint for DataTables | def tableajax(request, plugin_name, data, group_type="all", group_id=None):
# Pull our variables out of the GET request
get_data = request.GET["args"]
get_data = json.loads(get_data)
draw = get_data.get("draw", 0)
start = int(get_data.get("start", 0))
length = int(get_data.get("length", 0))
search_value = ""
if "search" in get_data:
if "value" in get_data["search"]:
search_value = get_data["search"]["value"]
# default ordering
order_column = 2
order_direction = "desc"
order_name = ""
if "order" in get_data:
order_column = get_data["order"][0]["column"]
order_direction = get_data["order"][0]["dir"]
for column in get_data.get("columns", None):
if column["data"] == order_column:
order_name = column["name"]
break
plugin_object = process_plugin(plugin_name, group_type, group_id)
queryset = plugin_object.get_queryset(
request, group_type=group_type, group_id=group_id
)
machines, title = plugin_object.filter_machines(queryset, data)
machines = machines.values("id", "hostname", "console_user", "last_checkin")
if len(order_name) != 0:
if order_direction == "desc":
order_string = "-%s" % order_name
else:
order_string = "%s" % order_name
if len(search_value) != 0:
hostname_q = Q(hostname__icontains=search_value)
user_q = Q(console_user__icontains=search_value)
checkin_q = Q(last_checkin__icontains=search_value)
searched_machines = machines.filter(hostname_q | user_q | checkin_q).order_by(
order_string
)
else:
searched_machines = machines.order_by(order_string)
limited_machines = searched_machines[start : (start + length)]
return_data = {}
return_data["title"] = title
return_data["draw"] = int(draw)
return_data["recordsTotal"] = machines.count()
return_data["recordsFiltered"] = return_data["recordsTotal"]
return_data["data"] = []
settings_time_zone = None
try:
settings_time_zone = pytz.timezone(settings.TIME_ZONE)
except Exception:
pass
for machine in limited_machines:
if machine["last_checkin"]:
# formatted_date = pytz.utc.localize(machine.last_checkin)
if settings_time_zone:
formatted_date = (
machine["last_checkin"]
.astimezone(settings_time_zone)
.strftime("%Y-%m-%d %H:%M %Z")
)
else:
formatted_date = machine["last_checkin"].strftime("%Y-%m-%d %H:%M")
else:
formatted_date = ""
hostname_link = '<a href="%s">%s</a>' % (
reverse("machine_detail", args=[machine["id"]]),
escape(machine["hostname"]),
)
list_data = [hostname_link, escape(machine["console_user"]), formatted_date]
return_data["data"].append(list_data)
return JsonResponse(return_data) | [
"def updateTable(self):\r\n self.dataTable = Table(self.frame, dataframe = self.data)\r\n self.dataTable.show()",
"def clm_ajax_get_table_users(request):\n if request.method == 'GET':\n users = prep_data('admin_clm/user/get_list/', request.session)\n\n for item in users:\n item['is_activeName'] = unicode(user_states[item['is_active']])\n\n return messages_ajax.success(users)",
"def get_site_dt_data():\n\n metadata = MetaData()\n sites_table = Table('vw_sites', metadata, autoload=True, autoload_with=db.engine, schema='live_network')\n\n\n columns = []\n for c in sites_table.columns:\n columns.append(ColumnDT( c, column_name=c.name, mData=c.name))\n\n query = db.session.query(sites_table)\n\n # GET request parameters\n params = request.args.to_dict()\n\n row_table = DataTables(params, query, columns)\n\n return jsonify(row_table.output_result())",
"def generate_table(df):\n return dash_table.DataTable(\n id='table',\n columns=[\n {\"name\": i, \"id\": i, \"selectable\": True} for i in df.columns\n ],\n page_size=14,\n style_cell={'padding': '5px',#'textAlign': 'right',\n 'fontSize':12,'whiteSpace': 'normal',\n 'height': 'auto'},\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n style_data={\n 'whiteSpace': 'normal',\n 'height': 'auto',\n 'lineHeight': '14px'\n },\n style_table={'height': '500px', 'overflowY': 'auto'},\n style_cell_conditional=[\n {\n 'if': {'column_id': 'country'},\n 'fontWeight': 'bold',\n 'textAlign': 'left'\n }\n ],\n data=df.to_dict('records'),\n sort_action=\"native\",\n )",
"def table():\n return render_template('table.html')",
"def js_data_table_settings(self, enable=True, paging=True, default_order='[[0, \"asc\"]]',\r\n no_search_cols=None, no_sort_cols=None, **kwargs):\r\n\r\n self.context['dt_tbl']['js_data_table'] = {\r\n 'enable': enable,\r\n 'paging': paging,\r\n 'default_order': default_order,\r\n 'no_search_columns': no_search_cols or [],\r\n 'no_sort_columns': no_sort_cols or [],\r\n }\r\n\r\n if 'default_sort_col' in kwargs or 'default_sort_direction' in kwargs:\r\n warnings.simplefilter('default_sort_col and default_sort_direction is '\r\n 'being replaced with default_order string', DeprecationWarning)",
"def get_table_dependencies(self, tid):\n dependencies_result = self.get_dependencies(\n self.conn, tid\n )\n\n return ajax_response(\n response=dependencies_result,\n status=200\n )",
"def splits_table(self, id: str, activity: Activity, **kwargs) -> dt.DataTable:\n cols, data = self.splits_table_data(activity)\n return dt.DataTable(\n id=id,\n columns=cols,\n data=data,\n cell_selectable=False,\n row_selectable='multi',\n selected_rows=[],\n style_table={\n 'height': 414, # height of container (450) - height of dropdown (36)\n 'overflowY': 'scroll',\n },\n **self.COMMON_DATATABLE_OPTIONS,\n **kwargs\n )",
"def render_html(table, data):\n return render(renderers.HtmlRenderer, table, data)",
"def table(data):\n return pd.DataFrame(json_normalize(data))",
"def _table_viewer(table, rows_per_page=25, fields=None):\n\n # TODO(gram): rework this to use datalab.utils.commands.chart_html\n\n if not table.exists():\n raise Exception('Table %s does not exist' % str(table))\n\n if not table.is_listable():\n return \"Done\"\n\n _HTML_TEMPLATE = u\"\"\"\n <div class=\"bqtv\" id=\"{div_id}\">{static_table}</div>\n <br />{meta_data}<br />\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n\n require.config({{\n paths: {{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'],\n function(charts, dom, events) {{\n charts.render('gcharts', dom, events, '{chart_style}', [], {data},\n {{\n pageSize: {rows_per_page},\n cssClassNames: {{\n tableRow: 'gchart-table-row',\n headerRow: 'gchart-table-headerrow',\n oddTableRow: 'gchart-table-oddrow',\n selectedTableRow: 'gchart-table-selectedrow',\n hoverTableRow: 'gchart-table-hoverrow',\n tableCell: 'gchart-table-cell',\n headerCell: 'gchart-table-headercell',\n rowNumberCell: 'gchart-table-rownumcell'\n }}\n }},\n {{source_index: {source_index}, fields: '{fields}', legacy: 'true'}},\n 0,\n {total_rows});\n }}\n );\n </script>\n \"\"\"\n\n if fields is None:\n fields = datalab.utils.commands.get_field_list(fields, table.schema)\n div_id = datalab.utils.commands.Html.next_id()\n meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''\n meta_name = str(table) if table.job is None else ('job: %s' % table.job.id)\n if table.job:\n if table.job.cache_hit:\n meta_cost = 'cached'\n else:\n bytes = datalab.bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)\n meta_cost = '%s processed' % bytes\n meta_time = 'time: %.1fs' % table.job.total_time\n else:\n meta_cost = ''\n meta_time = ''\n\n data, total_count = datalab.utils.commands.get_data(table, fields, first_row=0,\n count=rows_per_page)\n\n if total_count < 0:\n # The table doesn't have a length metadata property but may still be small if we fetched less\n # rows than we asked for.\n fetched_count = len(data['rows'])\n if fetched_count < rows_per_page:\n total_count = fetched_count\n\n chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'\n meta_entries = [meta_count, meta_time, meta_cost, meta_name]\n meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))\n\n return _HTML_TEMPLATE.format(div_id=div_id,\n static_table=datalab.utils.commands.HtmlBuilder\n .render_chart_data(data),\n meta_data=meta_data,\n chart_style=chart,\n source_index=datalab.utils.commands\n .get_data_source_index(str(table)),\n fields=','.join(fields),\n total_rows=total_count,\n rows_per_page=rows_per_page,\n data=json.dumps(data, cls=datalab.utils.JSONEncoder))",
"def cma_networks_ajax_get_table(request, user_id):\n if request.method == 'GET':\n networks = prep_data(('admin_cm/network/list_user_networks/', {'user_id': int(user_id)}), request.session)\n return messages_ajax.success(networks)",
"def generate_tweet_table(dataframe):\n return dash_table.DataTable(id=\"responsive-table\",\n columns=[{'name': 'Date', 'id':'date', 'type': 'datetime'},\n {'name': 'Tweet', 'id':'tweet', 'type': 'text'},\n {'name': 'Sentiment', 'id':'sentiment', 'type': 'numeric'},\n {'name': 'Link', 'id':'link', 'type': 'text', 'presentation':'markdown'}],\n data = dataframe.to_dict('records'),\n style_header={\n 'backgroundColor': 'rgb(52, 73, 94)',\n 'fontWeight': 'bold',\n 'color': colors['text'],\n 'textAlign': 'left',\n 'fontSize': '12pt',\n 'height': 'auto',\n 'width': 'auto'\n },\n style_cell={'padding': '5px',\n 'backgroundColor': colors['background'],\n 'color': colors['table-text'],\n 'textAlign':'left',\n 'height':'auto',\n 'whiteSpace':'normal',\n 'lineHeight':'15px',\n 'width':'auto'},\n style_as_list_view=True,\n style_data_conditional=[\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.3'\n },\n 'backgroundColor': colors['sl-negative-sentiment'],\n 'color': colors['ex-negative-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.6'\n },\n 'backgroundColor': colors['ex-negative-sentiment'],\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.3'\n },\n 'backgroundColor': colors['sl-positive-sentiment'],\n 'color': colors['ex-positive-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.6'\n },\n 'backgroundColor': colors['ex-positive-sentiment'],\n 'color': 'white'\n },\n ]),",
"def __init__(self, json_spec = None ):\n self.json_spec = json_spec\n DataTable.__init__(self,None,\"JSONDataTable\",120)\n self.refresh()",
"def init_data_table(self):\n\n table_header = self.dataframe.columns.tolist()\n table_data = self.dataframe.values.tolist()\n\n return [table_header] + table_data",
"def initResultsTable(self):\n self.tableWidget = TableWidget(self.queryTab, self)\n self.tableWidget.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setColumnCount(1)\n self.tableWidget.setRowCount(1)\n self.tabsVerticalLayout.addWidget(self.tableWidget)",
"def scheduled_ajax():\n\n tasks = db.session.query(DatabaseSchedulerEntry).all()\n return ajax.site.scheduled_task_data(tasks)",
"def visit_table(self, table):\n pass",
"def generate_flagged_tweet_table(dataframe):\n return dash_table.DataTable(id=\"responsive-table\",\n columns=[{'name': 'Date', 'id':'date', 'type': 'datetime'},\n {'name': 'Tweet', 'id':'tweet', 'type': 'text'},\n {'name': 'Sentiment', 'id':'sentiment', 'type': 'numeric'},\n {'name': 'Link', 'id':'link', 'type': 'text', 'presentation':'markdown'}],\n data = dataframe.to_dict('records'),\n style_header={\n 'backgroundColor': 'rgb(52, 73, 94)',\n 'fontWeight': 'bold',\n 'color': colors['text'],\n 'textAlign': 'left',\n 'fontSize': '12pt',\n 'height': 'auto',\n 'width': 'auto'\n },\n style_cell={'padding': '5px',\n 'backgroundColor': colors['background'],\n 'color': colors['table-text'],\n 'textAlign':'left',\n 'height':'auto',\n 'whiteSpace':'normal',\n 'lineHeight':'15px',\n 'width':'auto'},\n style_as_list_view=True,\n style_data_conditional=[\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.3'\n },\n 'backgroundColor': colors['sl-negative-sentiment'],\n 'color': colors['ex-negative-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.6'\n },\n 'backgroundColor': colors['ex-negative-sentiment'],\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.3'\n },\n 'backgroundColor': colors['sl-positive-sentiment'],\n 'color': colors['ex-positive-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.6'\n },\n 'backgroundColor': colors['ex-positive-sentiment'],\n 'color': 'white'\n },\n ]),"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
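A minimal sketch of the search-filter branch of tableajax in isolation, assuming a Django project with the same Machine fields; build_search_filter is a hypothetical helper that mirrors the Q-object composition used in the view.

from django.db.models import Q

def build_search_filter(search_value):
    # OR the three searchable columns together, as the view does
    return (Q(hostname__icontains=search_value)
            | Q(console_user__icontains=search_value)
            | Q(last_checkin__icontains=search_value))

# e.g. machines.filter(build_search_filter('mac')).order_by('-last_checkin')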
Process a single management source's data. This function first optionally calls any additional processors for the management source in question (Munki, for example), then processes Facts, then ManagedItems, and finally Messages. | def process_management_submission(source, management_data, machine, object_queue):
# Add custom processor funcs to this dictionary.
# The key should be the same name used in the submission for ManagementSource.
# The func's signature must be
# f(management_data: dict, machine: Machine, object_queue: dict)
processing_funcs = {
"Machine": process_machine_submission,
"Sal": process_sal_submission,
"Munki": process_munki_extra_keys,
}
processing_func = processing_funcs.get(source.name)
if processing_func:
object_queue = processing_func(management_data, machine, object_queue)
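    # Facts, ManagedItems, and Messages are then processed for every management source.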
object_queue = process_facts(source, management_data, machine, object_queue)
object_queue = process_managed_items(source, management_data, machine, object_queue)
object_queue = process_messages(source, management_data, machine, object_queue)
return object_queue | [
"def manage_data_source(self, request):\n\n layout = ManageDataSourceItemsLayout(self, request)\n return morepath.redirect(layout.manage_model_link)",
"def _process_finalize(self, contexts):\n added = []\n updated = []\n name_updated = []\n\n for ctx in contexts:\n if _ProcessingTag.ADDED in ctx.tags:\n added.append(ctx)\n\n if _ProcessingTag.NAME_UPDATED in ctx.tags:\n name_updated.append(ctx)\n\n if _ProcessingTag.UPDATED in ctx.tags:\n updated.append(ctx)\n\n self.app.db.session.add_all([ctx.source for ctx in added])\n\n sources_and_metas = [\n (ctx.source, ctx.meta)\n for ctx in set(added + name_updated)\n ]\n\n if sources_and_metas:\n self.app.mediainfo.process(*sources_and_metas)\n\n # It's important to call commit here, Mediainfo.process doesn't do a\n # commit\n self.app.db.session.commit()\n\n self.app.signals.send(\n 'sources-added-batch',\n sources=[ctx.source for ctx in added])\n self.app.signals.send(\n 'sources-updated-batch',\n sources=[ctx.source for ctx in name_updated + updated])\n\n msg = '{n} sources {action}'\n stats = [\n ('added', added),\n ('updated', updated),\n ('parsed', sources_and_metas)\n ]\n for (action, group) in stats:\n msg_ = msg.format(n=len(group), action=action)\n self.logger.info(msg_)\n\n ret = [ctx.source for ctx in contexts]\n return ret",
"def _ProcessSourceMultiProcessMode(self, options):\n # TODO: replace by an option.\n start_collection_process = True\n\n self._number_of_worker_processes = getattr(options, 'workers', 0)\n if self._number_of_worker_processes < 1:\n # One worker for each \"available\" CPU (minus other processes).\n # The number three here is derived from the fact that the engine starts\n # up:\n # + A collector process.\n # + A storage process.\n # If we want to utilize all CPU's on the system we therefore need to start\n # up workers that amounts to the total number of CPU's - 3 (these two plus\n # the main process). Thus the number three.\n cpus = multiprocessing.cpu_count() - 3\n\n if cpus <= self.MINIMUM_WORKERS:\n cpus = self.MINIMUM_WORKERS\n elif cpus >= self.MAXIMUM_WORKERS:\n # Let's have a maximum amount of workers.\n cpus = self.MAXIMUM_WORKERS\n\n self._number_of_worker_processes = cpus\n\n logging.info(u'Starting extraction in multi process mode.')\n\n collection_queue = queue.MultiThreadedQueue()\n storage_queue = queue.MultiThreadedQueue()\n self._engine = engine.Engine(collection_queue, storage_queue)\n\n self._engine.SetSource(\n self._source_path_spec, resolver_context=self._resolver_context)\n\n logging.debug(u'Starting preprocessing.')\n pre_obj = self.PreprocessSource(options)\n\n # TODO: move FindAllParsers to engine as a class method?\n filter_query = getattr(options, 'parsers', '')\n self._parsers = putils.FindAllParsers(\n pre_obj=pre_obj, config=options, parser_filter_string=filter_query)\n self._parser_names = [parser.parser_name for parser in self._parsers['all']]\n\n self._PreprocessSetCollectionInformation(options, pre_obj)\n\n output_module = getattr(options, 'output_module', None)\n if output_module:\n storage_writer = storage.BypassStorageWriter(\n storage_queue, self._storage_file_path,\n output_module_string=output_module, pre_obj=pre_obj)\n else:\n storage_writer = storage.StorageFileWriter(\n storage_queue, self._storage_file_path, self._buffer_size, pre_obj)\n\n logging.debug(u'Preprocessing done.')\n\n if 'filestat' in self._parser_names:\n include_directory_stat = True\n else:\n include_directory_stat = False\n\n filter_file = getattr(options, 'file_filter', None)\n if filter_file:\n filter_find_specs = engine_utils.BuildFindSpecsFromFile(\n filter_file, pre_obj=pre_obj)\n else:\n filter_find_specs = None\n\n if start_collection_process:\n resolver_context = context.Context()\n else:\n resolver_context = self._resolver_context\n\n engine_proxy = None\n rpc_proxy_client = None\n\n if self._run_foreman:\n worker_foreman = foreman.Foreman(\n show_memory_usage=self._show_worker_memory_information)\n\n # Start a proxy server (only needed when a foreman is started).\n engine_proxy = rpc_proxy.StandardRpcProxyServer(os.getpid())\n try:\n engine_proxy.Open()\n engine_proxy.RegisterFunction(\n 'signal_end_of_collection', worker_foreman.SignalEndOfProcessing)\n\n proxy_thread = threading.Thread(\n name='rpc_proxy', target=engine_proxy.StartProxy)\n proxy_thread.start()\n\n rpc_proxy_client = rpc_proxy.StandardRpcProxyClient(\n engine_proxy.listening_port)\n except errors.ProxyFailedToStart as exception:\n proxy_thread = None\n logging.error((\n u'Unable to setup a RPC server for the engine with error '\n u'{0:s}').format(exception))\n else:\n worker_foreman = None\n\n self._collector = self._engine.CreateCollector(\n include_directory_stat, vss_stores=self._vss_stores,\n filter_find_specs=filter_find_specs, resolver_context=resolver_context)\n\n if rpc_proxy_client:\n 
self._collector.SetProxy(rpc_proxy_client)\n\n self._DebugPrintCollector(options)\n\n logging.info(u'Starting storage process.')\n self._storage_process = multiprocessing.Process(\n name='StorageThread', target=storage_writer.WriteEventObjects)\n self._storage_process.start()\n\n if start_collection_process:\n logging.info(u'Starting collection process.')\n self._collection_process = multiprocessing.Process(\n name='Collection', target=self._collector.Collect)\n self._collection_process.start()\n\n logging.info(u'Starting worker processes to extract events.')\n\n for worker_nr in range(self._number_of_worker_processes):\n extraction_worker = self._CreateExtractionWorker(\n worker_nr, options, pre_obj)\n\n logging.debug(u'Starting worker: {0:d} process'.format(worker_nr))\n worker_name = u'Worker_{0:d}'.format(worker_nr)\n # TODO: Test to see if a process pool can be a better choice.\n self._worker_processes[worker_name] = multiprocessing.Process(\n name=worker_name, target=extraction_worker.Run)\n\n self._worker_processes[worker_name].start()\n pid = self._worker_processes[worker_name].pid\n if worker_foreman:\n worker_foreman.MonitorWorker(pid=pid, name=worker_name)\n\n logging.info(u'Collecting and processing files.')\n if self._collection_process:\n while self._collection_process.is_alive():\n self._collection_process.join(10)\n # Check the worker status regularly while collection is still ongoing.\n if worker_foreman:\n worker_foreman.CheckStatus()\n # TODO: We get a signal when collection is done, which might happen\n # before the collection thread joins. Look at the option of speeding\n # up the process of the collector stopping by potentially killing it.\n else:\n self._collector.Collect()\n\n logging.info(u'Collection is done, waiting for processing to complete.')\n if worker_foreman:\n worker_foreman.SignalEndOfProcessing()\n\n # Close the RPC server since the collection thread is done.\n if engine_proxy:\n # Close the proxy, free up resources so we can shut down the thread.\n engine_proxy.Close()\n\n if proxy_thread.isAlive():\n proxy_thread.join()\n\n # Run through the running workers, one by one.\n # This will go through a list of all active worker processes and check it's\n # status. If a worker has completed it will be removed from the list.\n # The process will not wait longer than five seconds for each worker to\n # complete, if longer time passes it will simply check it's status and\n # move on. That ensures that worker process is monitored and status is\n # updated.\n while self._worker_processes:\n for process_name, process_obj in sorted(self._worker_processes.items()):\n if worker_foreman:\n worker_label = worker_foreman.GetLabel(\n name=process_name, pid=process_obj.pid)\n else:\n worker_label = None\n\n if not worker_label:\n if process_obj.is_alive():\n logging.info((\n u'Process {0:s} [{1:d}] is not monitored by the foreman. 
Most '\n u'likely due to a worker having completed it\\'s processing '\n u'while waiting for another worker to complete.').format(\n process_name, process_obj.pid))\n logging.info(\n u'Waiting for worker {0:s} to complete.'.format(process_name))\n process_obj.join()\n logging.info(u'Worker: {0:s} [{1:d}] has completed.'.format(\n process_name, process_obj.pid))\n\n del self._worker_processes[process_name]\n continue\n\n if process_obj.is_alive():\n # Check status of worker.\n worker_foreman.CheckStatus(label=worker_label)\n process_obj.join(5)\n # Note that we explicitly must test against exitcode 0 here since\n # process.exitcode will be None if there is no exitcode.\n elif process_obj.exitcode != 0:\n logging.warning((\n u'Worker process: {0:s} already exited with code: '\n u'{1:d}.').format(process_name, process_obj.exitcode))\n process_obj.terminate()\n worker_foreman.TerminateProcess(label=worker_label)\n\n else:\n # Process is no longer alive, no need to monitor.\n worker_foreman.StopMonitoringWorker(label=worker_label)\n # Remove it from our list of active workers.\n del self._worker_processes[process_name]\n\n logging.info(u'Processing is done, waiting for storage to complete.')\n\n self._engine.SignalEndOfInputStorageQueue()\n self._storage_process.join()\n logging.info(u'Storage is done.')",
"def process():\n db = DataParser.get_connection()\n cursor = db.cursor()\n DataParser.set_up_database(cursor)\n config = DataParser.get_config()\n cursor.execute(\"use %s\" % config[\"database\"][\"database_name\"])\n DataParser.import_articles(cursor)\n DataParser.import_citations(cursor)\n DataParser.import_words(cursor)\n DataParser.import_users(cursor)\n DataParser.clean_up(db, cursor)",
"def process(self, data, **kwargs):\n # sequentially process the data\n for processor in self.processors:\n data = _process((processor, data, kwargs))\n return data",
"def process_incoming_data(self):\n while not self.incoming_data.empty():\n\n data, feed = self.incoming_data.get()\n agg_params = feed['agg_params']\n\n if agg_params.get('exclude_aggregator', False):\n continue\n\n address = feed['address']\n sessid = feed['session_id']\n\n pid = self.pids.get((address, sessid))\n if pid is None:\n prov_kwargs = {}\n for key in ['frame_length', 'fresh_time']:\n if key in agg_params:\n prov_kwargs[key] = agg_params[key]\n\n pid = self.add_provider(address, sessid, **prov_kwargs)\n\n prov = self.providers[pid]\n prov.save_to_block(data)",
"def _process(self, mlist, msg, msgdata):\n raise NotImplementedError",
"def process(self, structure):\n ###\n ### The contents of this function are an example of what can be done\n ### in a subclass of this class. It demonstrates the use of SimPy\n ### Resources and Containiners. The function itself is useless.\n ### It is meant to help create your own function after creating\n ### a subclass that inherits from this class.\n ###\n\n # Request staff\n staff_request = self.staff.request()\n yield staff_request\n \n # Get the entity's building/structure so that the building stock's \n # FilterStore is informed of attribute changes to the building/structure\n # Also means that only one process at a time can access the building.\n get_structure = yield structure.stock.get(lambda getStructure:\n getStructure.__dict__ == structure.__dict__\n )\n\n # Yield timeout equivalent to program's process duration\n yield self.env.timeout(self.duration())\n\n # Release release staff after process duation is complete.\n self.staff.release(staff_request)\n \n\n material_cost = 1 # Cost of materials needed (e.g., for RepairProgram)\n\n # Get out amount equal to cost.\n yield self.materials.get(material_cost) # *** Materials not used in all TechnicalRecoveryProgram subclasses\n\n # Put back amount equal to cost.\n yield self.materials.put(material_cost)\n \n # Put the property back in the building stock to register attribute change.\n yield structure.stock.put(get_structure)\n\n self.writeCompleted()",
"def parse(self):\n # Checks if the source is loaded\n if not self.loaded:\n self.load(self.source)\n\n for item in self.get_items():\n # Parse the fields from the source into a dict\n data = self.parse_item(item)\n # Get the instance from the DB, or a new one\n instance = self.get_instance(data)\n # Feed instance with data\n self.feed_instance(data, instance)\n # Try to save the instance or keep the error\n try:\n self.save_item(item, data, instance)\n except Exception, e:\n self.save_error(data, sys.exc_info())\n\n # Unload the source\n self.unload()",
"def test_process_batch(self):\n batch = next(iter(self.instance.get_loader(batch_size=self.batch_size)))\n self.instance.process_batch(batch=batch)",
"def _PreprocessSetCollectionInformation(self, options, pre_obj):\n collection_information = {}\n\n collection_information['version'] = plaso.GetVersion()\n collection_information['configured_zone'] = self._timezone\n collection_information['file_processed'] = self._source_path\n collection_information['output_file'] = self._storage_file_path\n collection_information['protobuf_size'] = self._buffer_size\n collection_information['parser_selection'] = getattr(\n options, 'parsers', '(no list set)')\n collection_information['preferred_encoding'] = self.preferred_encoding\n collection_information['time_of_run'] = timelib.Timestamp.GetNow()\n\n collection_information['parsers'] = self._parser_names\n collection_information['preprocess'] = self._preprocess\n\n if self._source_type == self._SOURCE_TYPE_DIRECTORY:\n recursive = True\n else:\n recursive = False\n collection_information['recursive'] = recursive\n collection_information['debug'] = self._debug_mode\n collection_information['vss parsing'] = bool(self._vss_stores)\n\n if self._filter_expression:\n collection_information['filter'] = self._filter_expression\n\n filter_file = getattr(options, 'file_filter', None)\n if filter_file:\n if os.path.isfile(filter_file):\n filters = []\n with open(filter_file, 'rb') as fh:\n for line in fh:\n filters.append(line.rstrip())\n collection_information['file_filter'] = ', '.join(filters)\n\n collection_information['os_detected'] = getattr(options, 'os', 'N/A')\n\n if self._source_type == self._SOURCE_TYPE_STORAGE_MEDIA_IMAGE:\n collection_information['method'] = 'imaged processed'\n collection_information['image_offset'] = self._partition_offset\n else:\n collection_information['method'] = 'OS collection'\n\n if self._single_process_mode:\n collection_information['runtime'] = 'single process mode'\n else:\n collection_information['runtime'] = 'multi process mode'\n collection_information['workers'] = self._number_of_worker_processes\n\n pre_obj.collection_information = collection_information",
"def process_item(self, item, source):\n result = item.clone()\n result.assets = {}\n\n # Create a temporary dir for processing we may do\n workdir = mkdtemp()\n try:\n # Get the data file\n asset = next(v for k, v in item.assets.items() if 'data' in (v.roles or []))\n input_filename = download(asset.href, workdir, logger=self.logger, access_token=self.message.accessToken)\n\n # Mark any fields the service processes so later services do not repeat work\n dpi = self.message.format.process('dpi')\n # Variable subsetting\n variables = source.process('variables')\n\n # Do the work here!\n var_names = [v.name for v in variables]\n print('Processing item %s, DPI=%d, vars=[%s]' % (item.id, dpi, ', '.join(var_names)))\n working_filename = os.path.join(workdir, 'tmp.txt')\n shutil.copyfile(input_filename, working_filename)\n\n # Stage the output file with a conventional filename\n output_filename = generate_output_filename(asset.href, ext=None, variable_subset=None,\n is_regridded=False, is_subsetted=False)\n url = stage(working_filename, output_filename, 'text/plain', location=self.message.stagingLocation,\n logger=self.logger)\n\n # Update the STAC record\n result.assets['data'] = Asset(url, title=output_filename, media_type='text/plain', roles=['data'])\n # Other metadata updates may be appropriate, such as result.bbox and result.geometry\n # if a spatial subset was performed\n\n # Return the STAC record\n return result\n finally:\n # Clean up any intermediate resources\n shutil.rmtree(workdir)",
"def process_sources(self):\n for target_node in self.G.nodes:\n self.get_causal_relationships(target_node)\n\n if self.causal_sources:\n self.collapse_url(target_node)",
"async def process(self):\n if self.room.room_id == self.config.management_room_id:\n await self.handle_management_room_message()\n else:\n await self.relay_to_management_room()",
"def process_data(self, value):\n if value:\n if self.is_related:\n self.data = self.datamodel.get_related_interface(\n self.col_name\n ).get_pk_value(value)\n else:\n self.data = self.datamodel.get(value)\n else:\n self.data = None",
"def stage(self, **kwargs):\n for context in self._parsed_data_uris:\n if context != self._source_context:\n if self._clean:\n # remove target URI first\n pass\n\n for i, parsed_source_uri in enumerate(self._parsed_data_uris[self._source_context]):\n\n Log.some().debug(\n 'staging data: %s->%s to %s->%s',\n self._source_context,\n parsed_source_uri['chopped_uri'],\n context,\n self._parsed_data_uris[context][i]['chopped_uri']\n )\n\n if context != 'final':\n if not DataManager.copy(\n parsed_src_uri=parsed_source_uri,\n parsed_dest_uri=self._parsed_data_uris[context][i],\n **kwargs\n ):\n msg = 'cannot stage data by copying from {} to {}'.format(\n parsed_source_uri['chopped_uri'],\n self._parsed_data_uris[context][i]['chopped_uri']\n )\n Log.an().error(msg)\n return self._fatal(msg)\n\n self._staged = True\n\n return True",
"def _process_managed_objects_queue(self, instance):\n i_key = self._instance_key(instance)\n self.mor_cache.init_instance(i_key)\n if not self.mor_objects_queue.contains(i_key):\n self.logger.info(\"Objects queue is not initialized yet for instance {}, skipping processing\\n\".format(i_key))\n return\n\n for resource_type in RESOURCE_TYPE_METRICS:\n # If batch size is set to 0, process everything at once\n batch_size = self.batch_morlist_size or self.mor_objects_queue.size(i_key, resource_type)\n while self.mor_objects_queue.size(i_key, resource_type):\n query_specs = []\n for _ in range(batch_size):\n mor = self.mor_objects_queue.pop(i_key, resource_type)\n if mor is None:\n self.logger.info(\"No more objects of type '{}' left in the queue\\n\".format(resource_type))\n break\n\n mor_name = str(mor['mor'])\n mor['interval'] = VCENTER_REALTIME_INTERVAL if mor['mor_type'] in REALTIME_RESOURCES else None\n # Always update the cache to account for Mors that might have changed parent\n # in the meantime (e.g. a migrated VM).\n self.mor_cache.set_mor(i_key, mor_name, mor)\n\n # Only do this for non real-time resources i.e. datacenter and datastores\n # For hosts and VMs, we can rely on a precomputed list of metrics\n if mor[\"mor_type\"] not in REALTIME_RESOURCES:\n query_spec = vim.PerformanceManager.QuerySpec()\n query_spec.entity = mor[\"mor\"]\n query_spec.intervalId = mor[\"interval\"]\n query_spec.maxSample = 1\n query_specs.append(query_spec)\n\n # Schedule jobs for non realtime resources only.\n if query_specs:\n i_key = self._instance_key(instance)\n server_instance = self._get_server_instance(instance)\n perfManager = server_instance.content.perfManager\n\n res = perfManager.QueryPerf(query_specs)\n for mor_perfs in res:\n mor_name = str(mor_perfs.entity)\n available_metrics = [value.id for value in mor_perfs.value]\n try:\n\n self.mor_cache.set_metrics(i_key, mor_name, self._determine_needed_metrics(instance, available_metrics))\n except MorNotFoundError:\n self.logger.info(\"Object '{}' is missing from the cache, skipping.\\n\".format(mor_name))\n continue",
"def process(self, data, **kwargs):\n if self.online:\n return self.process_online(data, **kwargs)\n return self.process_offline(data, **kwargs)",
"def run_batch_processor(ndx, processors, file_set):\n logging.debug('in run_batch_processor, ndx = %d', ndx)\n if os.path.exists((file_set[0])) and tarfile.is_tarfile(file_set[0]):\n processors[ndx].input_file = file_set[0]\n else:\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.gmtime(time.time()))\n file_list_name = cfg_data.hidden_dir + os.sep + 'files_' + \\\n processors[ndx].target_type + '_' + timestamp + '.lis'\n with open(file_list_name, 'wt') as file_list:\n for fname in file_set:\n file_list.write(fname + '\\n')\n processors[ndx].input_file = file_list_name\n data_file_list = []\n finder_opts = {}\n for fspec in file_set:\n dfile = get_obpg_data_file_object(fspec)\n data_file_list.append(dfile)\n if 'suite' in processors[ndx].par_data:\n finder_opts['suite'] = processors[ndx].par_data['suite']\n elif 'prod' in processors[ndx].par_data:\n finder_opts['suite'] = processors[ndx].par_data['prod']\n if 'resolution' in processors[ndx].par_data:\n finder_opts['resolution'] = processors[ndx].par_data['resolution']\n if 'oformat' in processors[ndx].par_data:\n finder_opts['oformat'] = processors[ndx].par_data['oformat']\n name_finder = name_finder_utils.get_level_finder(data_file_list,\n processors[ndx].target_type,\n finder_opts)\n processors[ndx].output_file = os.path.join(processors[ndx].out_directory,\n name_finder.get_next_level_name())\n if DEBUG:\n log_msg = \"Running {0} with input file {1} to generate {2} \".\\\n format(processors[ndx].target_type,\n processors[ndx].input_file,\n processors[ndx].output_file)\n logging.debug(log_msg)\n processors[ndx].execute()\n return processors[ndx].output_file"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The function reads the image from the given path and processes it to extract the image's feature_vector | def custom_feature_transformation(image_file_path):
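    # Processing pipeline: threshold the image, fill holes, grow regions,
    # then flatten the result into a 1-D numpy feature vector.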
image = cv2.imread(image_file_path)
thresholded_image = highlight_invariant_threshold(image)
filled_image = hole_fill(thresholded_image)
region_grown_image = grow_region(filled_image)
image_array = image_to_array(region_grown_image)
feature_vector = numpy.array(image_array)
row = {'feature_vector': feature_vector}
return row | [
"def calculate_feature_vector(path):\n\ttf_image = preprocess_image(path)\n\treturn module(tf_image)",
"def feature_extraction(self, sample):\n image, filename = sample\n\n if self.feature_model.training:\n print(\"Run feature model in inference mode!\")\n exit(0)\n\n if self.feature_model:\n feature = np.squeeze(self.feature_model(image[None, ...].to(self.device)).data.cpu().numpy())\n return feature",
"def build_feature_vector_for_image(image):\n assert len(image.shape) == 3 and image.shape[2] == 3\n\n # scale down image and convert to hsv\n scaled = cv2.resize(image, (6, 6), interpolation=cv2.INTER_AREA)\n scaled = cv2.cvtColor(scaled, cv2.COLOR_BGR2HSV)\n\n # extract image channels\n hue_sin = (numpy.sin(scaled[:, :, 0] / (255.0 / (2 * math.pi))) * 128 + 128).astype(numpy.uint8)\n hue_cos = (numpy.cos(scaled[:, :, 0] / (255.0 / (2 * math.pi))) * 128 + 128).astype(numpy.uint8)\n sat = scaled[:, :, 1]\n val = scaled[:, :, 2]\n\n # concat channels for feature vector\n return numpy.hstack((hue_sin.flat, hue_cos.flat, sat.flat, val.flat))",
"def __call__(self, image):\n\n im = cv2.imread(os.path.join(self.__collection_data.path, image), cv2.IMREAD_GRAYSCALE)\n\n if im is None:\n # Image is corrupt. Delete the image file.\n print 'Removing corrupt image {0}'.format(image)\n os.remove(os.path.join(self.__collection_data.path, image))\n return\n\n locations, descriptors = extract_sift_features(im, self.__feature_data.config.edge_threshold,\n self.__feature_data.config.peak_threshold)\n\n self.__feature_data.save(image, locations, descriptors)\n\n print 'Extracted {0} features for {1}'.format(descriptors.shape[0], image)",
"def computeFeatures(img, features=...) -> features:\n ...",
"def build_feature_vector(post):\n image = cv2.imread(str(post.local_image), cv2.IMREAD_COLOR)\n features = build_feature_vector_for_image(image)\n if features is None:\n return EMPTY_FEATURE\n\n return features",
"def data_transformer(self):\n # Use the feature extractor to produce \n # a list of feature vectors.\n detector = cv2.ORB_create()\n self.feature_list = [FeatureExtractor.extract_features(image) for image in self.images]",
"def feature_extractor(image_path, options=None):\n\n # size of images inc-resnet is compatible with\n image_size = inception_resnet_v2.inception_resnet_v2.default_image_size\n\n checkpoint_path = os.path.join(dir, 'checkpoints/inception_resnet_v2_2016_08_30.ckpt')\n\n image_string = urllib2.urlopen(image_path).read()\n\n # JPEG format converted to unit8 tensor\n image = tf.image.decode_jpeg(image_string, channels=3)\n\n # inception specific pre processing\n processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)\n\n # the model accepts images in batches\n processed_images = tf.expand_dims(processed_image, 0)\n\n with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):\n logits, end_points = inception_resnet_v2.inception_resnet_v2(processed_images,\n num_classes=1001,\n is_training=False)\n\n weights_from_file = slim.get_variables_to_restore(exclude=['logits'])\n\n init_fn = slim.assign_from_checkpoint_fn(checkpoint_path, weights_from_file)\n\n # last layer before fully connected layer\n features = end_points['PreLogitsFlatten']\n\n # run the image through the network\n with tf.Session() as sess:\n init_fn(sess)\n\n features = sess.run([features])\n\n return features[0][0]",
"def _get_feature_matrix(cf, fg, X, images, offset=0):\n # =====[ Iterate through images and calculate feature vector for each ]=====\n print(len(images))\n for idx, img in enumerate(images):\n\n # try:\n print('img loaded', img)\n # print(os.path.exists(img))\n # print(os.path.isfile(img))\n # cvimg = cv2.imread(img)\n # print('----: ', type(cvimg))\n cfeats = cf.compute_channels(cv2.imread(img))\n feature_vec = fg.generate_features(cfeats)\n\n # =====[ Add feature vector to input matrix ]=====\n X[idx + offset, :] = feature_vec\n\n # except Exception as e:\n # print('Could not add image at index: ', idx + offset)\n\n return X",
"def image_features(img, model):\n features = model.predict(img)\n return features",
"def extract_features(imgs, config=DetectionConfig()):\n\tprint(\"extracting features from\", len(imgs), \"images\")\n\t# Create a list to append feature vectors to\n\tfeatures = []\n\t# Iterate through the list of images\n\tfor img in imgs:\n\t\tfeatures.append(extract_feature(img, config=config))\n\t# Return list of feature vectors\n\tprint(\"done extracting features from\", len(imgs), \"images\")\n\treturn features",
"def vgg19_feature_extraction(dataset_path):\n base_model = VGG19(weights='imagenet')\n model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_pool').output)\n\n # Get features of all images using VGG19\n X = []\n Y = []\n model_cnt = 0\n model_index = {}\n img_list = os.listdir(dataset_path)\n img_list.sort()\n temp_cnt = 0\n for img_file in img_list:\n if temp_cnt % 100 == 0:\n print(\"VGG19 \", round(temp_cnt/len(img_list)*100,3), \"% complete\", end='\\r')\n temp_cnt = temp_cnt + 1\n img_path = dataset_path + '/' + img_file\n img = image.load_img(img_path, target_size=(224,224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n block5_pool_features = model.predict(x).flatten()\n\n X.append(block5_pool_features) \n if 'aug' in dataset_path:\n model_id = img_file.split('_')[1]\n else:\n model_id = img_file.split('_')[0]\n\n if model_id in model_index:\n Y.append(model_index[model_id])\n else:\n model_index[model_id] = model_cnt\n Y.append(model_cnt)\n model_cnt = model_cnt + 1 \n\n X = np.asarray(X)\n Y_lab = np.asarray(Y)\n Y_cat = to_categorical(Y_lab)\n\n return X, Y_lab, Y_cat",
"def feature_extraction(img, feature):\r\n\r\n if feature == 'HoG':\r\n # HoG parameters\r\n win_size = (32, 32)\r\n block_size = (32, 32)\r\n block_stride = (16, 16)\r\n cell_size = (16, 16)\r\n nbins = 9\r\n deriv_aperture = 1\r\n win_sigma = 4\r\n histogram_norm_type = 0\r\n l2_hys_threshold = 2.0000000000000001e-01\r\n gamma_correction = 0\r\n nlevels = 64\r\n \r\n # Your code here. You should also change the return value.\r\n\r\n hog = cv2.HOGDescriptor(win_size,block_size,block_stride,cell_size,nbins,deriv_aperture,win_sigma,histogram_norm_type,l2_hys_threshold,gamma_correction,nlevels)\r\n\r\n dsize = hog.getDescriptorSize()\r\n descripters = hog.compute(img,winStride=(32,32),padding=(0,0))\r\n descripters = descripters.reshape(-1,dsize)\r\n\r\n\r\n elif feature == 'SIFT':\r\n sift = cv2.xfeatures2d.SIFT_create()\r\n descripters = []\r\n height= img.shape[0]\r\n width = img.shape[1]\r\n split1 = np.array_split(img, width/20, axis=1)\r\n for split in split1:\r\n split2 =np.array_split(split, height/20, axis=0)\r\n for ig in split2:\r\n keypoints, descripter = sift.detectAndCompute(ig,None)\r\n if descripter is not None:\r\n descripters.append(descripter)\r\n if len(descripters) > 0:\r\n descripters = np.vstack(descripters)\r\n else: \r\n return None\r\n return descripters",
"def feature_extraction(img, feature):\n\n if feature == 'HoG':\n # HoG parameters\n win_size = (32, 32)\n block_size = (32, 32)\n block_stride = (16, 16)\n cell_size = (16, 16)\n nbins = 9\n deriv_aperture = 1\n win_sigma = 4\n histogram_norm_type = 0\n l2_hys_threshold = 2.0000000000000001e-01\n gamma_correction = 0\n nlevels = 64\n\n\n # Your code here. You should also change the return value.\n\n # make HOG descriptor model with given parameter\n hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, nbins, deriv_aperture, win_sigma, histogram_norm_type, l2_hys_threshold, gamma_correction, nlevels)\n\n m = img.shape[0]\n n = img.shape[1]\n\n all_f = []\n\n # divide original image with 16 X 16 grid and make subimages, so find HoG descriptor by grouped 4 cell (32 X 32) and 16 X 16 stride\n for i in range(int(m / 16) - 1):\n for j in range(int(n / 16) - 1):\n x = i * 16\n y = j * 16\n h = hog.compute(img[x:x+32, y:y+32])\n all_f.append(np.reshape(h, (1, 36)))\n\n # combine Hog desciptor from sub images\n all_f = np.concatenate(all_f, 0)\n return all_f\n\n elif feature == 'SIFT':\n\n # Your code here. You should also change the return value.\n m = img.shape[0]\n n = img.shape[1]\n\n all_f = []\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n sift = cv2.xfeatures2d.SIFT_create()\n\n # divide original image with 20 X 20 grid and make subimages, so find SIFT descriptor by 20 X 20 sub images.\n for i in range(int(m / 20)):\n for j in range(int(n / 20)):\n x = i * 20\n y = j * 20\n kp, des = sift.detectAndCompute(gray[x:x+20, y:y+20], None)\n if len(kp) != 0:\n all_f.append(des)\n\n #If sift is not detected, exception handling is done.\n if len(all_f) != 0:\n all_f = np.concatenate(all_f, 0)\n else:\n all_f = None\n\n return all_f",
"def inference_feature_extraction(point_cloud_path,feature_flag): \n if feature_flag == \"local\":\n \n point_cloud = read_point_cloud(point_cloud_path)\n estimate_normals(point_cloud,KDTreeSearchParamHybrid(radius=0.01,max_nn=30))\n fpfh_features=compute_fpfh_feature(point_cloud,KDTreeSearchParamHybrid(radius=0.05,max_nn=50))\n features=fpfh_features.data.T\n features=features/np.max(features)\n \n return features\n \n elif feature_flag == \"global\":\n features_global=[]\n point_cloud = read_point_cloud(point_cloud_path)\n estimate_normals(point_cloud,KDTreeSearchParamHybrid(radius=0.01,max_nn=30))\n fpfh_features = compute_fpfh_feature(point_cloud,KDTreeSearchParamHybrid(radius=0.05,max_nn=50))\n features = fpfh_features.data.T\n features = features/np.max(features)\n\n voxel_features=voxel_occupancy_features(point_cloud_path)\n\n for item in features:\n features_global.append(np.append(item,voxel_features,axis=0))\n \n return np.array(features_global)",
"def extract_feat(self, img):\n feat = self.backbone(img)\n feat = self.neck(feat)\n return feat",
"def _extract_features(self, preprocessed_inputs): \n preprocessed_inputs = shape_utils.check_min_image_dim(33, preprocessed_inputs)\n image_features = self.net(ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))\n layouts = {self._used_nodes[i]: image_features[i] for i, x in enumerate(self._used_nodes) if x}\n feature_maps = self._feature_map_generator(layouts)\n if self._additional_layer_depth:\n final_feature_map = []\n for idx, feature in enumerate(feature_maps.values()):\n feature = tf.keras.layers.Conv2D(filters=self._additional_layer_depth,\n kernel_size=1,\n strides=[1, 1],\n use_bias=True,\n data_format=self._data_format,\n name='conv1x1_'+str(idx))(feature)\n feature = tf.keras.layers.BatchNormalization()(feature, training=self._is_training)\n feature = tf.keras.layers.ReLU(max_value=6)(feature)\n final_feature_map.append(feature)\n return final_feature_map\n else:\n return feature_maps.values() \n \n # with tf.variable_scope(\"EfficientNetFeatureExtractor\", reuse=tf.AUTO_REUSE):\n # # architecture \n # _, endpoints = build_model_base(preprocessed_inputs, self._network_name, training=self._is_training)\n # arch_feature_nodes = [x for x in self._feature_map_layout[\"from_layer\"] if x]\n # arch_features = {x: endpoints[x] for x in arch_feature_nodes}\n # feature_maps = self._feature_map_generator(arch_features)\n # if self._additional_layer_depth:\n # final_feature_map = []\n # for idx, feature in enumerate(feature_maps.values()):\n # feature = tf.keras.layers.Conv2D(filters=self._additional_layer_depth,\n # kernel_size=1,\n # strides=[1, 1],\n # use_bias=True,\n # data_format=self._data_format,\n # name='conv1x1_'+str(idx))(feature)\n # feature = tf.keras.layers.BatchNormalization()(feature, training=self._is_training)\n # feature = tf.keras.layers.ReLU(max_value=6)(feature)\n # final_feature_map.append(feature)\n # return final_feature_map\n # else:\n # return feature_maps ",
"def computeFeatures(self, profile):\n \n print(\"-- ImageData.computeFeatures\")\n resolution = profile.resolution\n resizeFactor = profile.resizeFactor\n #print(\"Needed profile resolution: \" + str(resolution) + \" resizeFactor: \" + str(resizeFactor))\n image = [] # the image read from self.imagePath after resizing \n depth = [] # the depthMap read from self.depthMapPath after resizing \n \n features = ImageFeatures(image, depth)\n \n print(\"All features computed and returned\")\n \n return features",
"def support_vector_machine(self, image):\n\n # Extract features\n hog_features, color_hist_features, hu_moments_features = feature_engineering.extract_engineered_features(image,\n feature_types=self.config[\"svm_feature_types\"],\n hog_window_size=self.config[\"hog_window_size\"],\n hog_block_size=self.config[\"hog_block_size\"],\n hog_block_stride=self.config[\"hog_block_stride\"],\n hog_cell_size=self.config[\"hog_cell_size\"],\n hog_bin_no=self.config[\"hog_bin_no\"],\n color_histogram_size=self.config[\"color_histogram_size\"])\n\n # Reduce HOG features\n if 'HOG' in self.config[\"svm_feature_types\"]:\n hog_features = self.pca_projector.pca_project(sample=hog_features)\n\n # Concatenate the feature vectors\n feature_vector = np.concatenate((hog_features, color_hist_features, hu_moments_features))\n\n # Normalize the input feature vector\n feature_vector = self.normalizer.transform(feature_vector.reshape((1, -1)))\n\n # Classify\n class_scores = np.squeeze(self.model.predict_proba(feature_vector))\n\n return class_scores"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scans a directory for abnormalities in images and their masks | def scan_image_abnormalities(base_path, base_img_resolution, base_msk_resolution):
# Defining lists for appending paths of abnormal images, and their heights, widths, and channels
ab_imgs, ab_im_h, ab_im_w, ab_im_c = [], [], [], []
ab_masks, ab_msk_h, ab_msk_w, ab_msk_c = [], [], [], []
train_files, mask_files = extract_image_paths(base_path)
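    # Compare every image/mask pair against the expected base resolutions and
    # record the path plus the offending height, width, or channel count.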
for img, mask in zip(train_files, mask_files):
img_resolution = get_image_resolution(img)
msk_resolution = get_image_resolution(mask)
if img_resolution != base_img_resolution:
ab_imgs.append(img)
if img_resolution[0] != 500: ab_im_h.append(img_resolution[0])
            elif img_resolution[1] != 500: ab_im_w.append(img_resolution[1])
            elif img_resolution[2] != 3: ab_im_c.append(img_resolution[2])
if msk_resolution != base_msk_resolution:
ab_masks.append(mask)
if msk_resolution[0] != 500: ab_msk_h.append(msk_resolution[0])
            elif msk_resolution[1] != 500: ab_msk_w.append(msk_resolution[1])
            elif msk_resolution[2] != 3: ab_msk_c.append(msk_resolution[2])
abnormal_image_properties = [ab_imgs, ab_im_h, ab_im_w, ab_im_c]
abnormal_mask_properties = [ab_masks, ab_msk_h, ab_msk_w, ab_msk_c]
return abnormal_image_properties, abnormal_mask_properties
# ----------------------------------------------------------------------------------------------------------- | [
"def find_images(folder,img_type):\n pass",
"def scan_path(directory):\n objname= str(base64.b64encode(directory.encode('utf-8')))\n preprocess='preprocess'\n\n if not os.path.isdir(preprocess):\n os.mkdir(preprocess)\n if os.path.isfile(preprocess+'/'+objname):\n picklefile=open(preprocess+'/'+objname,'rb')\n obj=pickle.load(picklefile)\n if time.ctime(os.path.getmtime(directory))==obj['lastmodified']:\n return obj['images']\n\n images=[]\n for (dirpath, dirnames, filenames) in os.walk(directory):\n for f in filenames:\n path=dirpath+'/'+f;\n image=get_face(path)\n if image is not None:\n encodings = face_recognition.face_encodings(image)\n if len(encodings) > 0:\n img = {\n 'image': image,\n 'encodings': encodings,\n 'name': f\n }\n images.append(img)\n\n obj={\n 'lastmodified':time.ctime(os.path.getmtime(directory)),\n 'images': images\n }\n file=open(preprocess+'/'+objname,'wb')\n pickle.dump(obj,file)\n\n return images",
"def find_all_files(min_pixels, origin_folder, target_folder):\n #count = 0\n for root, dirs, files in os.walk(origin_folder):\n vis_files = [f for f in files if not f[0] == '.']\n copy = True\n \"\"\"\n copy = False\n \n if(root.endswith(\"indoor\")):\n print(\"I am indoor\")\n target_folder = indoor_address\n copy = True\n \n if(root.endswith(\"outdoor\")):\n print(\"I am outdoor\")\n target_folder = outdoor_address\n copy = True\n \"\"\"\n if(len(vis_files)>0 and copy):\n for image_name in vis_files:\n #print(root, dirs, image_name)\n with Image.open(root+\"/\"+ image_name) as tested_image:\n width, height = tested_image.size\n if(width>=min_pixels and height>= min_pixels): \n cover = resizeimage.resize_cover(tested_image, [min_pixels, min_pixels])\n cover.convert('RGB').save(target_folder+image_name, 'JPEG')\n \n return root",
"def make_image_list(image_dir):",
"def clean_bad_imgs(root):\n for d in os.listdir(root):\n if os.path.isdir(os.path.join(root, d)):\n clean_bad_imgs(os.path.join(root, d))\n else:\n filename = os.path.join(root, d)\n if filename.endswith('.jpg') or filename.endswith('.png') or filename.endswith('.jpeg'):\n try:\n image = io.imread(filename)\n except:\n os.remove(filename)\n print('remove {0}'.format(filename))\n\n print('done!')",
"def load_scrambled_images(fd):\n choose = []\n for root, dirs, files in os.walk(fd, topdown=False):\n for name in files:\n if check_file(name):\n choose.append(os.path.join(root, name))\n\n l = len(choose)\n for i in range(l):\n swapind = int(random.random() * l)\n temp = choose[i]\n choose[i] = choose[swapind]\n choose[swapind] = temp\n return choose",
"def process_image_directory():\n\n print(\"Processing all \", extension_input, \"files in \", path_input)\n images = utils.get_dir_files(path_input, extension_input)\n\n for image in images:\n processs_CSK_HI(image)\n \n print(\"End of processing.\")",
"def split_images():\n home_dir = get_directory()\n\n count = 0\n for f_name in glob(home_dir + \"/data/raw/facades/**/*.jpg\", recursive=True):\n\n # load image and find bounds\n tmp_img = Image.open(f_name)\n width, height = tmp_img.size\n middle = int(math.ceil(width / 2))\n\n # crop real image and input image\n real_box = (0, 0, middle, height)\n real_img = tmp_img.crop(real_box)\n input_box = (middle, 0, width, height)\n input_img = tmp_img.crop(input_box)\n\n # save images\n real_img.save(home_dir + \"/data/tidy/real/\" + str(count) + \".jpg\")\n input_img.save(home_dir + \"/data/tidy/input/\" + str(count) + \".jpg\")\n\n count += 1\n\n return True",
"def find_images(self, path):\n paths = []\n for file in os.listdir(path):\n if not file.endswith('b.png'):\n paths.append(file)\n paths = np.array(paths)\n encoder, bases = self.generate_encoder(paths)\n return paths, encoder, bases",
"def _process_facebank_directory(self):\n folders = [x for x in os.scandir(self._facebank_directory) if x.is_dir()]\n original_images = {\n folder.name: [x for x in os.scandir(folder) if x.is_file() and (\n x.name.endswith('jpg') or x.name.endswith('png') or x.name.endswith('jpeg') or x.name.endswith('JPG')\n )] for folder in folders\n }\n empty_folder = [key for key, value in original_images.items() if len(value) == 0]\n for e in empty_folder:\n del original_images[e]\n processed_images = dict()\n for folder, images in original_images.items():\n pils = [Image.open(x.path) for x in images]\n cropped_faces = self.crop_and_align_images(pils)\n if not cropped_faces:\n print(f\"No face is found in {folder} folder\")\n break\n else:\n processed_images.update({folder: cropped_faces})\n else:\n for folder, images in original_images.items():\n for image in images:\n os.remove(image.path)\n for folder, images in processed_images.items():\n for idx, image in enumerate(images):\n image.save(join(self._facebank_directory, folder, '{idx}.jpg'))\n return True\n return False",
"def split_black_all():\n pic_list = [x for x in os.listdir() if x[-4:] in [\".jpg\" or \".JPG\"]]\n for pic_name in pic_list:\n land_split(pic_name=pic_name)\n print(\"All done.\")",
"def subset_image_directory():\n\n print(\"Subsetting all \", extension_input_subset, \"files in \", path_input_subset)\n images = utils.get_dir_files(path_input_subset, extension_input_subset)\n\n for image in images:\n subset_image(image)\n \n print(\"End of subsetting.\")",
"def load_images(fd, rgba=True):\n res = []\n for root, dirs, files in os.walk(fd, topdown=False):\n for name in files:\n if check_file(name):\n file = os.path.join(root, name)\n image = load_image(file, False, rgba)\n res.append(image)\n return res",
"def find_images(directory: str) -> set:\n images = set(iglob(directory + \"/**/*.jpg\", recursive=True))\n images |= set(iglob(directory + \"/**/*.jpeg\", recursive=True))\n images |= set(iglob(directory + \"/**/image*.jpg\", recursive=True))\n images |= set(iglob(directory + \"/**/*.JPG\", recursive=True))\n images |= set(iglob(directory + \"/**/*.JEPG\", recursive=True))\n images -= set(iglob(directory + \"/**/UMMZI*.jpg\", recursive=True))\n images -= set(iglob(directory + \"/**/UMMZI*.JPG\", recursive=True))\n\n return images",
"def check_detection():\n # get dirs\n images_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/images'\n cat_dirs = glob.glob(images_dir + '/*')\n faulty = []\n for dir in cat_dirs:\n if not os.path.exists(dir + '/detected/Detection_Results.csv'):\n faulty.append(dir)\n return faulty",
"def finale(directory=None):\n \n if directory == None:\n #change directory to the 'before' folder\n os.chdir(\"before\")\n directory = os.getcwd()# Use working directory if unspecified\n os.chdir(\"..\")\n \n # Create a new directory 'after' folder in the same parent folder as the before 'folder'\n new_directory = os.path.join(os.getcwd(), 'after')\n try:\n os.mkdir(new_directory)\n except OSError:\n pass # if the directory already exists, proceed \n \n #load all the images\n image_list, file_list = get_images(directory) \n\n #go through the images and save modified versions\n for n in range(len(image_list)):\n # Parse the filename\n filename, filetype = os.path.splitext(file_list[n])\n \n # apply the filter function and store as new images\n new_image = negatify(image_list[n])\n #save the altered image, using PNG to retain transparency\n new_image_filename = os.path.join(new_directory, filename + '.png')\n new_image.save(new_image_filename)",
"def read_images():\n\n path = abspath(__file__ + \"/../../\")\n data_path = str(path) + \"/data/\"\n json_path = str(path) + \"/configs/settings.json\"\n\n assign_json_values(json_path)\n\n main_folder = data_path + \"filtered/\" + datetime.datetime.now().strftime(\"%Y_%m_%d_x_%H_%M_%S\")\n create_folder(main_folder)\n\n for i in range(0, 4):\n filename = f\"{i}.jpg\"\n dir_name = data_path + \"\" + \"image_green/\" + filename\n\n # create folder and sub folder\n sub_folder = main_folder + f\"/{i}\" + \"/\"\n create_folder(sub_folder)\n\n # get rotation of image and read it\n rotation = get_image_rotation(dir_name)\n img_reading = imread(dir_name, plugin='matplotlib')\n\n # resize image\n img_scaled = create_scaled_image(img_as_ubyte(img_reading), filename, sub_folder)\n\n # rotate image\n img_rotated = rotate_image(img_scaled, rotation)\n\n # create binary image\n #img_binary = create_binary_image(img_rotated, filename, sub_folder)\n # img_binary = create_greenfiltered_image(img_rotated, filename, sub_folder)\n img_binary = create_chromakey_image(img_rotated, filename, sub_folder)\n\n # get black borders inside of image\n img_borders = borders(img_binary, filename, sub_folder)\n\n # create two filtered images\n # img_canny = create_canny_image(img_borders, filename, sub_folder) # UNUSED!\n img_skeleton = create_skeleton_image(img_borders, filename, sub_folder)\n\n # align binary image to center of mass\n img_com = create_com_image(img_skeleton, filename, sub_folder)",
"def _get_road_image_paths():\n\timg_paths = glob.glob('./test_images/*.jpg')\n\treturn img_paths",
"def test_unscanned_find_assets(files_dir):\n swords_dir = pathlib.Path(SWORDS_DIR)\n asset_dir = data.recursive_load_asset_dir(swords_dir)\n\n expected_assets = [\"square_crossed.png\", \"tall.png\", \"wide.png\"]\n\n assert asset_dir.asset_count() == len(expected_assets)\n\n found_assets = []\n for asset in asset_dir.assets().values():\n relative_path = asset.relative_path(asset_dir.absolute_path())\n found_assets.append(str(relative_path))\n\n # Asset is new, so it should be marked as dirty.\n assert asset.is_dirty()\n\n for expected_asset in expected_assets:\n assert expected_asset in found_assets"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes all threads that return FALSE on isAlive() from the running_threads list | def free_dead(self):
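        # Iterate over a copy of running_threads so finished entries can be removed from the original list safely.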
for th in self.running_threads[:]:
if th[0].isAlive() == False:
self.running_threads.remove(th) | [
"def _monitor(self):\n if len(self.threads) == 1:\n self._monitor_one(self.threads[0])\n return\n\n all_alive = True\n while all_alive:\n if not all([t.isAlive() for t in self.threads]):\n for thread in self.threads:\n thread.stop()\n all_alive = False\n else:\n time.sleep(5)",
"def stop_threads(self):\n for thread in self.threads.values():\n thread.stop()",
"def shutdown(self):\n for i in range(self.number_of_threads):\n self.threads[i].join()",
"def stop_all_threads(self):\n\n for i in range(0, len(self.threads)):\n self.stop_thread(i)",
"async def ensure_bumped_threads_are_active(self) -> None:\n await self.bot.wait_until_guild_available()\n\n threads_to_maybe_bump = []\n for thread_id, _ in await self.threads_to_bump.items():\n try:\n thread = await channel.get_or_fetch_channel(thread_id)\n except disnake.NotFound:\n log.info(\"Thread %d has been deleted, removing from bumped threads.\", thread_id)\n await self.threads_to_bump.delete(thread_id)\n continue\n\n if thread.archived:\n threads_to_maybe_bump.append(thread)\n\n await self.unarchive_threads_not_manually_archived(threads_to_maybe_bump)",
"def joinall(self):\n for th in self.running_threads[:]:\n while th[0].isAlive():\n sleep(0.1)\n th[0].join()\n # print \"Thread:\",th[1],\"joined\",\"isalive:\",th[0].isAlive() --- Debug stuff",
"def check_dangling_threads(timeout=15):\n deadline = time.time() + timeout\n while threading.active_count() > 1 and time.time() < deadline:\n time.sleep(1)\n\n if threading.active_count() > 1:\n threads = list(threading.enumerate())\n assert \"Had extra threads alive at the end of the tests {}\".format(threads)",
"def show_running_threads(self):\n\n running_threads = []\n for t in self.threads:\n if t.is_alive():\n running_threads.append(t.name)\n return running_threads",
"def stop_workers(self):\n if self._workers:\n with self._updates_lock:\n # Insert at the beginning so the very next poll causes an error\n # on all the worker threads\n # TODO Should this reset the pts and such?\n for _ in range(self._workers):\n self._updates.put(StopIteration())\n\n for t in self._worker_threads:\n t.join()\n\n self._worker_threads.clear()",
"def _clean(self):\n cleaned = 0\n for k in range(len(self._workers) - 1, -1, -1):\n w = self._workers[k]\n if w.exitcode is not None: # pragma: no cover\n w.join()\n cleaned += 1\n del(self._workers[k])\n if cleaned: # pragma: no cover\n gc.collect()\n return cleaned",
"def cleanup_idle(self):\r\n remove = self._schedules.remove\r\n for sched in self._schedules[:]:\r\n if not sched.running:\r\n remove(sched)",
"def resetShallCancelThreads():\n global shallCancelThreads\n shallCancelThreads = False",
"def clean_workers(self, timeout=5, force=False):\n cnt = 0\n while force or cnt < timeout:\n edited = False\n cnt += 1\n for worker in self.workers:\n del_it = False\n if worker.started and worker.running and force:\n worker.running = False\n del_it = True\n elif worker.started and not worker.running:\n del_it = True\n\n if del_it:\n worker.join()\n self.workers.remove(worker)\n del worker\n edited = True\n break\n\n if not edited:\n return",
"def cleanup(self):\n r = []\n for runner in self.runners:\n if runner.queue.empty(): r.append(runner)\n if not r: return\n for runner in r: runner.stop()\n for runner in r:\n try: self.runners.remove(runner)\n except ValueError: pass\n logging.debug(\"%s - cleaned %s\" % (self.name, [item.name for item in r]))\n logging.debug(\"%s - now running: %s\" % (self.name, self.size()))",
"def stopThreads(self):\n for bird in self.Birds:\n bird.stop()\n\n for toad in self.Toads:\n toad.stop()",
"def stop(self):\n\n if not self.running.is_set():\n return\n\n logger.debug(\"Shutting down threads...\")\n\n self.running.clear()\n self.syncing.clear()\n self.paused_by_user.clear()\n self.startup_requested.clear()\n self.startup_done.set()\n\n self.local_observer_thread.stop()\n self.local_observer_thread.join()\n self.connection_thread.join()\n self.upload_thread.join()\n # self.download_thread.join()\n\n logger.info(STOPPED)",
"def stop_handler_threads(app: flask.Flask) -> None:\n if not app.config['ENABLE_BACKGROUND_TASKS']:\n return\n\n global should_stop\n should_stop = True\n\n # notify handler threads about having to stop\n wake_event.set()\n\n # then join handler threads\n # this is tried repeatedly, so that even if one thread is blocking, all others will be joined correctly\n running_threads = set(handler_threads)\n while running_threads:\n for handler_thread in running_threads.copy():\n handler_thread.join(1)\n if not handler_thread.is_alive():\n running_threads.remove(handler_thread)",
"def cleanThreadTimeToWait() -> None:\n ...",
"def delete_active_workers(self):\n wdict = dict([(ele['id'],ele['os']) for ele in self.get_workers()])\n aworkers = self.get_active_workers().keys()\n aworkers += [ele for ele in wdict.keys() if ele not in aworkers]\n\n return [self.delete_worker(ele) for ele in aworkers]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Joins all the threads together into the calling thread. | def joinall(self):
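        # Wait for each worker: poll until it is no longer alive, then join it into the calling thread.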
for th in self.running_threads[:]:
while th[0].isAlive():
sleep(0.1)
th[0].join()
# print "Thread:",th[1],"joined","isalive:",th[0].isAlive() --- Debug stuff | [
"def join(self):\n for thread in self.threads:\n while 1:\n thread.join(1)\n if not thread.isAlive():\n break",
"def join(self):\n logger.debug(\"Joining Threads: '%s'\", self._name)\n for thread in self._threads:\n logger.debug(\"Joining Thread: '%s'\", thread._name) # pylint: disable=protected-access\n thread.join()\n if thread.err:\n logger.error(\"Caught exception in thread: '%s'\",\n thread._name) # pylint: disable=protected-access\n raise thread.err[1].with_traceback(thread.err[2])\n logger.debug(\"Joined all Threads: '%s'\", self._name)",
"def _start_all(self):\n for thread in self.threads:\n thread.start()",
"def join(self) -> None:\n try:\n assert self._root is self, \"only root tasks can be joined\"\n assert self._state != STATE_INIT, \"can't join tasks in state \" + str(self._state)\n except Exception as exc:\n self._root._exc.put(exc)\n raise\n self._exc.fire()\n try: self._thread_start.join()\n except Exception: pass\n try: self._thread_cont.join()\n except Exception: pass\n try: self._thread.join()\n except Exception: pass",
"def end_threads(self): \n self.input_thread.join()\n self.listen_thread.join()",
"def join(self):\n for process in self.stack:\n process.join(timeout=self.release_timeout)",
"def Join(self):\n if self.workingThread is not None:\n self.done = True\n self.doneCallback = None\n # import time\n # time.sleep(2)\n del self.workingThread\n # self.workingThread.join()\n self.workingThread = None",
"def run():\n # Avoid circular dependencies\n from vjezd import crit_exit, exit\n from vjezd import device as this_device\n from vjezd.threads.print import PrintThread\n from vjezd.threads.scan import ScanThread\n\n if 'print' in this_device.modes:\n threads.append(PrintThread())\n if 'scan' in this_device.modes:\n threads.append(ScanThread())\n\n for t in threads:\n logger.debug('Starting thread {}'.format(t.name))\n t.start()\n\n while not exiting:\n # Check if all threads are still active\n for t in threads:\n logger.debug('Monitoring threads')\n if not t.is_alive():\n logger.critical('Thread {} is not alive. Exiting'.format(\n t.name))\n crit_exit(10, force_thread=True)\n time.sleep(1)\n\n logger.info('Waiting for all threads to join')\n for t in threads:\n t.join()\n\n # Exit depending on exiting state\n if exiting == CRIT_EXITING:\n crit_exit(10)\n else:\n exit()",
"def join_all(self):\n for channel in self.config['channels']:\n self.join(channel)",
"def poll():\n global master_thread, slave_threads\n master_thread.start()\n for i in slave_threads:\n slave_threads[i].start()",
"def dispatch_and_join(thread_objs): \r\n for t_obj in thread_objs:\r\n t_obj.start()\r\n \r\n details = []\r\n for t_obj in thread_objs:\r\n try:\r\n t_obj.join()\r\n except:\r\n info = sys.exc_info()\r\n logging.error(\"exception during join() of {} - {}\".format\r\n (t_obj.thread_name, info[0]))\r\n finally: \r\n if t_obj.isAlive():\r\n logging.error(\r\n \"Thread fetching {} timed out\".format(\r\n t_obj.thread_name))\r\n else:\r\n detail = t_obj.detail\r\n details.append(detail)\r\n return details",
"def main(self) -> list:\r\n\r\n for thread in range(self.threads):\r\n t = threading.Thread(target=self.threader)\r\n t.daemon = True\r\n t.start()\r\n\r\n for curr in self.hosts:\r\n self.q.put(curr)\r\n\r\n self.q.join()\r\n\r\n return self.res",
"def _join():\n # Note: _utils might be none when _init wasn't called\n if _utils is None:\n return\n logger = _utils.get_logger()\n\n # Wait until the thread of the event loop terminates\n if _thread is not None:\n logger.debug('Joining')\n _thread.join(timeout=1.1)\n if _thread.is_alive():\n logger.error('Joining timed out, terminating ungracefully')\n\n # Reset globals and exit\n _reset_globals()\n logger.info('Exiting')",
"def _wait_threads_to_finish(self):\n for thread in self._pool:\n logging.debug('Joining thread %s', thread)\n thread.join()\n\n with self._lock:\n self._state = ThreadPoolExecutorState.STOPPED",
"def multiple_threads_handler(threads_count=3):\n threads = []\n results = []\n for i in range(threads_count):\n thread = Thread(target = _send_get_request, args = (\"http://google.com\", results))\n thread.start()\n threads.append(thread)\n for thread in threads:\n thread.join()\n assert len(results) == threads_count",
"def shutdown(self):\n for i in range(self.number_of_threads):\n self.threads[i].join()",
"def _launchThreads(self, numThreads):\n i = 0\n while i < numThreads:\n i += 1\n newThr = threading.Thread(target=self._processUsers)\n newThr.setDaemon(True)\n self._threads.add(newThr)\n newThr.start()",
"def twisted_threadpool_worker(self):\n ct = self.currentThread()\n o = self.q.get()\n while o is not threadpool.WorkerStop:\n self.working.append(ct)\n ctx, function, args, kwargs, onResult = o\n del o\n\n # Calculate a function name\n name = ct.getName()\n try:\n funcname = \"<unknown>\"\n username = \"<unknown>\"\n try:\n adict = args[2][0]\n if \"name\" in adict.keys():\n funcname = adict[\"name\"]\n except: pass\n try:\n adict = args[2][1]\n if \"username\" in adict.keys():\n username = adict[\"username\"]\n except: pass\n ct.setName(\"%s (%s:%s)\" % (name, username, funcname))\n ct.started_at = time.time()\n except:\n pass\n try:\n result = context.call(ctx, function, *args, **kwargs)\n success = True\n except:\n success = False\n if onResult is None:\n context.call(ctx, log.err)\n result = None\n else:\n result = failure.Failure()\n del function, args, kwargs\n\n self.working.remove(ct)\n\n if onResult is not None:\n try:\n context.call(ctx, onResult, success, result)\n except:\n context.call(ctx, log.err)\n\n ct.setName(name)\n del ctx, onResult, result\n self.waiters.append(ct)\n o = self.q.get()\n self.waiters.remove(ct)\n\n self.threads.remove(ct)",
"def join(self):\n self.stop_threads()\n\n self.geocoder.join()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns parameters from the running_threads list for external manipulation | def get_all_params(self):
for thli in self.running_threads:
yield(thli[0],thli[1],thli[2]) | [
"def thread_info(self):\n print(threading.active_count())\n for t in threading.enumerate():\n print(t.getName())",
"def hyperthreads_for(rank_spec):",
"def getAllThreads(self):\n raise NotImplementedError",
"def getThreads():\n if sys.platform == 'win32':\n return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())",
"def omp_threads_for(rank_spec):",
"def getThreadStatus():\n thread_dict = {}\n c=0\n\n threads = threading.enumerate()\n for ti in threads:\n if c==0: status=\"R\" # MainThread\n elif ti in reactor.threadpool.waiters: status=\"S\" # sleeping\n elif ti in reactor.threadpool.working: status=\"R\" # running\n else: status=\" \" # unknown\n started_at = getattr(ti, \"started_at\", -1)\n if started_at != -1 and status==\"R\":\n age = time.time()-started_at\n age_str = \"%3.3f\" % (time.time()-started_at)\n else:\n age = -1\n age_str = \" \"*6\n thread_dict[c] = {\"name\":ti.getName(), \"status\":status,\n \"age\":age, \"age_str\":age_str}\n c+=1\n return thread_dict",
"def get_nthreads():\n return C.blosc_get_nthreads()",
"def _get_slurm_params():\n cmd = \"scontrol show hostnames '%s'\" % os.environ[\"SLURM_JOB_NODELIST\"]\n nodes = subprocess.getoutput(cmd).split()\n num_nodes = int(os.environ[\"SLURM_JOB_NUM_NODES\"])\n current_node = os.environ[\"SLURMD_NODENAME\"]\n master_node = socket.gethostbyname(nodes[0])\n cur_node_idx = nodes.index(current_node)\n job_id = os.environ[\"SLURM_JOB_ID\"]\n master_port = str(5 * 10 ** 4 + int(job_id) % 10 ** 4)\n return cur_node_idx, num_nodes, master_node, master_port",
"def show_running_threads(self):\n\n running_threads = []\n for t in self.threads:\n if t.is_alive():\n running_threads.append(t.name)\n return running_threads",
"def test_02_get_threads_list(self):\n time.sleep(0.5) # allows debugger to start\n self.ikpdb.set_breakpoint(DEBUGGED_PROGRAM, line_number=42)\n self.ikpdb.run_script()\n\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['command'],\n \"programBreak\", \n \"Received: %s while expecting 'programBreak'\" % (i_msg['command'],))\n\n i_msg = self.ikpdb.get_threads()\n threads_dict = i_msg['result']\n nb_threads = len(threads_dict)\n self.assertEqual(nb_threads, 4, \n \"Unexpected number of threads (Received: %s, expecting 4)\" % (nb_threads,))\n self.assertEqual(set([threads_dict[ident]['name'] for ident in threads_dict]),\n set([u'Thread-2', u'IKPdbCommandLoop', u'MainThread', u'Thread-1']),\n \"Incorrect threads list returned.\")",
"def _multiprocessing_args():\n if utils.cpu_count() == 1:\n # GSUtil's default thread count is 5 as it assumes the common configuration\n # is many CPUs (GSUtil uses num_cpu processes).\n return ['-o', 'GSUtil:parallel_thread_count=16']\n\n return []",
"def get_available_threads(outdir):\n\n # MN4\n if \"BSC_MACHINE\" in os.environ and os.environ[\"BSC_MACHINE\"]==\"mn4\":\n\n available_threads = int(os.environ[\"SLURM_CPUS_PER_TASK\"])\n\n # Nord3 interactive nodes\n elif \"BSC_MACHINE\" in os.environ and os.environ[\"BSC_MACHINE\"]==\"nord3\" and not \"LSB_MCPU_HOSTS\" in os.environ:\n\n available_threads = 4\n\n # BSC machine\n elif str(subprocess.check_output(\"uname -a\", shell=True)).startswith(\"b'Linux bscls063 4.12.14-lp150.12.48-default\"): \n\n available_threads = 4\n\n # others. Calculate by running GATK\n else:\n\n # redefnie the outdir under outdir\n outdir = \"%s/getting_available_threads\"%(outdir)\n delete_folder(outdir)\n make_folder(outdir)\n\n # define a genome that has one chromosome\n genome = \"%s/genome.fasta\"%outdir\n genome_obj = SeqRecord(Seq(\"ACTGCGATCGACTCGATCGATGAGAGAGAGGACTCTCAACAG\"*10), id=\"chromosomeX\")\n SeqIO.write([genome_obj], genome, \"fasta\")\n\n # get some simulated reads\n reads1, reads2 = simulate_testing_reads_on_genome(genome, window_l=75, npairs=1000, read_length=50, median_insert_size=15, median_insert_size_sd=5, threads=4, replace=False)\n\n # get a sorted bam\n sorted_bam = get_sorted_bam_test(reads1, reads2, genome, replace=False)\n\n # create the files\n create_sequence_dict(genome, replace=False)\n\n # run GATK HC \n gatk_out = \"%s/output_HC.vcf\"%outdir\n gatk_std = \"%s.running.std\"%gatk_out\n\n gatk_cmd = \"%s HaplotypeCaller -R %s -I %s -O %s -ploidy %i --genotyping-mode DISCOVERY --emit-ref-confidence NONE --stand-call-conf 30 --native-pair-hmm-threads %i > %s 2>&1\"%(gatk, genome, sorted_bam, gatk_out, 1, 100000000, gatk_std)\n\n run_cmd(gatk_cmd)\n\n # get the available threads\n threads_lines = [l for l in open(gatk_std, \"r\").readlines() if \"IntelPairHmm - Using\" in l and \"available threads, but\" in l and \"were requested\" in l]\n if len(threads_lines)!=1: raise ValueError(\"the threads were not properly calculated\")\n\n available_threads = int(threads_lines[0].split(\"IntelPairHmm - Using \")[1].split(\"available threads\")[0])\n\n # print\n print_if_verbose(\"there are %i available threads in this run\"%available_threads)\n\n # remove the outdir\n delete_folder(outdir)\n\n return available_threads",
"def smt_params(self):\n cpucount = 0\n SMTLIST = []\n SMT = {}\n if self.GENERAL_SNAP:\n self.GENERAL_SNAP.seek(0)\n while True:\n CUR_SMT = {}\n line = self.GENERAL_SNAP.readline()\n if not line:\n break\n if 'smt_enabled' in line:\n CUR_SMT.update({'smt_enabled' : line.split()[1]})\n line = self.GENERAL_SNAP.readline()\n CUR_SMT.update({'smt_threads' : line.split()[1]})\n if len(SMTLIST) > 0:\n for item in SMTLIST:\n if not(item == CUR_SMT):\n SMTLIST.append(CUR_SMT)\n else:\n SMTLIST.append(CUR_SMT)\n cpucount += 1\n# (cpucount == len(self.__snap_proc_list())) and\n\n if (len(SMTLIST) == 1):\n if SMTLIST[0]['smt_enabled'] == 'true':\n SMT.update({'smt_threads_count' : SMTLIST[0]['smt_threads']})\n else:\n SMT.update({'smt_threads_count' : '0'})\n else:\n return None\n return SMT",
"def mp_spawn_kwargs(self):\n return {\n \"args\": (self.lightning_module.trainer, self.mp_queue),\n \"nprocs\": self.num_processes,\n \"cpu_procs\": self.cpu_for_each_process\n }",
"def set_thread_params(\n self,\n enable: bool = None,\n *,\n count: int = None,\n count_offload: int = None,\n stack_size: int = None,\n no_wait: bool = None\n ):\n self._set('enable-threads', enable, cast=bool)\n self._set('no-threads-wait', no_wait, cast=bool)\n self._set('threads', count)\n self._set('offload-threads', count_offload)\n\n if count:\n self._section.print_out(f'Threads per worker: {count}')\n\n self._set('threads-stacksize', stack_size)\n\n return self._section",
"def get_bowtie2_number_threads():\n global BOWTIE2_THREADS\n return BOWTIE2_THREADS",
"def get_active_workers(self):\n return self.wdict",
"def get_distributed_params():\n master_port = str(random.randint(5 * 10 ** 4, 6 * 10 ** 4))\n master_addr = \"127.0.0.1\"\n cur_node, num_nodes = 0, 1\n if _is_slurm_available():\n cur_node, num_nodes, master_addr, master_port = _get_slurm_params()\n\n os.environ[\"MASTER_ADDR\"] = os.getenv(\"MASTER_ADDR\", master_addr)\n os.environ[\"MASTER_PORT\"] = os.getenv(\"MASTER_PORT\", master_port)\n\n workers_per_node = torch.cuda.device_count()\n start_rank = cur_node * workers_per_node\n world_size = num_nodes * workers_per_node\n\n local_rank = os.getenv(\"LOCAL_RANK\", None)\n rank = os.getenv(\"RANK\", None)\n local_rank, rank = [v and int(v) for v in [local_rank, rank]]\n world_size = int(os.getenv(\"WORLD_SIZE\", world_size))\n\n output = OrderedDict(\n local_rank=local_rank,\n start_rank=start_rank,\n rank=rank,\n world_size=world_size,\n master_addr=os.environ[\"MASTER_ADDR\"],\n master_port=os.environ[\"MASTER_PORT\"],\n )\n\n return output",
"def realSchedParams(self,cfg_params):\n params={'jobScriptDir':common.work_space.jobDir(),\n 'jobResDir':common.work_space.resDir()\n }\n\n\t\t# update parameters\n for s in ('resources', 'queue', 'workernodebase', 'hostname', 'forcetransferfiles', 'grouplist'):\n params[s] = cfg_params.get( self.name().upper()+'.'+s,'' )\n\n return params"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the given element_names from xmlfile and yield compound objects for their xml subtrees (no extra objects are returned if element_names appear in the subtree) The compound objects provide all element attributes of the root of the subtree as attributes unless attr_names are supplied. In this case attr_names maps element names to a list of attributes which are supplied. If attr_conversions is not empty it must map attribute names to callables which will be called upon the attribute value before storing under the attribute name. The compound objects gives dictionary style access to list of compound objects o for any children with the given element name o['child_element_name'] = [osub0, osub1, ...] As a shorthand, attribute style access to the list of child elements is provided unless an attribute with the same name as the child elements exists (i.e. o.child_element_name = [osub0, osub1, ...]) | def parse(xmlfile, element_names, element_attrs={}, attr_conversions={},
heterogeneous=False, warn=False):
if isinstance(element_names, str):
element_names = [element_names]
elementTypes = {}
for _, parsenode in ET.iterparse(_open(xmlfile, None)):
if parsenode.tag in element_names:
yield _get_compound_object(parsenode, elementTypes,
parsenode.tag, element_attrs,
attr_conversions, heterogeneous, warn)
parsenode.clear() | [
"def parse_fast_nested(xmlfile, element_name, attrnames, element_name2, attrnames2,\n warn=False, optional=False, encoding=\"utf8\"):\n Record, reprog = _createRecordAndPattern(element_name, attrnames, warn, optional)\n Record2, reprog2 = _createRecordAndPattern(element_name2, attrnames2, warn, optional)\n record = None\n for line in _open(xmlfile, encoding):\n m2 = reprog2.search(line)\n if m2:\n if optional:\n yield record, Record2(**m2.groupdict())\n else:\n yield record, Record2(*m2.groups())\n else:\n m = reprog.search(line)\n if m:\n if optional:\n record = Record(**m.groupdict())\n else:\n record = Record(*m.groups())",
"def parse_tree(self, node):\n for key in node.attrib:\n self.attributes[key] = node.attrib[key]\n self.attributes['xml_tag'] = node.tag\n # self.attributes['xml_element'] = node\n\n for child in node:\n try:\n if not child.attrib['name'] in self.children:\n child_config = ConfigurationElement(child.attrib['name'])\n child_config.parse_data(child, root=False)\n self.children[child.attrib['name']] = child_config\n else:\n self.children[child.attrib['name']].parse_tree(child)\n except KeyError:\n print_error(\"No name attribute for node \" + child.tag + \". Tree with root \"\n \"at node \" + child.tag + \" not parsed.\")",
"def _parse_children(xml_element, mjcf_element, escape_separators=False):\n for xml_child in xml_element:\n if xml_child.tag is etree.Comment or xml_child.tag is etree.PI:\n continue\n try:\n child_spec = mjcf_element.spec.children[xml_child.tag]\n if escape_separators:\n attributes = {}\n for name, value in six.iteritems(xml_child.attrib):\n new_value = value.replace(constants.PREFIX_SEPARATOR_ESCAPE, constants.PREFIX_SEPARATOR_ESCAPE * 2)\n new_value = new_value.replace(constants.PREFIX_SEPARATOR, constants.PREFIX_SEPARATOR_ESCAPE)\n attributes[name] = new_value\n else:\n attributes = dict(xml_child.attrib)\n if child_spec.repeated or child_spec.on_demand:\n mjcf_child = mjcf_element.add(xml_child.tag, **attributes)\n else:\n mjcf_child = getattr(mjcf_element, xml_child.tag)\n mjcf_child.set_attributes(**attributes)\n except: # pylint: disable=bare-except\n err_type, err, traceback = sys.exc_info()\n message = \"Line {}: error while parsing element <{}>: {}\".format(xml_child.sourceline, xml_child.tag, err)\n six.reraise(err_type, err_type(message), traceback)\n _parse_children(xml_child, mjcf_child, escape_separators)",
"def basic_xml_parse(nodes):\n values = {}\n for node in nodes:\n if node.nodeType == 1:\n node.normalize()\n if len(node.childNodes) == 1:\n if node.attributes.keys():\n values[node.tagName] = {}\n for e in node.attributes.keys():\n values[node.tagName][e] = node.attributes[e].value\n values[node.tagName]['value'] = node.childNodes[0].nodeValue\n else:\n values[node.tagName] = node.childNodes[0].nodeValue\n else:\n nv = {}\n if node.tagName == \"rowset\":\n rset = []\n for nd in node.childNodes:\n if nd.nodeType == 1:\n d = {}\n for e in nd.attributes.keys():\n d[e] = nd.attributes[e].value\n\n if len(nd.childNodes) > 0:\n p = basic_xml_parse(nd.childNodes)\n for i in p.keys():\n d[i] = p[i]\n\n rset.append(d)\n values[node.attributes['name'].value] = rset\n else:\n values[node.tagName] = basic_xml_parse(node.childNodes)\n if node.attributes.keys():\n for e in node.attributes.keys():\n values[node.tagName][e] = node.attributes[e].value\n\n\n return values",
"def from_fatxmls(cls, basedir, filenames=[]):\n groups = []\n parts = []\n if len(filenames) > 0:\n source = [f for f in filenames if os.path.splitext(f)[1] == '.xml']\n else:\n source = glob.glob(\"{0}/*.fatxml\".format(basedir))\n for fn in source:\n raw_path = cls.extract_tree_path_from_fatxml(fn)\n path = [cls.parse_name_without_underscore(r) for r in raw_path]\n parent = ''\n for g in path:\n groups.append({'name': g[0], 'label': g[1], 'parent': parent})\n parent = g[0]\n name, label = cls.parse_name_without_underscore(os.path.splitext(os.path.basename(fn))[0])\n parts.append({'name': name, 'label': label, 'parent': parent, 'filename': os.path.splitext(fn)[0]})\n groupdict = {g['name']: g for g in groups}\n groups = [v for _, v in groupdict.items()]\n return cls(groups, parts)",
"def get_element_names(self, element_tree, element_names=[]):\n\t\telement_names.append(element_tree['name'])\n\n\t\tif 'children' in element_tree: \n\t\t\tfor child in element_tree['children']: \n\t\t\t\tself.get_element_names(child, element_names)",
"def get_elements_dict(self, element_tree, elements):\n\t\telements[element_tree[\"name\"]] = element_tree \n\n\t\tif \"children\" in element_tree and len(element_tree[\"children\"]): \n\t\t\tfor child in element_tree[\"children\"]: \n\t\t\t\tself.get_elements_dict(child, elements)",
"def iter_elements(self, name: Union['bytes', 'str']=None) -> Iterator['Element']:\n\n def yield_elements(element):\n for child in element:\n if is_element(child):\n if name is None or child.name == name:\n yield child\n yield from yield_elements(child)\n else:\n yield from yield_elements(child)\n\n return yield_elements(self)",
"def _process_attributes(self, attributes_element):\n for element in list(attributes_element):\n if element.tag != \"attribute\":\n raise AglyphError(\n \"unexpected element: attributes/%s\" % element.tag)\n name = element.get(\"name\")\n if not name:\n raise AglyphError(\n \"attribute/@name is required and cannot be empty\")\n value = self._unserialize_element_value(element)\n yield (name, value)",
"def elements(self, name: Union['bytes', 'str']=None) -> Iterator['Element']:\n def yield_elements(elements_name):\n for child in self:\n if is_element(child) and (elements_name is None or child.name == elements_name):\n yield child\n return yield_elements(name)",
"def iter_attributes(self, name: Union['bytes', 'str']=None) -> Iterator['Attribute']:\n\n def yield_attributes(element):\n for child in element:\n if is_element(child):\n yield from yield_attributes(child)\n else:\n if name is None or child.name == name:\n yield child\n\n return yield_attributes(self)",
"def _parse(xml_root, escape_separators=False, model_dir=\"\", resolve_references=True, assets=None):\n\n assets = assets or {}\n\n if xml_root.tag != \"mujoco\":\n raise ValueError(\"Root element of the XML should be <mujoco>: got <{}>\".format(xml_root.tag))\n\n with debugging.freeze_current_stack_trace():\n # Recursively parse any included XML files.\n to_include = []\n for include_tag in xml_root.findall(\"include\"):\n try:\n # First look for the path to the included XML file in the assets dict.\n path_or_xml_string = assets[include_tag.attrib[\"file\"]]\n parsing_func = from_xml_string\n except KeyError:\n # If it's not present in the assets dict then attempt to load the XML\n # from the filesystem.\n path_or_xml_string = os.path.join(model_dir, include_tag.attrib[\"file\"])\n parsing_func = from_path\n included_mjcf = parsing_func(\n path_or_xml_string,\n escape_separators=escape_separators,\n resolve_references=resolve_references,\n assets=assets,\n )\n to_include.append(included_mjcf)\n # We must remove <include/> tags before parsing the main XML file, since\n # these are a schema violation.\n xml_root.remove(include_tag)\n\n # Parse the main XML file.\n try:\n model = xml_root.attrib.pop(\"model\")\n except KeyError:\n model = None\n mjcf_root = element.RootElement(model=model, model_dir=model_dir, assets=assets)\n _parse_children(xml_root, mjcf_root, escape_separators)\n\n # Merge in the included XML files.\n for included_mjcf in to_include:\n # The included MJCF might have been automatically assigned a model name\n # that conficts with that of `mjcf_root`, so we override it here.\n included_mjcf.model = mjcf_root.model\n mjcf_root.include_copy(included_mjcf)\n\n if resolve_references:\n mjcf_root.resolve_references()\n return mjcf_root",
"def parse_data(self, *arg, **kwargs):\n\n for xml in arg:\n # when the elem_type is xml_object(root xml object)\n if 'elem_type' in kwargs and kwargs['elem_type'] == \"xml_object\":\n config_tree = xml\n elif 'root' in kwargs and not kwargs['root']:\n config_tree = xml\n else:\n tree = ElementTree.parse(xml)\n config_tree = tree.getroot()\n\n self.parse_tree(config_tree)",
"def from_etree(cls, elem):\n if not isinstance(elem, ET.Element):\n msg = \"Bad type {} - should be xml.etree.ElementTree.Element\"\n raise ValueError(msg.format(type(elem)))\n try:\n SubClass = getattr(ofxtools.models, elem.tag)\n except AttributeError:\n msg = \"ofxtools.models doesn't define {}\".format(elem.tag)\n raise ValueError(msg)\n\n # Hook to modify incoming ``ET.Element`` before conversion\n elem = SubClass.groom(elem)\n\n spec = list(SubClass.spec)\n listitems = SubClass.listitems\n\n args = []\n kwargs = {}\n specIndices = []\n\n for subelem in elem:\n key = subelem.tag.lower()\n\n if key in kwargs:\n msg = \"{} contains multiple {}\"\n raise ValueError(msg.format(SubClass.__name__, key))\n\n # If child contains text data, it's an Element; return text data.\n # Otherwise it's an Aggregate - perform type conversion\n if key in SubClass.unsupported:\n value = None\n elif subelem.text:\n value = subelem.text\n else:\n value = Aggregate.from_etree(subelem)\n\n try:\n idx = spec.index(key)\n specIndices.append((idx, spec[idx]))\n if key in listitems:\n args.append(value)\n else:\n kwargs[key] = value\n except ValueError:\n msg = \"{} is not in {}\".format(key, spec) # FIXME\n raise ValueError(msg)\n\n # Verify that SubElements appear in the order defined by self.spec\n for (idx0, attr0), (idx1, attr1) in pairwise(specIndices):\n # Relative order of ListItems doesn't matter, but position of\n # ListItems relative to non-ListItems (and that of non-ListItems\n # relative to other non-ListItems) does matter.\n if idx1 <= idx0 and (attr0 not in listitems or attr1 not in listitems):\n msg = \"{} SubElements out of order: {}\"\n raise ValueError(msg.format(SubClass.__name__, [el.tag for el in elem]))\n\n instance = SubClass(*args, **kwargs)\n return instance",
"def get_compound_attr(obj, *namesandindices):\n currentattr = obj\n for e in namesandindices:\n currentattr = _getattr_from_compound_element(currentattr, e)\n return currentattr",
"def parse_mesh(filename):\r\n MeshNameDict = {}\r\n MeshUiDict = {}\r\n MeshFullWordDict = defaultdict(list)\r\n\r\n def tree(): return defaultdict(tree)\r\n\r\n def add(t, path, name, ui):\r\n for node in path: \r\n t = t[node] \r\n t['name'] = (name, ui)\r\n #t['ui'] = ui\r\n \r\n MeshTree = tree()\r\n\r\n for _evt, elem in elemtree.iterparse(filename):\r\n if elem.tag == 'DescriptorRecord':\r\n #yield DescriptorRecord.from_xml_elem(elem)\r\n DR = DescriptorRecord.from_xml_elem(elem)\r\n\r\n if DR.tree_numbers != None:\r\n # make lowercase for Dictionary\r\n MeshNameDict[DR.name.lower()] = DR.tree_numbers\r\n MeshUiDict[DR.ui.lower()] = DR.tree_numbers\r\n\r\n \tword_list = [x.strip(',') for x in DR.name.split()] \r\n \tfor word in word_list:\r\n \t\tMeshFullWordDict[word.lower()] += [DR.name]\r\n \r\n for item in DR.tree_numbers:\r\n add(MeshTree, item.split('.'), DR.name, DR.ui)\r\n\r\n return MeshNameDict, MeshUiDict, MeshTree, MeshFullWordDict",
"def _parse_children(self):\n for child in self.xml:\n if child.tag == \"input\":\n self.inputs.append(TestInput(child, self))\n elif child.tag == \"output\":\n outvar = TestOutput(child)\n self.outputs[outvar.identifier] = outvar\n elif child.tag == \"target\":\n self.targets.append(TestTarget(child, self))\n elif child.tag == \"prereq\" and \"method\" in child.attrib:\n self.methods.append(TestPreReq(child, self))\n elif child.tag == \"assignment\":\n self.methods.append(Assignment(child, self))\n elif (child.tag == \"global\" and \"name\" in child.attrib):\n TestingGroup.global_add(self.variables, self._variable_order,\n child.attrib[\"name\"].lower(), child)",
"def _clean_names(self, names):\n for n in names:\n definition = n.parent\n if isinstance(definition, (tree.Function, tree.Class, tree.Module)):\n yield self._evaluator.wrap(definition).name\n else:\n yield n",
"def create_child(self,*names, **meta):\n \n for name in names: \n child = node(name, self, **meta)\n self.childs[name] = child"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses the given attrnames from all elements with element_name And attrnames2 from element_name2 where element_name2 is a child element of element_name | def parse_fast_nested(xmlfile, element_name, attrnames, element_name2, attrnames2,
warn=False, optional=False, encoding="utf8"):
Record, reprog = _createRecordAndPattern(element_name, attrnames, warn, optional)
Record2, reprog2 = _createRecordAndPattern(element_name2, attrnames2, warn, optional)
record = None
for line in _open(xmlfile, encoding):
m2 = reprog2.search(line)
if m2:
if optional:
yield record, Record2(**m2.groupdict())
else:
yield record, Record2(*m2.groups())
else:
m = reprog.search(line)
if m:
if optional:
record = Record(**m.groupdict())
else:
record = Record(*m.groups()) | [
"def attrib_parser(element, fields):\r\n attr_dict = {}\r\n\r\n # Fill attr_dict from element attributes but only attributes designated by field\r\n for attr in element.attrib: # takes elements specified in field\r\n if attr in fields:\r\n attr_dict[attr] = element.attrib[attr] # and adds them to dict(attr_dict)\r\n\r\n return attr_dict",
"def _process_attributes(self, attributes_element):\n for element in list(attributes_element):\n if element.tag != \"attribute\":\n raise AglyphError(\n \"unexpected element: attributes/%s\" % element.tag)\n name = element.get(\"name\")\n if not name:\n raise AglyphError(\n \"attribute/@name is required and cannot be empty\")\n value = self._unserialize_element_value(element)\n yield (name, value)",
"def get_element_names(self, element_tree, element_names=[]):\n\t\telement_names.append(element_tree['name'])\n\n\t\tif 'children' in element_tree: \n\t\t\tfor child in element_tree['children']: \n\t\t\t\tself.get_element_names(child, element_names)",
"def attributes(self):\n # \"\"\" Returns a List of an element's attributes \"\"\"\n # try:\n # return [Attr(key.lstrip('_'), value) for key, value in self.kwargs.items()]\n # except Exception as e:\n # print('Error - no tag!', e)\n # return []\n # print('attributes', self.kwargs)\n newargs = []\n for key, value in self.kwargs.items():\n # print('key', key)\n # print('value', value)\n newargs.append(Attr(key.lstrip('_'), value))\n\n nnm = NamedNodeMap(newargs, None, self)\n return nnm",
"def xml_get_elements_dictionary(dom, element_name, key_name, value_name):\n d = {} # \n for element in dom.getElementsByTagName(element_name):\n key = element.getAttribute(key_name)\n value = element.getAttribute(value_name)\n d[key] = value\n return d",
"def basic_xml_parse(nodes):\n values = {}\n for node in nodes:\n if node.nodeType == 1:\n node.normalize()\n if len(node.childNodes) == 1:\n if node.attributes.keys():\n values[node.tagName] = {}\n for e in node.attributes.keys():\n values[node.tagName][e] = node.attributes[e].value\n values[node.tagName]['value'] = node.childNodes[0].nodeValue\n else:\n values[node.tagName] = node.childNodes[0].nodeValue\n else:\n nv = {}\n if node.tagName == \"rowset\":\n rset = []\n for nd in node.childNodes:\n if nd.nodeType == 1:\n d = {}\n for e in nd.attributes.keys():\n d[e] = nd.attributes[e].value\n\n if len(nd.childNodes) > 0:\n p = basic_xml_parse(nd.childNodes)\n for i in p.keys():\n d[i] = p[i]\n\n rset.append(d)\n values[node.attributes['name'].value] = rset\n else:\n values[node.tagName] = basic_xml_parse(node.childNodes)\n if node.attributes.keys():\n for e in node.attributes.keys():\n values[node.tagName][e] = node.attributes[e].value\n\n\n return values",
"def _getAttrMultiple(self, data, **kwargs):\n returnData = []\n if not data:\n return returnData\n\n for i in data:\n result = self._getAttr(i, **kwargs)\n if result:\n returnData.append(result)\n return returnData",
"def parse(xmlfile, element_names, element_attrs={}, attr_conversions={},\n heterogeneous=False, warn=False):\n if isinstance(element_names, str):\n element_names = [element_names]\n elementTypes = {}\n for _, parsenode in ET.iterparse(_open(xmlfile, None)):\n if parsenode.tag in element_names:\n yield _get_compound_object(parsenode, elementTypes,\n parsenode.tag, element_attrs,\n attr_conversions, heterogeneous, warn)\n parsenode.clear()",
"def iter_attributes(self, name: Union['bytes', 'str']=None) -> Iterator['Attribute']:\n\n def yield_attributes(element):\n for child in element:\n if is_element(child):\n yield from yield_attributes(child)\n else:\n if name is None or child.name == name:\n yield child\n\n return yield_attributes(self)",
"def get_elements_dict(self, element_tree, elements):\n\t\telements[element_tree[\"name\"]] = element_tree \n\n\t\tif \"children\" in element_tree and len(element_tree[\"children\"]): \n\t\t\tfor child in element_tree[\"children\"]: \n\t\t\t\tself.get_elements_dict(child, elements)",
"def _modSubelements(elem,*args,**kwargs):\n if 'append' not in kwargs or not kwargs['append']:\n attr = elem.items()\n elem.clear()\n [elem.set(*a) for a in attr]\n for subelem in args:\n tag = subelem[0].lower()\n attrs = dict([(item[0].lower(),item[1]) for item in subelem[1].items()])\n et.SubElement(elem,tag,attrs)\n return elem",
"def _parse(cls, node, path):\n kwargs = cls._parse_simple_attribs(node)\n kwargs.update(cls._parse_simple_elements(node, path))\n return kwargs",
"def get_elementattributes(element, xpath, attribute, tag_converted):\n\n subelements = element.findall(xpath)\n listofattrs = []\n for subelement in subelements:\n attr_value = subelement.get(attribute, None)\n if tag_converted:\n tag_attribute_converted(subelement, attribute)\n if attr_value is not None:\n listofattrs.append(attr_value)\n return listofattrs",
"def _parse_common(tag):\n if \"modifiers\" in tag.attrib:\n modifiers = re.split(\",\\s*\", tag.attrib[\"modifiers\"].strip())\n if \"\" in modifiers:\n modifiers.remove(\"\")\n else:\n modifiers = None\n\n if \"name\" in tag.attrib:\n name = tag.attrib[\"name\"]\n if \"type\" in tag.attrib:\n dtype = tag.attrib[\"type\"]\n else:\n dtype = None\n if \"kind\" in tag.attrib:\n kind = tag.attrib[\"kind\"]\n else:\n kind = None\n\n return (name, modifiers, dtype, kind)",
"def parse_a2(root):\n labels = [\"UID Value\", \"UID Name\", \"UID Keyword\", \"Normative Reference\"]\n attrs = parse_docbook_table(root, labels, \"Well-known Frames of Reference\")\n\n # Customisations for Table A-2\n for v in attrs:\n v[\"UID Type\"] = \"Well-known frame of reference\"\n v[\"UID Info\"] = \"\"\n v[\"Retired\"] = \"\"\n del v[\"Normative Reference\"]\n\n return attrs",
"def __sub__(self, names):\r\n if isinstance(names, str):\r\n names = (names,)\r\n return Attrs([(name, val) for name, val in self if name not in names])",
"def get_attributes(html):\n\n for i, c in enumerate(html):\n if c == '>':\n if USE_BUFFER:\n html = buffer(html, 0, i)\n else:\n html = html[:i]\n break\n return dict((name.lower().strip(), value.strip('\\'\" ')) for (name, value) in attributes_regex.findall(html))",
"def get_attrs(self, node1, node2):\n return self._graph[node1][node2]",
"def get_specific_nodes(self, node, names):\n nodes = [(x.tagName, x) for x in node.childNodes \\\n if x.nodeType == x.ELEMENT_NODE and \\\n x.tagName in names]\n return dict(nodes)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper method that reads the model metadata file for the model selected. | def read_model_metadata_file(model_metatdata_file):
try:
err_msg = ""
if(not os.path.isfile(model_metatdata_file)):
err_msg = "No model_metadata_file for the model selected"
return 1, err_msg, {}
with open(model_metatdata_file) as json_file:
data = json.load(json_file)
return 0, err_msg, data
except Exception as exc:
return 1, f"Error while reading model_metadata.json: {exc}", {} | [
"def readModel(self, path) -> None:\n ...",
"def load_model_metadata(self):\n info_dict = joblib.load(self.path_model_metadata)\n\n self.max_sequence_size = info_dict['max_sequence_size']\n self.number_of_distinct_items = info_dict['number_of_distinct_items']\n self.item_dictionary = info_dict['item_dictionary']\n self.encoding_vector_size = info_dict['encoding_vector_size']\n\n return info_dict",
"def read_model(self, psf_file):\n f = pf.open(psf_file)\n self.psf_model = f[0].data\n f.close()",
"def load_model(self):\n raise NotImplementedError",
"def loadModel(self):\n folder = self.app.mwGlob['modelDir']\n val = self.openFile(self, 'Open model file', folder, 'Model files (*.model)',\n multiple=False,\n reverseOrder=True)\n loadFilePath, fileName, ext = val\n\n if loadFilePath:\n self.processModel(loadFilePath)\n\n return True",
"def read_model(self):\n f1 = open(self.name + '_' + 'words', 'r')\n f2 = open(self.name + '_' + 'word_lengths', 'r')\n f3 = open(self.name + '_' + 'stems', 'r')\n f4 = open(self.name + '_' + 'sentence_lengths', 'r')\n f5 = open(self.name + '_' + 'word_pair', 'r')\n d_str1 = f1.read() \n d_str2 = f2.read() \n d_str3 = f3.read() \n d_str4 = f4.read() \n d_str5 = f5.read() \n self.words = dict(eval(d_str1))\n self.word_lengths= dict(eval(d_str2))\n self.stems = dict(eval(d_str3))\n self.sentence_lengths = dict(eval(d_str4))\n self.word_pair = dict(eval(d_str5))",
"def _readMetadata(self):\r\n if self._mdFile is not None and os.path.exists(self._mdFile):\r\n with open(self._mdFile, 'r') as fp:\r\n return json.load(fp)\r\n return {}",
"def load_model():\n # TODO: INSERT CODE\n # return model",
"def read_model(self):\n reply = self._command_reply(0x80000031, 0, 32)\n\n return reply.decode('utf-8').split('\\x00')[0]",
"def read(self, model, _id=None):\n raise NotImplementedError()",
"def readmodel(model):\n if model not in MODELS:\n raise web.notfound('No model %s. Choices are: %s' % (model, ', '.join(MODELS)))\n modelfname = model+'.model'\n from svm import svm_model\n t1 = time.time()\n model = svm_model(modelfname)\n f = open(modelfname.replace('.model', '.params'))\n model.scales = eval(f.readline().strip())\n simmeths = eval(f.readline().strip())\n f.close()\n log('Loaded verification model for %s from %s with %d dims and simmeths %s in %0.3f secs' % (model, modelfname, len(model.scales), simmeths, time.time()-t1))\n return (model, simmeths)",
"def load_model(self):\n filename = filedialog.askopenfilename()\n if filename:\n self.model_path = filename\n self.reload()",
"def _construct_model_metadata(self):\n return {\"Model\": {\"Arn\": self.properties[\"ModelName\"]}}",
"def read_model(self):\r\n dic1=self.name+'_'+'words'\r\n dic2=self.name+'_'+'word_lengths'\r\n dic3=self.name+'_'+'stems'\r\n dic4=self.name+'_'+'sentence_lengths'\r\n dic5=self.name+'_'+'three_adjacent'\r\n f = open(dic1, 'r') \r\n words = f.read()\r\n self.words=dict(eval(words))\r\n f.close()\r\n \r\n f=open(dic2,'r')\r\n word_lengths=f.read()\r\n self.word_lengths=dict(eval(word_lengths))\r\n f.close()\r\n\r\n f=open(dic3,'r')\r\n stems=f.read()\r\n self.stems=dict(eval(stems))\r\n f.close()\r\n \r\n f=open(dic4,'r')\r\n sentence_lengths=f.read()\r\n self.sentence_lengths=dict(eval(sentence_lengths))\r\n f.close()\r\n\r\n f=open(dic5,'r')\r\n three_adjacent=f.read()\r\n self.three_adjacent=dict(eval(three_adjacent))\r\n f.close()",
"def act_func_load_model(self):\n # Open QFileDialog\n dialog = QtWidgets.QFileDialog(caption=\"Load model\")\n dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)\n dialog.setNameFilter(\"Sparse model files (*.pp *.txt)\")\n if not dialog.exec_():\n return\n filename = dialog.selectedFiles()\n self.process.sparse_model_file = filename\n\n # Set mode\n self.build_model_mode = False\n self.mode_qlabel.setText(\"Mode: Use existing model\")",
"def get_model_metadata(self, model_name, model_version=\"\", as_json=False):\n try:\n request = grpc_service_v2_pb2.ModelMetadataRequest(\n name=model_name,\n version=model_version)\n response = self._client_stub.ModelMetadata(request)\n if as_json:\n return json.loads(MessageToJson(response))\n else:\n return response\n except grpc.RpcError as rpc_error:\n raise_error_grpc(rpc_error)",
"def load_model(self, filename):\n \n x = loadmat(filename, struct_as_record=True)\n return (x[\"layers\"], x[\"meta\"])",
"def _collect_files_from_store_object(self):\n # Get the artifact and model file along with its extra data:\n (\n self._model_file,\n self._model_artifact,\n self._extra_data,\n ) = mlrun.artifacts.get_model(self._model_path)\n\n # Get the model file: TODO: Once implementing abstract formats, '.pkl' check is only relevant to SavedModel.\n if self._model_file.endswith(\".pkl\"):\n self._model_file = self._extra_data[\n self._get_model_file_artifact_name()\n ].local()\n\n # Read the settings:\n self._model_format = self._model_artifact.labels[\"model-format\"]\n self._save_traces = self._model_artifact.labels[\"save-traces\"]\n\n # Read the IO information:\n self._inputs = self._model_artifact.inputs\n self._outputs = self._model_artifact.outputs\n\n # Read the custom objects:\n if self._get_custom_objects_map_artifact_name() in self._extra_data:\n self._custom_objects_map = self._extra_data[\n self._get_custom_objects_map_artifact_name()\n ].local()\n self._custom_objects_directory = self._extra_data[\n self._get_custom_objects_directory_artifact_name()\n ].local()\n else:\n self._custom_objects_map = None\n self._custom_objects_directory = None\n\n # Read additional files according to the model format used:\n # # ModelFormats.SAVED_MODEL - Unzip the SavedModel archive:\n if self._model_format == TFKerasModelHandler.ModelFormats.SAVED_MODEL:\n # Unzip the SavedModel directory:\n with zipfile.ZipFile(self._model_file, \"r\") as zip_file:\n zip_file.extractall(os.path.dirname(self._model_file))\n # Set the model file to the unzipped directory:\n self._model_file = os.path.join(\n os.path.dirname(self._model_file), self._model_name\n )\n # # ModelFormats.JSON_ARCHITECTURE_H5_WEIGHTS - Get the weights file:\n elif (\n self._model_format\n == TFKerasModelHandler.ModelFormats.JSON_ARCHITECTURE_H5_WEIGHTS\n ):\n # Get the weights file:\n self._weights_file = self._extra_data[\n self._get_weights_file_artifact_name()\n ].local()",
"def get_model_info():\n return Model().get_model_info(SeccionBase)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper method that returns the corresponding enum values for sensors in the model_metadata json of the model selected. | def get_sensors(model_metatdata_json):
try:
sensors = None
err_msg = ""
if constants.ModelMetadataKeys.SENSOR in model_metatdata_json:
sensor_names = set(model_metatdata_json[constants.ModelMetadataKeys.SENSOR])
if all([constants.SensorInputKeys.has_member(sensor_name) for sensor_name in sensor_names]):
sensors = [constants.SensorInputKeys[sensor_name].value for sensor_name in sensor_names]
else:
return 2, "The sensor configurations of your vehicle and trained model must match", []
else:
# To handle DeepRacer models with no sensor key
err_msg = "No sensor key in model_metadata_file. Defaulting to observation."
sensors = [constants.SensorInputKeys.observation.value]
return 0, err_msg, sensors
except Exception as exc:
return 1, f"Error while getting sensor names from model_metadata.json: {exc}", [] | [
"def get_sensor_labels(self) -> Dict[str, List[str]]:\n return {}",
"def _get_fsm_sensor(self):\n fsm, state = self._get_fsm_state()\n sensor = state.sensors[fsm.selected_sensor]\n return fsm, sensor",
"def test_enum_value(self):\n \n type = simdat.SimulationDataType.REAL\n self.assertEqual(type.value, \"fmiReal\")\n type = simdat.SimulationDataType.INTEGER\n self.assertEqual(type.value, \"fmiInteger\")\n type = simdat.SimulationDataType.BOOLEAN\n self.assertEqual(type.value, \"fmiBoolean\")\n type = simdat.SimulationDataType.STRING\n self.assertEqual(type.value, \"fmiString\")",
"def getEnumNameFromValue(value):\n for t, tobj in list(TypeDef.typeDict.items()):\n if tobj.objtype in {s_ENUM, s_KERNEL}:\n n = tobj.getLabel(value)\n if n is not None:\n return t, n\n return None, None",
"def get_data(self, model):\n #return [model.get(key) for key in self.get_keys()]\n return [eval(key, model) for key in self.get_keys()]",
"def model_features(dev_type: str) -> dict:\n for dev_dict in {**air_features, **humid_features}.values():\n if dev_type in dev_dict['models']:\n return dev_dict\n raise ValueError('Device not configured')",
"def model_query(model: db.Model) -> List[dict]:\n result = []\n fields = ['spin_mode', 'basis_set', 'method', 'method_family', 'program', 'version', 'solvation', 'solvent',\n 'embedding', 'periodic_boundaries', 'external_field', 'temperature', 'electronic_temperature']\n for field in fields:\n value = getattr(model, field)\n if value.lower() != \"any\":\n result.append({f\"model.{field}\": value})\n return result",
"def state_names(model):\n return tuple(n for n, v in model[\"state\"])",
"def sensor_names(self):\n\n return get_sensor_info(key='name')",
"def enum_strs(self):\n return self._metadata[\"enum_strs\"]",
"def get_measurement_types():\n\n all_measures = ['temperature']\n\n ####################\n return all_measures\n ####################",
"def get_sensor_descriptions(self):\n return ()",
"def getEnumData(self, *args):\n return _coin.SoFieldData_getEnumData(self, *args)",
"def all_enums(self):\n if self.cluster:\n for e in self.cluster.enums:\n yield e\n for e in self.idl.enums:\n yield e",
"def loadEnumMetas(event_file_name, dataset):\n \n event_file = open(event_file_name, 'r')\n \n events_meta_list = []\n \n enum_metas_positions = dataset.metas_enum_positions\n \n for line in event_file:\n line_metas = line[:-1].split('\\t')\n \n event_metas = []\n for enum_pos in enum_metas_positions:\n event_metas.append(int(line_metas[enum_pos]))\n \n events_meta_list.append(event_metas)\n \n event_file.close()\n \n return events_meta_list",
"def get_model_constants(model_settings):\n return model_settings.get(\"CONSTANTS\", {})",
"def get_value_condition_record_types(self):\n return # osid.type.TypeList",
"def get_machine_series(machine, model_name=None):\n return get_machine_status(\n machine=machine,\n key='series',\n model_name=model_name\n )",
"def state_class(self):\n if data := super().state_class:\n return data\n if self.native_unit_of_measurement == ENERGY_KILO_WATT_HOUR:\n return SensorStateClass.TOTAL_INCREASING\n if self.native_unit_of_measurement:\n return SensorStateClass.MEASUREMENT\n else:\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |