Columns:
  query      string   (lengths 9 to 9.05k)
  document   string   (lengths 10 to 222k)
  negatives  sequence (lengths 19 to 20)
  metadata   dict
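Each row pairs a natural-language query with a matching code document, a list of hard-negative code snippets, and a metadata dict describing the training objective. Below is a minimal sketch of loading and inspecting one row with the Hugging Face datasets library; the JSON Lines file name is a placeholder, not part of this card.

from datasets import load_dataset

# Load the rows from a local JSON Lines export (the path is hypothetical).
ds = load_dataset("json", data_files="train.jsonl", split="train")

row = ds[0]
print(row["query"])           # natural-language docstring
print(row["document"])        # the matching code snippet
print(len(row["negatives"]))  # 19-20 non-matching code snippets
print(row["metadata"])        # {"objective": {"paired": [], "self": [], "triplet": [...]}}

The sample rows below show this layout in full.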
Unhexlify raw text, return unhexlified text.
def unhexlify(text):
    unhexlified = binascii.unhexlify(text)

    if six.PY3:
        unhexlified = unhexlified.decode('utf-8')

    return unhexlified
[ "def hexlify(text):\n if six.PY3:\n text = text.encode('utf-8')\n\n hexlified = binascii.hexlify(text)\n\n if six.PY3:\n hexlified = hexlified.decode('utf-8')\n\n return hexlified", "def unhexlify(data) -> bytes:", "def test_unhexlify():\n hexlified = uflash.hexlify(TEST_SCRIPT)\n unhexlified = uflash.unhexlify(hexlified)\n assert unhexlified == TEST_SCRIPT.decode('utf-8')", "def hexlify(data: bytes, sep: str = '') -> bytes:", "def _get_hexplain(data: str) -> str:\n temp = []\n for char in data:\n temp.append(hex(ord(char)).replace(\"0x\", \"\"))\n return \"\".join(temp)", "def preprocess_hex_chars(self, text) :\n preprocessed_text = ''\n\n i = 0\n while i < len(text) :\n if '\\\\x' == text[i:i+2] :\n c = int(text[i+2:i+4], base=16)\n preprocessed_text += chr(c)\n i += 4\n else :\n preprocessed_text += text[i]\n i += 1\n\n return preprocessed_text", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def xx(data):\n if sys.version_info < (3, 5):\n return binascii.hexlify(data).decode('ascii')\n return data.hex()", "def CUnescape(text):\n # type: (str) -> bytes\n\n def ReplaceHex(m):\n # Only replace the match if the number of leading back slashes is odd. i.e.\n # the slash itself is not escaped.\n if len(m.group(1)) & 1:\n return m.group(1) + 'x0' + m.group(2)\n return m.group(0)\n\n # This is required because the 'string_escape' encoding doesn't\n # allow single-digit hex escapes (like '\\xf').\n result = _CUNESCAPE_HEX.sub(ReplaceHex, text)\n\n return (result.encode('utf-8') # Make it bytes to allow decode.\n .decode('unicode_escape')\n # Make it bytes again to return the proper type.\n .encode('raw_unicode_escape'))", "def unhex(s):\n s = str(s).strip()\n return (len(s) % 2 and '0' + s or s).decode('hex')", "def unhex(s):\n # Must be an even number of digits\n assert len(s) % 2 == 0\n rv=\"\"\n for i in range(len(s)/2):\n pair=s[i*2:(i*2)+2]\n rv+=str(chr(int(pair, 16)))\n return rv", "def _get_hexesc(data: str) -> str:\n temp = []\n for char in data:\n temp.append(hex(ord(char)).replace(\"0x\", \"\\\\\\\\x\"))\n return \"\".join(temp)", "def decode_hex(data: typing.AnyStr, with_prefix=False) -> bytes:\n\n if isinstance(data, (bytes, bytearray)):\n return data\n return unhexlify(data, with_prefix=with_prefix)", "def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]", "def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def raw_unicode(raw):\n return raw.encode(\"utf8\").decode(\"unicode-escape\")", "def refine_text_string(text_string):\n text_list = text_string.split(\",\")\n hex_list = [hex(int(a)) for a in text_list]\n for i in range(len(hex_list)):\n if len(hex_list[i]) == 3:\n hex_list[i] = hex_list[i].replace('x', \"\")\n elif len(hex_list[i]) == 4:\n hex_list[i] = hex_list[i].replace('0x', \"\")\n return \"\".join(hex_list)", "def encode_hex(data: typing.AnyStr, with_prefix=False) -> str:\n\n if isinstance(data, str):\n return data\n return hexlify(data, with_prefix=with_prefix)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
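The objective block above assigns the columns to a triplet objective: query is the anchor, document is the positive, and each entry of negatives is a hard negative. A small sketch (assuming each row is a plain dict with the keys shown here) that expands one row into (anchor, positive, negative) triplets:

def row_to_triplets(row):
    # Expand one row into (anchor, positive, negative) tuples, following
    # the column roles listed in metadata["objective"]["triplet"].
    triplets = []
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_col]
        positive = row[positive_col]
        for negative in row[negatives_col]:
            triplets.append((anchor, positive, negative))
    return triplets

For the first row above, this yields one triplet per negative, all sharing the unhexlify docstring as anchor and the unhexlify implementation as positive.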
Obtains the record in the set with the time closest to the given $unix_time. If that record is not within $within seconds, an exception is raised.
def get_record(self, unix_time, within):
    if len(self.records) <= 0:
        raise Exception("No records in this set")

    r = self.records[0]
    closest_record = r
    closest_delta = abs(r.unix_time - unix_time)

    for r in self.records[1:]:
        delta = abs(r.unix_time - unix_time)
        if delta < closest_delta:
            closest_record = r
            closest_delta = delta

    if closest_delta > within:
        raise Exception("Closest record to %d was %d (delta=%d) which exceeds limit of %d"
                        % (unix_time, closest_record.unix_time, closest_delta, within))

    return closest_record
[ "def get_closest_record(self, time):\n dist = 10000000\n record = -1\n # TODO: optimise a bit\n for i, itime in enumerate(self.times):\n if (abs(time-itime)) < dist:\n dist = abs(time-itime)\n record = i\n\n return record", "def nearest (self, timet):\n def next_index (low, high):\n return math.floor(low+((high-low)/2)) \n\n def search_up (table, low, high, timet):\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n while (high - low) > 1 and searcht < timet:\n low = search\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n if searcht == timet:\n return (search, search)\n else:\n return (low, search)\n\n def search_down (table, low, high, timet):\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n while (high - low) > 1 and searcht > timet:\n high = search\n search = next_index(low, high)\n searcht = table[search][\"time\"]\n if searcht == timet:\n return (search, search)\n else:\n return (search, high)\n\n low = 0\n high = self._table.nrows\n while low != high:\n low, high = search_up(self._table, low, high, timet)\n if low != high:\n low, high = search_down(self._table, low, high, timet)\n return low", "def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]", "def find(self, time, offset=0):\n for i in self.items[offset:]:\n if i.xmin > time:\n break\n if i.xmax > time:\n return i", "def find_closest_(weather):\n \n Now = datetime.now()\n \n server_time_fault = 1565867883 - 1565863997\n \n current_timestamp = datetime.timestamp(Now) + server_time_fault\n\n stamps = []\n for t in weather:\n stamps.append(t['time'])\n \n min_val = 100000000000000000\n min_idx = 0\n \n for idx, val in enumerate(stamps):\n \n if ((val - current_timestamp) < min_val):\n min_val = val - current_timestamp\n min_idx = idx\n \n return weather[min_idx]", "def query(self, current_time):\n if (current_time < self.a):\n return self.a\n else:\n return -1", "def get_closest_time():\n closest_generate_time = base.next_time\n closest_log_prev_time = base.total\n\n tmp = work.device_list\n if tmp[0] is not 0:\n closest_device_time = sorted(tmp, key=lambda device: device.end_time)[0].end_time\n else:\n closest_device_time = 0\n\n if closest_device_time == 0:\n closest_device_time = closest_generate_time\n\n return_value = closest_generate_time\n\n if closest_device_time < return_value:\n return_value = closest_device_time\n\n if closest_log_prev_time < return_value:\n return_value = closest_log_prev_time\n return return_value", "def get_datetime_nearest_at_position(event, receiver):\n data = Data(DB_BLAZE)\n datetime = odo(data.datetime_nearest[data.datetime_nearest.event==event][['event', receiver]], pd.DataFrame)[receiver].loc[0]\n return datetime", "def _nearest_datetime(self, datetime_list, target_datetime):\n if not datetime_list:\n raise errors.ParserError(\n \"Input parameter datetime_list length is zero. Required\"\n \" parameters: [datetime.datetime], datetime.datetime\")\n work_list = [entry for entry in datetime_list if entry < target_datetime]\n if not work_list:\n raise errors.ParserError(\n \"work_list length is zero. 
Entries in datetime_list\"\n \" {} are not < target_datetime {}\".format(datetime_list,\n target_datetime))\n return min(\n work_list,\n key=lambda datetime_entry: abs(datetime_entry - target_datetime))", "def test_is_closest_hour03(self):\n now = dt(2019, 1, 25, 19, 40, 0, 0, TO_ZONE)\n sunset = dt(2019, 1, 25, 19, 38, 0, 0, TO_ZONE)\n result = is_closest_time(now, sunset)\n self.assertTrue(result, 'Should be considered closest \"after\" time')", "def find(self, time, offset=0, buffer=0):\n i = max(offset,0)\n pt = self.items[i]\n for p in self.items[offset:]:\n if abs(p.time-time)<=buffer:\n return p", "def searchGPSByTime(gpsData, time): \n\n if len(gpsData) == 0:\n # No data for this function to find\n return None\n \n # The \"time\" is larger than the last item, which has the largest time, \n # in the gpsData, so return the last index of gpsData\n if time > gpsData[-1][0]:\n return len(gpsData) -1\n\n # The \"time\" is smaller than the first item, which has the smallest time, \n # in the gpsData, so return the first index of gpsData \n if time < gpsData[0][0]:\n return 0\n\n # Initialize the start and end indices of this GPS data set.\n start = 0\n end = len(gpsData)\n\n # Because the GPS data is recorded every 5 seconds, set the time \n # difference to 5 seconds. \n # If the difference between the search and target time is less than \n # the time difference, then we found the GPS record we want.\n diff = datetime.timedelta(0,5) # 5 seconds\n \n # Using binary search\n mid = start + (end - start) / 2\n while abs(gpsData[mid][0] - time) > diff:\n if time > gpsData[mid][0]:\n start = mid + 1\n else:\n end = mid - 1\n mid = start + (end - start) / 2\n \n # Check which of gpsData[mid-1][0], gpsData[mid][0] and gpsData[mid+1][0] is \n # the nearest time to the target time.\n compareTime = {}\n compareTime[mid] = abs(gpsData[mid][0] - time)\n\n if mid != len(gpsData) -1:\n compareTime[mid + 1] = abs(gpsData[mid + 1][0] - time)\n\n if mid > 0:\n compareTime[mid - 1] = abs(gpsData[mid - 1][0] - time)\n\n # Sort compareTime by value from small to large.\n SortCompareTime = sorted(compareTime.items(), key=operator.itemgetter(1))\n \n # Return the first pair's key, which is the closest index regarding to the target time\n return SortCompareTime[0][0]", "def select_closest(data, ts):\n in_ts = data['ts']\n ts_list = list()\n for t in ts:\n id_diff_min = h.argnearest(in_ts, t) # find index of nearest time step to input time step\n ts_list.append(id_diff_min) # append index to list\n data_new = h.put_in_container(data['var'][ts_list], data, ts=ts, mask=data['mask'][ts_list])\n\n return data_new", "def LST_from_unixtimes(unixtimes):\n julian_dates = time[\"Julian\"](unixtimes)\n lst = time[\"LST\"](julian_dates)\n return lst", "def getElemAfterTime(self, stamp):\n newer = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time >= stamp]\n if not newer:\n return None\n return newer[0]", "def get_last_an_time(self, utc_time):\n\n # Propagate backwards to ascending node\n dt = np.timedelta64(10, 'm')\n t_old = utc_time\n t_new = t_old - dt\n pos0, vel0 = self.get_position(t_old, normalize=False)\n pos1, vel1 = self.get_position(t_new, normalize=False)\n while not (pos0[2] > 0 and pos1[2] < 0):\n pos0 = pos1\n t_old = t_new\n t_new = t_old - dt\n pos1, vel1 = self.get_position(t_new, normalize=False)\n\n # Return if z within 1 km of an\n if np.abs(pos0[2]) < 1:\n return t_old\n elif np.abs(pos1[2]) < 1:\n return t_new\n\n # Bisect to z within 1 km\n while np.abs(pos1[2]) > 1:\n # pos0, 
vel0 = pos1, vel1\n dt = (t_old - t_new) / 2\n t_mid = t_old - dt\n pos1, vel1 = self.get_position(t_mid, normalize=False)\n if pos1[2] > 0:\n t_old = t_mid\n else:\n t_new = t_mid\n\n return t_mid", "def getTPDetection(detections, windowTimes):\r\n for detection in detections.iterrows():\r\n detectionTime = pd.to_datetime(detection[1][\"timestamp\"])\r\n if detectionTime > windowTimes[0] and detectionTime < windowTimes[1]:\r\n return detection\r\n return None", "def locate_nearest_event(self):\n nearest_event_date = ''\n min = 1000000\n today = self.get_today()\n event_array = self.events.keys()\n for event_date in event_array:\n event_date = self.date_to_operate_format(event_date)\n if int(event_date) - int(today) > 0:\n if int(event_date) - int(today) < min:\n min = int(event_date) - int(today)\n nearest_event_date = event_date\n\n nearest_event = '0'\n if len(event_array) > 0:\n nearest_event = self.change_format_to_database_index(nearest_event_date)\n\n return nearest_event", "def getElemBeforeTime(self, stamp):\n older = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time <= stamp]\n if not older:\n return None\n return older[-1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pulls in the records from other and merges them into self; since the timestamps won't match up perfectly, the output will only keep one record per $period seconds.
def merge_with(self, other, period=60):
    new_list = []
    last_timestamp = 0

    for r in self.records:
        if abs(r.unix_time - last_timestamp) > period:
            # Accept this record
            last_timestamp = r.unix_time
            other_r = other.get_record(r.unix_time, period/2)
            r.merge_with(other_r)
            new_list.append(r)

    self.records = new_list
[ "def merge_usage_periods(periods, new_period):\n outlist = []\n for period in periods:\n if new_period[0] > period[1]:\n # No overlap - past the end\n outlist.append(period)\n continue\n if new_period[1] < period[0]:\n # No overlap - before the beginning\n outlist.append(period)\n continue\n # There must now be some overlap\n merged = True\n if new_period[0] < period[0]:\n period[0] = new_period[0]\n if new_period[1] > period[1]:\n period[1] = new_period[1]\n new_period = period\n\n outlist.append(new_period)\n return outlist", "def get_datasets_reference_period(self) -> List[Dict]:\n datasets = self.get_datasets_modified_yesterday()\n dataset_ids = list()\n for dataset_id, dataset in datasets.items():\n if \"*\" in dataset[\"dataset_date\"]:\n continue\n if dataset[\"update_frequency\"] <= 0:\n continue\n dataset_ids.append(dataset_id)\n columns = [DBDataset.id, DBDataset.dataset_date]\n filters = [\n DBDataset.id.in_(dataset_ids),\n DBDataset.run_number == self.run_numbers[1][0],\n ]\n results = self.session.execute(select(*columns).where(*filters))\n norows = 0\n unchanged_dsdates_datasets = list()\n for norows, result in enumerate(results):\n dataset_id = result.id\n if result.dataset_date == datasets[dataset_id][\"dataset_date\"]:\n unchanged_dsdates_datasets.append(dataset_id)\n logger.info(f\"SQL query returned {norows} rows.\")\n DBDataset2 = aliased(DBDataset)\n dsdates_not_changed_within_uf = list()\n for dataset_id in unchanged_dsdates_datasets:\n filters = [\n DBDataset.id == dataset_id,\n DBDataset2.id == DBDataset.id,\n DBDataset2.run_number == DBDataset.run_number - 1,\n DBDataset.dataset_date != DBDataset2.dataset_date,\n ]\n result = self.session.scalar(\n select(DBDataset.run_number)\n .where(*filters)\n .order_by(DBDataset.run_number.desc())\n .limit(1)\n )\n delta = self.now - self.run_number_to_run_date[result.run_number]\n if delta > timedelta(\n days=datasets[dataset_id][\"update_frequency\"]\n ):\n dsdates_not_changed_within_uf.append(dataset_id)\n datasets_dataset_date = list()\n for dataset_id in dsdates_not_changed_within_uf:\n columns = [DBDataset.run_number, DBDataset.update_frequency]\n filters = [\n DBDataset.id == dataset_id,\n DBDataset2.id == DBDataset.id,\n DBDataset2.run_number == DBDataset.run_number - 1,\n DBDataset.what_updated != \"nothing\",\n ]\n results = self.session.execute(select(*columns).where(*filters))\n prevdate = self.now\n number_of_updates = 0\n number_of_updates_within_uf = 0\n for number_of_updates, result in enumerate(results):\n run_date = self.run_number_to_run_date[result.run_number]\n delta = prevdate - run_date\n if delta < timedelta(days=result.update_frequency):\n number_of_updates_within_uf += 1\n prevdate = run_date\n if number_of_updates_within_uf / number_of_updates < 0.8:\n continue\n datasets_dataset_date.append(datasets[dataset_id])\n return datasets_dataset_date", "def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of 
avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')", "def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not 
self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')", "def adjust_sample_times(self):\n st_idx = 0\n interval = 0\n for i in range(1, len(self._data_particle_record_buffer)):\n # Keep iterating through the region (window) of identical timestamps until the timestamp changes\n time_diff = self._data_particle_record_buffer[i].get_value(DataParticleKey.INTERNAL_TIMESTAMP) - \\\n self._data_particle_record_buffer[st_idx].get_value(DataParticleKey.INTERNAL_TIMESTAMP)\n if time_diff == 0:\n continue\n\n # Timestamps should be 1 second apart, print a single warning message if otherwise\n if time_diff > 1:\n if not self._unexpected_interval_found:\n log.warning('A %f second interval was found between particles, expected 1.0 seconds' % time_diff)\n self._unexpected_interval_found = True\n time_diff = 1\n\n # Determine the interval between particles in the window to be the time span of the window\n # divided by the number of particles in the window\n interval = float(time_diff)/(i - st_idx)\n\n self.adjust_times_in_window(self._data_particle_record_buffer[st_idx:i], interval)\n\n # set start index to beginning of next window\n st_idx = i\n\n # Adjust the records in the final window. 
Use the interval from the previous window\n if st_idx < len(self._data_particle_record_buffer)-1:\n self.adjust_times_in_window(self._data_particle_record_buffer[st_idx:], interval)", "def merge_logfiles(log1, log2):\n first_in_2 = log2['time'][0]\n keep_from_1 = log1['time'] < first_in_2\n for key in log1.keys():\n log1[key] = log1[key][keep_from_1]\n log1.timeseries_append(log2)\n return log1", "def get_next_updates(self):\n timestamps = sorted(list(set(self.announcements.keys()).union(set(self.withdrawals.keys()))))\n for timestamp in timestamps:\n update_record = {'announcements': [], 'withdrawals': [], 'timestamp': timestamp}\n if timestamp in self.announcements.keys():\n update_record['announcements'] = self.announcements[timestamp]\n if timestamp in self.withdrawals.keys():\n update_record['withdrawals'] = self.withdrawals[timestamp]\n yield update_record\n yield {'announcements': [], 'withdrawals': [], 'timestamp': None}", "def __or__(self, other):\n if not all(self.frequency == other.frequency):\n raise ValueError(\n f\"The {ST_Slice} objects to concatenate in \"\n \"time do not have equal frequency axes.\"\n )\n\n log.info(\n f\"Concatenating in time ({self.time.size}, {other.time.size}).\"\n )\n\n if self.time.max() < other.time.min():\n new_data = np.hstack((self.value, other.value))\n new_time = Time(np.hstack((self.time.jd, other.time.jd)), format='jd')\n new_ana_times = Time(np.hstack((self.analog_pointing_times.jd, other.analog_pointing_times.jd)), format='jd')\n new_digi_times = Time(np.hstack((self.digital_pointing_times.jd, other.digital_pointing_times.jd)), format='jd')\n else:\n new_data = np.hstack((other.value, self.value))\n new_time = Time(np.hstack((other.time.jd, self.time.jd)), format='jd')\n new_ana_times = Time(np.hstack((other.analog_pointing_times.jd, self.analog_pointing_times.jd)), format='jd')\n new_digi_times = Time(np.hstack((other.digital_pointing_times.jd, self.digital_pointing_times.jd)), format='jd')\n\n # unique_times_nb = np.unique(new_time).size\n # if unique_times_nb != new_time.size:\n # log.warning(\n # f\"There are {new_time.size - unique_times_nb} overlaps in the time axis.\"\n # )\n\n return ST_Slice(\n time=new_time,\n frequency=self.frequency,\n value=new_data,\n analog_pointing_times=new_ana_times,\n digital_pointing_times=new_digi_times\n )", "def same_period(x, y):\n time_diff = pd.Timedelta(minutes=minutes_diff)\n res = y[datetime_column] - x[datetime_column] < time_diff\n\n for crit in criteria:\n res = res and (x[crit] == y[crit])\n return res", "def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods", "def get_records_periods(tenant_id, start, end):\r\n sql_cmd = Record.query.filter_by(tenant_id=tenant_id) \\\r\n .filter(Record.sale_date < end).filter(Record.sale_date >= start).statement\r\n records = pd.read_sql(sql = sql_cmd, con = db.session.bind)\r\n return records", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction 
function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def promediate(self) -> List[object]:\n splitted_results = {}\n if self.interval == 'TD':\n splitted_results = self.split_between_hours()\n elif self.interval != 'LH' and self.interval != 'TD':\n splitted_results = self.split_between_days()\n if splitted_results:\n self.results = []\n for key in splitted_results:\n value = 0.0\n for service in splitted_results[key]:\n value += float(service.response_time)\n value = value / len(splitted_results[key])\n dummy = splitted_results[key][0]\n dummy.response_time = value\n self.results.append(dummy)\n return self.results", "def CompareReducedDetIDs(FirstDict,SecondDict):\n DifferenceDict={}\n for timestamp in sorted(FirstDict.keys()):\n if timestamp.replace(microsecond=0) in SecondDict.keys():\n secondtimestamp=timestamp.replace(microsecond=0)\n print(\"Timestamp %s is present in both Dictionaries!\"%timestamp)\n else:\n secondtimestamps=sorted(SecondDict.keys())\n secondtimestamps.append(timestamp)\n secondtimestamps.sort()\n if secondtimestamps.index(timestamp)!=0:#To avoid wrapping up to the end of the list of timestamps!!!\n secondtimestamp=secondtimestamps[secondtimestamps.index(timestamp)-1]\n else:#Default to the earliest timestamp in the second dictionary...\n secondtimestamp=secondtimestamps[secondtimestamps.index(timestamp)+1]\n print(\"Comparing the IOV with timestamp %s (1st 
dict) with IOV with timestamp %s (2nd dict)\"%(timestamp,secondtimestamp)) \n if set(map(lambda x:int(x),FirstDict[timestamp][0]))!=set(map(lambda x:int(x),SecondDict[secondtimestamp][0])) or set(map(lambda x:int(x),FirstDict[timestamp][1]))!=set(map(lambda x:int(x),SecondDict[secondtimestamp][1])): #Change!\n if len(set(FirstDict[timestamp][0]))<=len(set(SecondDict[secondtimestamp][0])):\n differenceHV=set(map(lambda x:int(x),SecondDict[secondtimestamp][0]))-set(map(lambda x:int(x),FirstDict[timestamp][0]))\n else:\n #elif len(set(SecondDict[secondtimestamp][0]))<len(set(FirstDict[timestamp][0])):\n differenceHV=set(map(lambda x:int(x),FirstDict[timestamp][0]))-set(map(lambda x:int(x),SecondDict[secondtimestamp][0]))\n #else:\n # print \"SCREAM! Something weird going on one of the two should be a subset of the other!\"\n # differenceLV=set([])\n # differenceHV=set([])\n if len(set(FirstDict[timestamp][1]))<=len(set(SecondDict[secondtimestamp][1])):\n differenceLV=set(map(lambda x:int(x),SecondDict[secondtimestamp][1]))-set(map(lambda x:int(x),FirstDict[timestamp][1]))\n else:\n #elif set(SecondDict[secondtimestamp][1]).issubset(set(FirstDict[timestamp][1])):\n differenceLV=set(map(lambda x:int(x),FirstDict[timestamp][1]))-set(map(lambda x:int(x),SecondDict[secondtimestamp][1]))\n #else:\n # print \"SCREAM! Something weird going on one of the two should be a subset of the other!\"\n # differenceLV=set([])\n # differenceHV=set([])\n DifferenceDict.update({(timestamp,secondtimestamp):(differenceHV,differenceLV)})\n print(\"Difference in timestamp %s (corresponding to %s):\"%(timestamp,secondtimestamp))\n #print \"LV OFF:\"\n #for LVChannel in differenceLV:\n # print LVChannel\n #print \"HV OFF:\"\n #for HVChannel in differenceHV:\n # print HVChannel\n else:\n print(\"Timestamp %s is identical in both dictionaries\"%timestamp)\n return DifferenceDict", "def test_aggregate_dataframe_to_stress_period(shellmound_datapath, sourcefile, dates):\n start, end = dates\n welldata = pd.read_csv(os.path.join(shellmound_datapath, sourcefile\n ))\n\n welldata['start_datetime'] = pd.to_datetime(welldata.start_datetime)\n welldata['end_datetime'] = pd.to_datetime(welldata.end_datetime)\n duplicate_well = welldata.groupby('node').get_group(welldata.node.values[0])\n welldata = welldata.append(duplicate_well)\n start_datetime = pd.Timestamp(start)\n end_datetime = pd.Timestamp(end) # pandas convention of including last day\n result = aggregate_dataframe_to_stress_period(welldata,\n start_datetime=start_datetime,\n end_datetime=end_datetime,\n period_stat='mean',\n id_column='node',\n data_column='flux_m3')\n overlap = (welldata.start_datetime < end_datetime) & \\\n (welldata.end_datetime > start_datetime)\n #period_inside_welldata = (welldata.start_datetime < start_datetime) & \\\n # (welldata.end_datetime > end_datetime)\n #overlap = welldata_overlaps_period #| period_inside_welldata\n\n # for each location (id), take the mean across source data time periods\n agg = welldata.loc[overlap].copy().groupby(['start_datetime', 'node']).sum().reset_index()\n agg = agg.groupby('node').mean().reset_index()\n if end_datetime < start_datetime:\n assert result['flux_m3'].sum() == 0\n if overlap.sum() == 0:\n assert len(result) == 0\n expected_sum = agg['flux_m3'].sum()\n if duplicate_well.node.values[0] in agg.index:\n dw_overlaps = (duplicate_well.start_datetime < end_datetime) & \\\n (duplicate_well.end_datetime > start_datetime)\n expected_sum += duplicate_well.loc[dw_overlaps, 'flux_m3'].mean()\n assert 
np.allclose(result['flux_m3'].sum(), expected_sum)", "def _fill_day_dicts(self):\n today = datetime.date.today()\n for i, record in enumerate(self._dataset):\n if (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=30)).timetuple()):\n self._add_record(self._all30_dict, record, key=i)\n\n elif (record[\"createdAt\"] / 1000) > time.mktime((today - datetime.timedelta(days=60)).timetuple()):\n self._add_record(self._all60_dict, record, key=i)\n\n else:\n self._add_record(self._all90_dict, record, key=i)", "def merge(self, other, gap_method=\"slinear\", new_sample_rate=None):\n if new_sample_rate is not None:\n merge_sample_rate = new_sample_rate\n combine_list = [self.decimate(new_sample_rate).dataset]\n else:\n merge_sample_rate = self.sample_rate\n combine_list = [self.dataset]\n\n ts_filters = self.filters\n if isinstance(other, (list, tuple)):\n for run in other:\n if not isinstance(run, RunTS):\n raise TypeError(f\"Cannot combine {type(run)} with RunTS.\")\n\n if new_sample_rate is not None:\n run = run.decimate(new_sample_rate)\n combine_list.append(run.dataset)\n ts_filters.update(run.filters)\n else:\n if not isinstance(other, RunTS):\n raise TypeError(f\"Cannot combine {type(other)} with RunTS.\")\n\n if new_sample_rate is not None:\n other = other.decimate(new_sample_rate)\n combine_list.append(other.dataset)\n ts_filters.update(other.filters)\n\n # combine into a data set use override to keep attrs from original\n\n combined_ds = xr.combine_by_coords(\n combine_list, combine_attrs=\"override\"\n )\n\n n_samples = (\n merge_sample_rate\n * float(\n combined_ds.time.max().values - combined_ds.time.min().values\n )\n / 1e9\n ) + 1\n\n new_dt_index = make_dt_coordinates(\n combined_ds.time.min().values,\n merge_sample_rate,\n n_samples,\n self.logger,\n )\n\n run_metadata = self.run_metadata.copy()\n run_metadata.sample_rate = merge_sample_rate\n\n new_run = RunTS(\n run_metadata=self.run_metadata,\n station_metadata=self.station_metadata,\n survey_metadata=self.survey_metadata,\n )\n\n ## tried reindex then interpolate_na, but that has issues if the\n ## intial time index does not exactly match up with the new time index\n ## and then get a bunch of Nan, unless use nearest or pad, but then\n ## gaps are not filled correctly, just do a interp seems easier.\n new_run.dataset = combined_ds.interp(\n time=new_dt_index, method=gap_method\n )\n\n # update channel attributes\n for ch in new_run.channels:\n new_run.dataset[ch].attrs[\"time_period.start\"] = new_run.start\n new_run.dataset[ch].attrs[\"time_period.end\"] = new_run.end\n\n new_run.run_metadata.update_time_period()\n new_run.station_metadata.update_time_period()\n new_run.survey_metadata.update_time_period()\n new_run.filters = ts_filters\n\n return new_run", "def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a UC480 camera object (from the instrumental module) and the number of trap objects, iteratively analyzes the camera image and adjusts individual traps in order to achieve a nearly homogeneous intensity profile across traps.
def stabilize_intensity(which_cam, cam, verbose=False):
    L = 0.5  # Correction Rate
    mags = np.ones(12)  ### !
    ntraps = len(mags)
    iteration = 0
    while iteration < 5:
        iteration += 1
        print("Iteration ", iteration)

        im = cam.latest_frame()
        try:
            trap_powers = analyze_image(which_cam, im, ntraps, iteration, verbose)
        except (AttributeError, ValueError) as e:
            print("No Bueno, error occurred during image analysis:\n", e)
            break

        mean_power = trap_powers.mean()
        rel_dif = 100 * trap_powers.std() / mean_power
        print(f'Relative Power Difference: {rel_dif:.2f} %')
        if rel_dif < 0.8:
            print("WOW")
            break

        deltaP = [mean_power - P for P in trap_powers]
        dmags = [(dP / abs(dP)) * sqrt(abs(dP)) * L for dP in deltaP]
        mags = np.add(mags, dmags)
        print("Magnitudes: ", mags)
        break
        # self._update_magnitudes(mags)
    _ = analyze_image(im, ntraps, verbose=verbose)
[ "def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):\n threshes = [0.5, 0.6]\n margin = 10\n threshold = np.max(image) * threshes[which_cam]\n im = image.transpose()\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n left_pos = 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n left_pos = i\n elif left_pos != 0:\n if first:\n pos_first = (left_pos + i) // 2\n first = False\n pos_last = (left_pos + i) // 2\n left_pos = 0\n\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers", "def calc_miou(gt_dict, pr_dict, shot_to_end_frame_dict, threshold=0.5):\r\n def iou(x, y):\r\n s0, e0 = x\r\n s1, e1 = y\r\n smin, smax = (s0, s1) if s1 > s0 else (s1, s0)\r\n emin, emax = (e0, e1) if e1 > e0 else (e1, e0)\r\n return (emin - smax + 1) / (emax - smin + 1)\r\n\r\n def scene_frame_ranges(scene_transitions, shot_to_end_frame):\r\n end_shots = np.where(scene_transitions)[0]\r\n scenes = np.zeros((len(end_shots) + 1, 2), dtype=end_shots.dtype)\r\n scenes[:-1, 1] = shot_to_end_frame[end_shots]\r\n scenes[-1, 1] = shot_to_end_frame[len(scene_transitions)]\r\n scenes[1:, 0] = scenes[:-1, 1] + 1\r\n return scenes\r\n\r\n def miou(gt_array, pr_array, shot_to_end_frame):\r\n gt_scenes = scene_frame_ranges(gt_array, shot_to_end_frame)\r\n # pr_scenes = scene_frame_ranges(pr_array >= threshold, shot_to_end_frame)\r\n pr_scenes = scene_frame_ranges(pr_array, shot_to_end_frame)\r\n assert gt_scenes[-1, -1] == pr_scenes[-1, -1]\r\n\r\n m = gt_scenes.shape[0]\r\n n = pr_scenes.shape[0]\r\n\r\n # IoU for (gt_scene, pr_scene) pairs\r\n iou_table = np.zeros((m, n))\r\n\r\n j = 0\r\n for i in range(m):\r\n # j start prior to i end\r\n while pr_scenes[j, 0] <= gt_scenes[i, 1]:\r\n iou_table[i, j] = iou(gt_scenes[i], pr_scenes[j])\r\n if j < n - 1:\r\n j += 1\r\n else:\r\n break\r\n # j end prior to (i + 1) start\r\n if pr_scenes[j, 1] < gt_scenes[i, 1] + 1:\r\n break\r\n # j start later than (i + 1) start\r\n if pr_scenes[j, 0] > gt_scenes[i, 1] + 1:\r\n j -= 1\r\n assert np.isnan(iou_table).sum() == 0\r\n assert iou_table.min() >= 0\r\n\r\n # Miou\r\n return (iou_table.max(axis=0).mean() + 
iou_table.max(axis=1).mean()) / 2\r\n\r\n assert gt_dict.keys() == pr_dict.keys()\r\n\r\n miou_dict = dict()\r\n\r\n for imdb_id in gt_dict.keys():\r\n miou_dict[imdb_id] = miou(gt_dict[imdb_id], pr_dict[imdb_id], shot_to_end_frame_dict[imdb_id])\r\n mean_miou = sum(miou_dict.values()) / len(miou_dict)\r\n\r\n return mean_miou, miou_dict", "def calc_miou(gt_dict, pr_dict, shot_to_end_frame_dict, threshold=0.5):\n def iou(x, y):\n s0, e0 = x\n s1, e1 = y\n smin, smax = (s0, s1) if s1 > s0 else (s1, s0)\n emin, emax = (e0, e1) if e1 > e0 else (e1, e0)\n return (emin - smax + 1) / (emax - smin + 1)\n\n def scene_frame_ranges(scene_transitions, shot_to_end_frame):\n end_shots = np.where(scene_transitions)[0]\n scenes = np.zeros((len(end_shots) + 1, 2), dtype=end_shots.dtype)\n scenes[:-1, 1] = shot_to_end_frame[end_shots]\n scenes[-1, 1] = shot_to_end_frame[len(scene_transitions)]\n scenes[1:, 0] = scenes[:-1, 1] + 1\n return scenes\n\n def miou(gt_array, pr_array, shot_to_end_frame):\n gt_scenes = scene_frame_ranges(gt_array, shot_to_end_frame)\n pr_scenes = scene_frame_ranges(pr_array >= threshold, shot_to_end_frame)\n assert gt_scenes[-1, -1] == pr_scenes[-1, -1]\n\n m = gt_scenes.shape[0]\n n = pr_scenes.shape[0]\n\n # IoU for (gt_scene, pr_scene) pairs\n iou_table = np.zeros((m, n))\n\n j = 0\n for i in range(m):\n # j start prior to i end\n while pr_scenes[j, 0] <= gt_scenes[i, 1]:\n iou_table[i, j] = iou(gt_scenes[i], pr_scenes[j])\n if j < n - 1:\n j += 1\n else:\n break\n # j end prior to (i + 1) start\n if pr_scenes[j, 1] < gt_scenes[i, 1] + 1:\n break\n # j start later than (i + 1) start\n if pr_scenes[j, 0] > gt_scenes[i, 1] + 1:\n j -= 1\n assert np.isnan(iou_table).sum() == 0\n assert iou_table.min() >= 0\n\n # Miou\n return (iou_table.max(axis=0).mean() + iou_table.max(axis=1).mean()) / 2\n\n assert gt_dict.keys() == pr_dict.keys()\n\n miou_dict = dict()\n\n for imdb_id in gt_dict.keys():\n miou_dict[imdb_id] = miou(gt_dict[imdb_id], pr_dict[imdb_id], shot_to_end_frame_dict[imdb_id])\n mean_miou = sum(miou_dict.values()) / len(miou_dict)\n\n return mean_miou, miou_dict", "def analyze_image(image, ntraps, iteration=0, verbose=False):\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*0.5\n im = image.transpose()\n plt.imshow(im)\n plt.show(block=False)\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n if first:\n pos_first = i\n first = False\n pos_last = i\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), 
'--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers", "def eyeContingentRemappingPerRun(self):\n\t\teye_data = self.eyeDataForRuns() # remap, sacc_map\n\t\troi_data_per_run = self.runDataForRegions()\n\t\troi_data_per_condition = self.conditionDataForRegions()\n\t\tresults = []\n\t\tf = pl.figure(figsize = (8,8))\n\t\tfor area in range(len(self.maskedConditionData)):\n\t\t\tmask = self.maskedConditionData[area][self.conditionDict.keys().index('fix_map')][9] != 0\n\t\t\tfix_map_phases = self.maskedConditionData[area][self.conditionDict.keys().index('fix_map')][9][mask]\n\t\t\tsacc_map_phases = self.maskedConditionData[area][self.conditionDict.keys().index('sacc_map')][9][mask]\n\t\t\tfix_periphery_phases = self.maskedConditionData[area][self.conditionDict.keys().index('fix_periphery')][9][mask]\n\t\t\tremap_phases_per_run = np.array([self.maskedRunData[self.conditionDict.keys().index('remap')][area][run][9][mask] for run in range(len(self.maskedRunData[self.conditionDict.keys().index('remap')][area]))])\n\t\t\t\n\t\t\t# remap is the first of the saccade conditions, so it's eye_data[0]\n\t\t\teye_parameters_remap = np.array([[eye_data[0][run][i] for i in [3,4]] for run in range(len(eye_data[0]))])\n\t\t\t\n\t\t\t# calculate phase differences and such\n\t\t\tmap_diffs = (pi - np.abs(circularDifference(fix_map_phases, sacc_map_phases))) / pi\n\t\t\tperiphery_diffs = (pi - np.abs(circularDifference(fix_periphery_phases, sacc_map_phases))) / pi\n\t\t\tremap_diffs = (pi - np.abs(np.array([circularDifference(r, sacc_map_phases) for r in remap_phases_per_run]))) / pi\n\t\t\t\n\t\t\ts = f.add_subplot(len(self.maskedConditionData), 2, (2*area) + 1)\n\t\t\tpl.hist(periphery_diffs, range = [0,1], bins = 50, alpha = 0.3, histtype='step', color = 'b')\n\t\t\tpl.hist(map_diffs, range = [0,1], bins = 50, alpha = 0.3, histtype='stepfilled', color = 'r')\n\t\t\tfor i in range(remap_diffs.shape[0]):\n\t\t\t\tpl.hist(remap_diffs[i], range = [0,1], bins = 50, alpha = 0.3, histtype='step', color = 'g')\n\t\t\tprint map_diffs.mean(), periphery_diffs.mean(), remap_diffs.mean(axis = 1)\n\t\t\t\n\t\t\t# shell()\n\t\t\ts = f.add_subplot(len(self.maskedConditionData), 2, (2*area) + 2)\n\t\t\tfor i in range(remap_diffs.shape[0]):\n\t\t\t\tpl.plot( eye_parameters_remap[:,0] - eye_parameters_remap[:,0].mean(), remap_diffs.mean(axis = 1) - remap_diffs.mean(axis = 1).mean(), 'gx', alpha = 0.3 + (0.7 / remap_diffs.shape[0]) )\n\t\t\tfor i in range(remap_diffs.shape[0]):\n\t\t\t\tpl.plot( eye_parameters_remap[:,1] - eye_parameters_remap[:,1].mean(), remap_diffs.mean(axis = 1) - remap_diffs.mean(axis = 1).mean(), 'g+', alpha = 0.3 + (0.7 / remap_diffs.shape[0]) )\n\t\t\tresults.append(np.vstack(((eye_parameters_remap - eye_parameters_remap.mean(axis = 0)).T, remap_diffs.mean(axis = 1) - remap_diffs.mean(axis = 1).mean())))\n\t\tpl.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'eye_remap_run_correlations.pdf' ))\n\t\tnp.save(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'eye_remap_run_correlations.npy' ), np.array(results))", "def phot_aperture(input_file):\n #set the original directory\n original_path = os.getcwd()\n save_path = 
input_file['save_path']\n planet = input_file['exoplanet']\n #radii = np.arange(input_file['apertures'][0],input_file['apertures'][1],0.1)\n radii = np.array(input_file['apertures'])\n #change to save data reduction directory\n os.chdir(save_path)\n if not os.path.exists('phot_results'):\n os.makedirs('phot_results')\n tempo = time.time()\n print 'Starting aperture photometry'\n print 'Saving results on: '+save_path+'/phot_results/'\n \n #check the number of objects to make the photometry\n N_obj = len(input_file['pxpositions'])/2.\n print 'Number of objects = ',N_obj\n positions = [] #create the positions variable (X,Y) in pixels unit on the CCD\n for i in range(len(input_file['pxpositions'])):\n if i % 2 == 0: #if the number is a even (or not a odd), the turple is created\n positions.append((input_file['pxpositions'][i],input_file['pxpositions'][i+1]))\n print 'Radius from ',radii[0],' to ',radii[-1],'\\n'\n \n skysection = input_file['skysection']\n skysection[0] = int(skysection[0])\n skysection[1] = int(skysection[1])\n \n images = sorted(glob.glob('AB'+planet+'*.fits'))\n for radius in radii:\n flux_data = []\n for i in range(len(images)):\n im = fits.getdata(images[i],header=False)\n im = array(im,dtype='Float64')\n \n # ERROR\n #Traceback (most recent call last):\n # File \"ExoTRed.py\", line 105, in <module>\n # exotred.phot_aperture(input_file)\n # File \"./sources/ExoTRed_core.py\", line 637, in phot_aperture \n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 329, in __init__\n # self._calc_bkg_bkgrms()\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 686, in _calc_bkg_bkgrms\n # bkg = self._interpolate_meshes(self._bkg1d)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/background/background_2d.py\", line 575, in _interpolate_meshes\n # f = ShepardIDWInterpolator(yx, data)\n # File \"/home/walter/bin/anaconda3/envs/iraf27/lib/python2.7/site-packages/photutils/utils/interpolation.py\", line 138, in __init__\n # raise ValueError('The number of values must match the number '\n # ValueError: The number of values must match the number of coordinates.\n\n # bkg = background.background_2d.Background2D(im,tuple(skysection))\n # bkg_data = bkg.background\n # bkg_rms = bkg.background_rms\n\n # phot_table = aperture_photometry(im - bkg_data, CircularAperture(positions, radius),\n # error=bkg_rms, method ='center')#,effective_gain=float(input_file['gain']))\n ####### SUBSTITUTE ROUTINE\n window = 100\n sky_size = im.shape\n sky_mean = float(np.median(im[int(skysection[1]-window):int(skysection[1]+window),int(skysection[0]-window):int(skysection[0]+window)]))\n bkg = np.random.poisson(sky_mean,sky_size)\n apertures = CircularAperture(positions, radius)\n phot_table = aperture_photometry(im, apertures, error=bkg)\n #######\n phot_table_flux = np.array([]) #saving results of aperture photometry\n for j in range(len(phot_table['aperture_sum'])):\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum'][j]])),axis=0)\n phot_table_flux = np.concatenate((phot_table_flux,np.array([phot_table['aperture_sum_err'][j]])),axis=0)\n flux = np.concatenate((phot_table_flux,np.array([images[i]])),axis=0)\n # flux = [phot_table['aperture_sum'][0], phot_table['aperture_sum'][1],phot_table['aperture_sum_err'][0],\n # phot_table['aperture_sum_err'][1],images[i]]\n flux_data.append(flux)\n 
flux_data = DataFrame(flux_data)#,columns=['hoststar','refstar','hoststar_err','refstar_err','image'])\n flux_data.to_csv('./phot_results/'+planet+'_flux_radius_'+str(radius)+'.csv',index=False)\n use.update_progress((float(np.where(radii == radius)[0])+1.)/len(radii))\n print 'Time total = ',abs(time.time()-tempo)/60.,' minutes'\n os.chdir(original_path)", "def update_for_in_trap(self, t, traps): #******\n sources = traps.param['source_locations'] #Of format [(0,0),]\n for trap_num, trap_loc in enumerate(sources):\n dist_vals = distance((self.x_position, self.y_position),trap_loc)\n mask_trapped = dist_vals < traps.param['trap_radius']\n self.mode[mask_trapped] = self.Mode_Trapped\n self.trap_num[mask_trapped] = trap_num\n self.x_trap_loc[mask_trapped] = trap_loc[0]\n self.y_trap_loc[mask_trapped] = trap_loc[1]\n self.x_velocity[mask_trapped] = 0.0\n self.y_velocity[mask_trapped] = 0.0\n\n # Get time stamp for newly trapped flies\n mask_newly_trapped = mask_trapped & (self.t_in_trap == scipy.inf)\n self.t_in_trap[mask_newly_trapped] = t", "def extract_tissue_component(img, threshInfo=\"\", perform_aniso=False, perform_median=False):\n\n if threshInfo.find(\"bone\") > -1:\n thresholds = [200., 800., 1300., 1500.]\n elif threshInfo.find(\"skin\") > -1:\n thresholds = [-200., 0., 500., 1500.]\n elif threshInfo.find(\"soft\") > -1:\n thresholds = [-15., 30., 58., 100.]\n elif threshInfo.find(\"fat\") > -1:\n thresholds = [-122., -112., -96., -70.]\n else:\n try:\n thresholds = [float(x) for x in threshInfo.split(\",\")]\n except:\n raise ValueError(\"could not convert threshInfo to 4-tuple\")\n # check that there are 4 threshold values.\n if len(thresholds) != 4:\n raise ValueError(\"Error: Threshold is not of size 4.\", thresholds)\n\n if perform_aniso:\n pixelType = img.GetPixelID()\n img = sitk.Cast(img, sitk.sitkFloat32)\n img = sitk.CurvatureAnisotropicDiffusion(img, .03)\n img = sitk.Cast(img, pixelType)\n gc.collect()\n\n img = sitk.DoubleThreshold(img, thresholds[0], thresholds[1], thresholds[2], thresholds[3], 255, 0)\n gc.collect()\n\n if perform_median:\n img = sitk.Median(img, [3,3,1])\n gc.collect()\n\n return img", "def irrigate(self, location, amount):\n # window_grid_size = (self.irr_threshold + self.irr_threshold + 1) * (\n # self.irr_threshold + self.irr_threshold + 1) / 10000 # in square meters\n window_grid_size = np.pi * ((self.irr_threshold)**2) / 10000 # in square meters\n gain = 1/32\n # Start from outer radius\n for radius in range(4,9)[::-1]:\n # For each bounding box, check if the cubes are within the radius \n # + add water from outer to center\n lower_x = max(0, location[0] - radius)\n upper_x = min(self.grid.shape[0], location[0] + radius + 1)\n lower_y = max(0, location[1] - radius)\n upper_y = min(self.grid.shape[1], location[1] + radius + 1)\n for y in range(lower_y, upper_y):\n for x in range(lower_x, upper_x):\n pt = [x, y]\n if np.sqrt((location[0] - pt[0])**2 + (location[1] - pt[1])**2) <= radius:\n self.grid[x, y]['water'] += gain * (amount / (window_grid_size * 0.35))\n gain *= 2\n\n # TODO: add distribution kernel for capillary action and spread of water jet\n # 0.001m^3/(0.11m * 0.11m * 0.35m) ~ 0,236 %\n # self.grid[lower_x:upper_x, lower_y:upper_y]['water'] += amount / (\n # window_grid_size * 0.35) # 0.0121m^2 * 0.35m depth\n lower_x = max(0, location[0] - self.irr_threshold)\n upper_x = min(self.grid.shape[0], location[0] + self.irr_threshold + 1)\n lower_y = max(0, location[1] - self.irr_threshold)\n upper_y = min(self.grid.shape[1], location[1] + 
self.irr_threshold + 1)\n \n np.minimum(\n self.grid[lower_x:upper_x, lower_y:upper_y]['water'],\n MAX_WATER_LEVEL,\n out=self.grid[lower_x:upper_x, lower_y:upper_y]['water'])", "def trap(self, traps):\n if not self.immune:\n # randomly select a trap from a list of traps\n spring = random.choice(traps)\n # call hit upon racer trap\n self._hit(spring['Effect'], spring['Damage'])\n self.immune = True\n print TRAPPED % (self.name, spring['Description'],\n spring['Name'], spring['Effect'],\n spring['Damage'])\n else:\n self.immune = False", "def calculate_track(self, picture):", "def shutter_correction(imagelist):\n\n fiber1 = 20\n fiber2 = 43\n\n hdulist = [pyfits.open(image)[0] for image in imagelist]\n avgs = [np.mean(h.data[fiber1 - 1:fiber2 - 1,:],axis=0) for h in hdulist]\n corrections = avgs[0]/np.array(avgs)\n\n pyfits.PrimaryHDU(corrections,hdulist[0].header).writeto('corrections.ms.fits',clobber=True)\n\n outputnames = ['{}_shut.ms.fits'.format(image.split('.ms.fits')[0]) for image in imagelist]\n print 'Correcting for shutter lag...'\n for h, corr, name in zip(hdulist, corrections, outputnames):\n print '\\t{} {}'.format(name,np.mean(corr))\n h.data *= corr\n h.writeto(name,clobber=True)\n\n return outputnames", "def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht", "def readPeriStimulusTifImages(tifDir, basPath, nBasCh=16,\n ch_camTrig='patch1', ch_stim='patch3',\n tifNameStr='', time_preStim=1,\n time_postStim=10, thr_stim=0.5,\n thr_camTrig=3,\n maxAllowedTimeBetweenStimAndCamTrig=0.5,\n n_jobs=2):\n import tifffile as tff\n import apCode.FileTools as ft\n import apCode.ephys as ephys\n import apCode.SignalProcessingTools as spt\n import apCode.util as util\n\n def getImgIndsInTifs(tifInfo):\n nImgsInFile_cum =\\\n np.cumsum(tifInfo['nImagesInFile']*tifInfo['nChannelsInFile'])\n imgIndsInTifs = []\n for i in range(len(nImgsInFile_cum)):\n if i == 0:\n inds_ = np.arange(0, nImgsInFile_cum[i])\n else:\n inds_ = np.arange(nImgsInFile_cum[i-1], nImgsInFile_cum[i])\n imgIndsInTifs.append(inds_)\n return imgIndsInTifs\n\n # Read relevant metadata from tif files in directory\n print('Reading ScanImage metadata from tif files...')\n tifInfo = ft.scanImageTifInfo(tifDir)\n nCaImgs = np.sum(tifInfo['nImagesInFile'])\n print('{} images from all tif files'.format(nCaImgs))\n\n # Check for consistency in the number of image channels in all files.\n if len(np.unique(tifInfo['nChannelsInFile'])) > 1:\n print('Different number of image channels across files, check files!')\n return None\n nImgCh = tifInfo['nChannelsInFile'][0]\n\n # Get a list of indices corresponding to images in each of the tif files\n inds_imgsInTifs = getImgIndsInTifs(tifInfo)\n\n # Read bas file to get stimulus and camera trigger indices required to\n # align images and behavior\n print('Reading bas file, detecting stimuli and camera triggers...')\n bas = ephys.importCh(basPath, nCh=nBasCh)\n inds_stim = spt.levelCrossings(bas[ch_stim], thr=thr_stim)[0]\n inds_camTrig = spt.levelCrossings(bas[ch_camTrig], thr=thr_camTrig)[0]\n dt_vec = np.diff(bas['t'][inds_camTrig])\n dt_ca = np.round(np.mean(dt_vec)*100)/100\n print('Ca sampling rate 
= {}'.format(1/dt_ca))\n inds_del = np.where(dt_vec <= (0.5*dt_ca))[0]+1\n inds_camTrig = np.delete(inds_camTrig, inds_del)\n\n # Deal with possible mismatch in number of camera trigger indices and\n # number of images in tif files\n if nCaImgs < len(inds_camTrig):\n inds_camTrig = inds_camTrig[:nCaImgs]\n nCaImgs_extra = 0\n elif nCaImgs > len(inds_camTrig):\n nCaImgs_extra = nCaImgs-len(inds_camTrig)\n else:\n nCaImgs_extra = 0\n print('{} extra Ca2+ images'.format(nCaImgs_extra))\n print('{} stimuli and {} camera triggers'.format(len(inds_stim),\n len(inds_camTrig)))\n\n # Indices of ca images closest to stimulus\n inds_stim_img = spt.nearestMatchingInds(inds_stim, inds_camTrig)\n\n # Find trials where the nearest cam trigger is farther than the stimulus\n # by a certain amount\n inds_camTrigNearStim = inds_camTrig[inds_stim_img]\n t_stim = bas['t'][inds_stim]\n t_camTrigNearStim = bas['t'][inds_camTrigNearStim]\n inds_tooFar = np.where(np.abs(t_stim-t_camTrigNearStim) >\n maxAllowedTimeBetweenStimAndCamTrig)[0]\n inds_ca_all = np.arange(nCaImgs)\n nPreStim = int(np.round(time_preStim/dt_ca))\n nPostStim = int(np.round(time_postStim/dt_ca))\n print(\"{} pre-stim points, and {} post-stim points\".format(nPreStim,\n nPostStim))\n inds_ca_trl = np.array(spt.segmentByEvents(\n inds_ca_all, inds_stim_img+nCaImgs_extra, nPreStim, nPostStim))\n\n # Find trials that are too short to include the pre- or post-stimulus\n # period\n trlLens = np.array([len(trl_) for trl_ in inds_ca_trl])\n inds_tooShort = np.where(trlLens < np.max(trlLens))[0]\n inds_trl_del = np.union1d(inds_tooFar, inds_tooShort)\n inds_trl_keep = np.setdiff1d(np.arange(len(inds_ca_trl)), inds_trl_del)\n\n # Exclude the above 2 types of trials from consideration\n if len(inds_trl_del) > 0:\n print('Excluding the trials {}'.format(inds_trl_del))\n inds_ca_trl = inds_ca_trl[inds_trl_keep]\n\n I = []\n print('Reading trial-related images from tif files...')\n nTrls = len(inds_ca_trl)\n\n def trlImages(inds_ca_trl, inds_imgsInTifs, nImgCh, tifInfo, trl):\n trl_ = np.arange(trl.min()*nImgCh, (trl.max()+1)*nImgCh)\n loc = util.locateItemsInSetsOfItems(trl_, inds_imgsInTifs)\n I_ = []\n for subInds, supInd in zip(loc['subInds'], loc['supInds']):\n with tff.TiffFile(tifInfo['filePaths'][supInd]) as tif:\n img = tif.asarray(key=subInds)\n I_.extend(img.reshape(-1, nImgCh, *img.shape[1:]))\n I_ = np.array(I_)\n return I_\n\n if n_jobs < 2:\n chunkSize = int(nTrls/5)\n for iTrl, trl in enumerate(inds_ca_trl):\n if np.mod(iTrl, chunkSize) == 0:\n print('Trl # {}/{}'.format(iTrl+1, nTrls))\n I_ = trlImages(inds_ca_trl, inds_imgsInTifs, nImgCh, tifInfo, trl)\n I.append(I_)\n else:\n print('Processing with dask')\n import dask\n from dask.diagnostics import ProgressBar\n for trl in inds_ca_trl:\n I_ = dask.delayed(trlImages)(inds_ca_trl, inds_imgsInTifs, nImgCh,\n tifInfo, trl)\n I.append(I_)\n with ProgressBar():\n I = dask.compute(*I)\n\n D = dict(I=np.squeeze(np.array(I)), tifInfo=tifInfo, inds_stim=inds_stim,\n inds_stim_img=inds_stim_img, inds_camTrig=inds_camTrig, bas=bas,\n inds_trl_excluded=inds_trl_del)\n return D", "def process():\r\n\r\n for scenario in SCENARIOS:\r\n output_file_name = \"all_\" + scenario['name']\r\n\r\n # limit output_file_name to 13 chars\r\n output_file_name = output_file_name[:13]\r\n\r\n output_file = output_path + output_file_name\r\n\r\n # calculate raster if we don't already have it\r\n if not os.path.exists(output_file):\r\n print 'Calculating combined raster for scenario: ' + scenario['name']\r\n fuzzies = 
[]\r\n for source_file in SOURCE_FILES:\r\n \r\n file_name = source_file.get('file_name')\r\n weight = scenario[source_file.get('name')]\r\n invert_required = source_file.get('invert')\r\n overlay_method = source_file.get('overlay')\r\n\r\n input_raster = RasterInput(file_name,\r\n weight,\r\n invert_required,\r\n overlay_method)\r\n\r\n fuzzies.append(input_raster)\r\n\r\n final_raster = overlay(fuzzies)\r\n final_raster.save(output_file)", "def recalage_rigidezoom_multi_IM(imreca, imref, imres, filename) : \n medipy.medimax.recalage.LinearRegistration(imref,imreca, imres,1,5,1,1,0,1,str(filename),0,2,0)", "def ptc_ir(imagelist, variancelist, NDIT=10, *coor, **kargs):\n # x1,x2,y1,y2=bias1.get_windowcoor(*coor)\n # TODO continue with the program for this routine", "def integrate_flux(ifu, fibersize, image, platescale):\r\n pointy = hexlib.Layout(hexlib.layout_pointy, fibersize, hexlib.Point(0,0))\r\n fluxes = np.zeros(len(ifu), dtype=np.float)\r\n for (x,y), flux in np.ndenumerate(image):\r\n p = Point(x*platescale,y*platescale)\r\n hex = pixel_to_hex(pointy, p)\r\n i = find_hex(ifu, hex)\r\n if i>=0:\r\n flux[i] += image(x,y)", "def driver(detector, original_images_dir, ground_truth_directory, predictions_directory, reports_dir):\n ## concatenate all ground truth annotations for an image into a single text file\n ## dictionary keys are image filenames and value is list of lists of all annotations in float 8 point form\n ground_truth_annotation_dictionary = createGroundTruthDictionary(original_images_dir, ground_truth_directory)\n \n ## concatenate all predicted annotations for an image into a single text file\n ## dictionary keys are image filenames and value is list of lists of all annotations in float 8 point form\n predicted_annotation_dictionary = createPredictedDictionary(original_images_dir, predictions_directory)\n\n # dictionary keys are the image filenames and value is the average IoU score\n calculated_iou_dictionary = performPolygonIoUCalculation(ground_truth_annotation_dictionary, predicted_annotation_dictionary)\n \n ## output text file with all average IoU values\n if not reports_dir:\n reports_dir = os.path.join(os.curdir, \"reports\")\n \n if not os.path.isdir(reports_dir):\n os.mkdir(reports_dir)\n \n report_filepath = os.path.join(reports_dir, \"%s_iou_report.txt\" % detector)\n # generateIoUReport(calculated_iou_dictionary, report_filepath)\n generatePrecisionRecallReport(calculated_iou_dictionary, detector, reports_dir, report_filepath)\n\n ## signal completion\n print(\"IoU Calculation Complete!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scans the given image for the 'ntraps' number of trap intensity peaks. Then extracts the 1-dimensional Gaussian profiles across the traps and returns a list of the amplitudes.
def analyze_image(which_cam, image, ntraps, iteration=0, verbose=False):
    threshes = [0.5, 0.6]
    margin = 10
    threshold = np.max(image) * threshes[which_cam]
    im = image.transpose()

    x_len = len(im)
    peak_locs = np.zeros(x_len)
    peak_vals = np.zeros(x_len)

    ## Trap Peak Detection ##
    for i in range(x_len):
        if i < margin or x_len - i < margin:
            peak_locs[i] = 0
            peak_vals[i] = 0
        else:
            peak_locs[i] = np.argmax(im[i])
            peak_vals[i] = max(im[i])

    ## Trap Range Detection ##
    first = True
    pos_first, pos_last = 0, 0
    left_pos = 0
    for i, p in enumerate(peak_vals):
        if p > threshold:
            left_pos = i
        elif left_pos != 0:
            if first:
                pos_first = (left_pos + i) // 2
                first = False
            pos_last = (left_pos + i) // 2
            left_pos = 0

    ## Separation Value ##
    separation = (pos_last - pos_first) / ntraps  # In Pixels

    ## Initial Guesses ##
    means0 = np.linspace(pos_first, pos_last, ntraps).tolist()
    waists0 = (separation * np.ones(ntraps) / 2).tolist()
    ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()
    _params0 = [means0, waists0, ampls0, [0.06]]
    params0 = [item for sublist in _params0 for item in sublist]

    ## Fitting ##
    if verbose:
        print("Fitting...")
    xdata = np.arange(x_len)
    popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),
                           xdata, peak_vals, p0=params0)
    if verbose:
        print("Fit!")
    plt.figure()
    plt.plot(xdata, peak_vals)  # Data
    if iteration:
        plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r')  # Initial Guess
        plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt))  # Fit
        plt.title("Iteration: %d" % iteration)
    else:
        plt.title("Final Product")

    plt.xlim((pos_first - margin, pos_last + margin))
    plt.legend(["Data", "Guess", "Fit"])
    plt.show(block=False)
    print("Fig_Newton")
    trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])
    return trap_powers
[ "def analyze_image(image, ntraps, iteration=0, verbose=False):\n ## Image Conditioning ##\n margin = 10\n threshold = np.max(image)*0.5\n im = image.transpose()\n plt.imshow(im)\n plt.show(block=False)\n\n x_len = len(im)\n peak_locs = np.zeros(x_len)\n peak_vals = np.zeros(x_len)\n\n ## Trap Peak Detection ##\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n peak_locs[i] = 0\n peak_vals[i] = 0\n else:\n peak_locs[i] = np.argmax(im[i])\n peak_vals[i] = max(im[i])\n\n ## Trap Range Detection ##\n first = True\n pos_first, pos_last = 0, 0\n for i, p in enumerate(peak_vals):\n if p > threshold:\n if first:\n pos_first = i\n first = False\n pos_last = i\n ## Separation Value ##\n separation = (pos_last - pos_first) / ntraps # In Pixels\n\n ## Initial Guesses ##\n means0 = np.linspace(pos_first, pos_last, ntraps).tolist()\n waists0 = (separation * np.ones(ntraps) / 2).tolist()\n ampls0 = (max(peak_vals) * 0.7 * np.ones(ntraps)).tolist()\n _params0 = [means0, waists0, ampls0, [0.06]]\n params0 = [item for sublist in _params0 for item in sublist]\n\n ## Fitting ##\n if verbose:\n print(\"Fitting...\")\n xdata = np.arange(x_len)\n popt, pcov = curve_fit(lambda x, *params_0: wrapper_fit_func(x, ntraps, params_0),\n xdata, peak_vals, p0=params0)\n if verbose:\n print(\"Fit!\")\n plt.figure()\n plt.plot(xdata, peak_vals) # Data\n if iteration:\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, params0), '--r') # Initial Guess\n plt.plot(xdata, wrapper_fit_func(xdata, ntraps, popt)) # Fit\n plt.title(\"Iteration: %d\" % iteration)\n else:\n plt.title(\"Final Product\")\n\n plt.xlim((pos_first - margin, pos_last + margin))\n plt.legend([\"Data\", \"Guess\", \"Fit\"])\n plt.show(block=False)\n print(\"Fig_Newton\")\n trap_powers = np.frombuffer(popt[2 * ntraps:3 * ntraps])\n return trap_powers", "def trapfilt_taps(N, phil, alfa):\n\n\n\n tt = arange(-N/2,N/2 + 1) # Time axis for h(t) \n # ***** Generate impulse response ht here *****\n ht = zeros(len(tt))\n ix = where(tt != 0)[0]\n if alfa != 0:\n ht[ix] = ((sin(2*pi*phil*tt[ix]))/(pi*tt[ix]))*((sin(2*pi*alfa*phil*tt[ix]))/(2*pi*alfa*phil*tt[ix]))\n else:\n ht[ix] = (sin(2*pi*phil*tt[ix]))/(pi*tt[ix])\n ix0 = where(tt == 0)[0]\n ht[ix0] = 2*phil\n ht = ht/sum(power(ht,2))\n\n return ht", "def traps(self):\n return self.trapezoids.trap_list()", "def trapfilt_taps(N, phiL, alfa):\n tth = arange(-N/2.0,N-(N/2.0)) # Time axis for h(t)\n hLn_num = (sin(2*pi*phiL*tth)*sin(2*pi*alfa*phiL*tth))\n hLn_den = (pi*tth*2*pi*alfa*phiL*tth)\n nans = where(hLn_den==0)\n for i in nans:\n hLn_den[i]=1 # ht[i-1]\n hLn_num[i]=2*phiL # ht[i-1]\n hLn = hLn_num/hLn_den\n return hLn", "def gaussianFilter(gain,BT,spSym,nTaps):\n\n a = np.sqrt(np.log(2)/2)/BT\n t = np.linspace(-.5*nTaps,.5*nTaps-1,nTaps)/spSym\n\n ft = np.sqrt(np.pi)/a *np.exp(-(np.pi**2*(t)**2)/a**2)\n ft /= np.sum(ft) * gain # normalize filter\n\n return ft", "def demo_detect_intensity_peaks(tiff):\n fig, axs = plt.subplots(4, 10)\n imgs, hist, peaks = detect_intensity_peaks(tiff, demo=True)\n for i, img in enumerate(imgs):\n p_row = int(i / 10)\n p_col = int(i % 10)\n ax = axs[p_row][p_col]\n ax.set_axis_off()\n ax.plot(hist[i], lw=2)\n ax.plot(peaks[i], hist[i][peaks[i]], \"x\")\n ax.set_title(i)", "def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g 
= img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features", "def test_compute_ts_map():\n filename = '$GAMMAPY_EXTRA/test_datasets/unbundled/poisson_stats_image/input_all.fits.gz'\n images = SkyImageList.read(filename)\n\n kernel = Gaussian2DKernel(2.5)\n\n images['counts'] = images['counts'].downsample(2, np.nansum)\n images['background'] = images['background'].downsample(2, np.nansum)\n images['exposure'] = images['exposure'].downsample(2, np.mean)\n\n result = compute_ts_image(\n images['counts'], images['background'], images['exposure'], kernel,\n method='leastsq iter',\n )\n for name, order in zip(['ts', 'amplitude', 'niter'], [2, 5, 0]):\n result[name].data = np.nan_to_num(result[name].data)\n result[name] = result[name].upsample(2, order=order)\n\n assert_allclose(1705.840212274973, result['ts'].data[99, 99], rtol=1e-3)\n assert_allclose([[99], [99]], np.where(result['ts'].data == result['ts'].data.max()))\n assert_allclose(3, result['niter'].data[99, 99])\n assert_allclose(1.0227934338735763e-09, result['amplitude'].data[99, 99], rtol=1e-3)", "def sig_heatmap_html_extract(files):\n\n signal = [[None for j in range(len(files[0]))] for i in range(len(files))]\n bg = [[None for j in range(len(files[0]))] for i in range(len(files))]\n\n for i, file_row in enumerate(files):\n for j, file in enumerate(file_row):\n signal[i][j], _, bg[i][j], __ = sig_and_bg_from_html(file)\n\n return [np.array(signal), np.array(bg)]", "def get_fluxes_within_mask(tpf, aper_mask, gaia_sources):\n assert tpf is not None\n assert aper_mask is not None\n assert gaia_sources is not None\n ra, dec = gaia_sources[[\"ra\", \"dec\"]].values.T\n pix_coords = tpf.wcs.all_world2pix(np.c_[ra, dec], 0)\n contour_points = measure.find_contours(aper_mask, level=0.1)[0]\n isinside = [\n is_point_inside_mask(contour_points, pix) for pix in pix_coords\n ]\n min_gmag = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].min()\n gamma = gaia_sources.loc[isinside, \"phot_g_mean_mag\"].apply(\n lambda x: 10 ** (0.4 * (min_gmag - x))\n )\n return gamma", "def filter_probes(pacall, annotation, probes, threshold=0.5):\n\n threshold = np.clip(threshold, 0.0, 1.0)\n\n LGR.info(f'Filtering probes with intensity-based threshold of {threshold}')\n\n probes = io.read_probes(probes)\n signal, n_samp = np.zeros(len(probes), dtype=int), 0\n for donor, pa in pacall.items():\n annot = io.read_annotation(annotation[donor]).index\n data = io.read_pacall(pa).loc[probes.index, annot]\n n_samp += data.shape[-1]\n # sum binary expression indicator across samples for current subject\n signal += 
np.asarray(data.sum(axis=1))\n\n # calculate proportion of signal to noise for given probe across samples\n keep = (signal / n_samp) >= threshold\n\n LGR.info(f'{keep.sum()} probes survive intensity-based filtering')\n\n return probes[keep]", "def quantized_taps(self, nbits, taps=None):\n if taps is None:\n taps = self.taps\n\n new_taps = np.zeros(len(taps))\n for i, tap in enumerate(taps):\n new_taps[i] = bit.quantized_real(tap, nbits)\n return new_taps", "def compute_tap_intervals(xtaps, t, threshold=20):\n import numpy as np\n\n if isinstance(xtaps, list):\n xtaps = np.asarray(xtaps)\n if isinstance(t, list):\n t = np.asarray(t)\n\n # Set time points:\n tap_times = t - t[0]\n\n # Calculate x offset:\n xtaps_offset = xtaps - np.mean(xtaps)\n\n # Find left/right finger \"press\" events:\n dx = xtaps_offset[1:] - xtaps_offset[:-1]\n ipress = np.where(np.abs(dx) > threshold)\n\n # Filter data:\n #xtaps = xtaps[ipress]\n tap_times = tap_times[ipress]\n\n # Find press event intervals:\n tap_intervals = tap_times[1:] - tap_times[:-1]\n\n return ipress, tap_intervals", "def multi_aperture_map_drift_algorithm(range_data, N=2, num_iter=6): \n \n if N > 5:\n warn('MAM works best with N<=5')\n \n (Ta, K) = range_data.shape\n t_i = ((np.arange(1,N+1)/N)-((N+1)/(2*N))) * Ta \n range_bins = np.max([int(0.05*K), 32])\n idx = np.argsort(np.abs(range_data.ravel()))[:range_bins]\n _, idx = np.unravel_index(idx, range_data.shape)\n range_mask = np.zeros((K, ), dtype=np.bool)\n range_mask[np.unique(idx)] = True\n phi = 0\n\n for _ in range(num_iter):\n \n range_bins = np.sum(range_mask)\n \n if range_bins < 1:\n break\n \n map_arr = []\n \n ti_pix = np.int32(t_i + Ta/2)\n h = int(Ta/(2*N))\n for i in range(N):\n map_arr += [np.abs(ft2(range_data[ti_pix[i]-h:ti_pix[i]+h]))]\n \n num_pairs = int(N*(N-1)/2)\n rel_shift = np.zeros((num_pairs, range_bins ) , dtype=np.float32)\n delta = np.zeros ((num_pairs, N-1) , dtype=np.float32)\n r=0\n for i in range(N):\n for j in range(i+1, N):\n rel_shift[r] = corr_help(map_arr[i][:,range_mask],\n map_arr[j][:,range_mask])\n for k in range(2,N+1):\n delta[r,k-2] = k/(2*np.pi)*(t_i[j]**(k-1) - t_i[i]**(k-1))\n r+=1\n \n delta_inv = np.linalg.pinv(delta)\n a_vec = np.matmul(delta_inv, np.mean(rel_shift, 1))\n \n # Compute quadratic error for phi\n # TODO: I need to apply the entire N-th order phase change, my bad \n phi = 0.0\n for i in range(num_pairs):\n phi += a_vec[0] * 2 * t_i[i]\n \n # Discard range bins 1 std away from mean\n mean_err = np.mean(rel_shift)\n one_std_from_mean = np.std(rel_shift)#/2.0\n mean_range_err = np.mean(rel_shift, 0)\n range_mask_idx = np.argwhere(range_mask)\n less_than = mean_range_err < (mean_err - one_std_from_mean)\n greater_than = mean_range_err > (mean_err + one_std_from_mean)\n discard = less_than + greater_than\n range_mask[range_mask_idx[discard]] = False\n range_data = range_data * np.exp(1j * phi)\n\n return range_data", "def smooth_spectra(xarr, farr, sigma=3, nkern=20):\n xkern = np.arange(nkern)\n kern = np.exp(-(xkern - 0.5 * nkern) ** 2 / (sigma) ** 2)\n\n return gaussian_filter1d(farr, sigma)", "def _get_features_from_batch_images(self, img, r, p):\n tmp_feats = []\n for channel in range(4):\n current_img = img[channel, :, :]\n tmp_feats = np.append(tmp_feats, np.histogram(current_img)[0])\n # extract 8*8 patches of 64*64 px and derive 10 bins histogram\n for j in range(r):\n for k in range(r):\n tmp_feats = np.append(\n tmp_feats,\n np.histogram(current_img[j * p:(j + 1) * (p), k *\n p:(k + 1) * p])[0])\n return tmp_feats", 
"def features_sigma(img,\n sigma,\n intensity=True,\n edges=True,\n texture=True):\n\n features = []\n\n gx,gy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))\n # print(gx.shape)\n #features.append(gx)\n gx = filters.gaussian(gx, sigma)\n gy = filters.gaussian(gy, sigma)\n\n features.append(np.sqrt(gx**2 + gy**2)) #use polar radius of pixel locations as cartesian coordinates\n\n del gx, gy\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Location features extracted using sigma= %f' % (sigma))\n\n img_blur = filters.gaussian(img, sigma)\n\n if intensity:\n features.append(img_blur)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Intensity features extracted using sigma= %f' % (sigma))\n\n if edges:\n features.append(filters.sobel(img_blur))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Edge features extracted using sigma= %f' % (sigma))\n\n if texture:\n H_elems = [\n np.gradient(np.gradient(img_blur)[ax0], axis=ax1)\n for ax0, ax1 in itertools.combinations_with_replacement(range(img.ndim), 2)\n ]\n\n eigvals = feature.hessian_matrix_eigvals(H_elems)\n del H_elems\n\n for eigval_mat in eigvals:\n features.append(eigval_mat)\n del eigval_mat\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Texture features extracted using sigma= %f' % (sigma))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image features extracted using sigma= %f' % (sigma))\n\n return features", "def t1_hypointensity( x, xsegmentation, xWMProbability, template, templateWMPrior, wmh_thresh=0.1 ):\n mybig = [88,128,128]\n templatesmall = ants.resample_image( template, mybig, use_voxels=True )\n qaff = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(templatesmall), 'SyN',\n syn_sampling=2,\n syn_metric='CC',\n reg_iterations = [25,15,0,0],\n aff_metric='GC', random_seed=1 )\n afftx = qaff['fwdtransforms'][1]\n templateWMPrior2x = ants.apply_transforms( x, templateWMPrior, qaff['fwdtransforms'] )\n cerebrum = ants.threshold_image( xsegmentation, 2, 4 )\n realWM = ants.threshold_image( templateWMPrior2x , 0.1, math.inf )\n inimg = ants.rank_intensity( x )\n parcellateWMdnz = ants.kmeans_segmentation( inimg, 2, realWM, mrf=0.3 )['probabilityimages'][0]\n x2template = ants.apply_transforms( templatesmall, x, afftx, whichtoinvert=[True] )\n parcellateWMdnz2template = ants.apply_transforms( templatesmall,\n cerebrum * parcellateWMdnz, afftx, whichtoinvert=[True] )\n # features = rank+dnz-image, lprob, wprob, wprior at mybig resolution\n f1 = x2template.numpy()\n f2 = parcellateWMdnz2template.numpy()\n f3 = ants.apply_transforms( templatesmall, xWMProbability, afftx, whichtoinvert=[True] ).numpy()\n f4 = ants.apply_transforms( templatesmall, templateWMPrior, qaff['fwdtransforms'][0] ).numpy()\n myfeatures = np.stack( (f1,f2,f3,f4), axis=3 )\n newshape = np.concatenate( [ [1],np.asarray( myfeatures.shape )] )\n myfeatures = myfeatures.reshape( newshape )\n\n inshape = [None,None,None,4]\n wmhunet = antspynet.create_unet_model_3d( inshape,\n number_of_outputs = 1,\n number_of_layers = 4,\n mode = 'sigmoid' )\n\n wmhunet.load_weights( get_data(\"simwmhseg\", target_extension='.h5') )\n\n pp = wmhunet.predict( myfeatures )\n\n limg = ants.from_numpy( tf.squeeze( pp[0] ).numpy( ) )\n limg = ants.copy_image_info( templatesmall, limg )\n lesresam = ants.apply_transforms( x, limg, afftx, whichtoinvert=[False] )\n # lesresam = lesresam * 
cerebrum\n rnmdl = antspynet.create_resnet_model_3d( inshape,\n number_of_classification_labels = 1,\n layers = (1,2,3),\n residual_block_schedule = (3,4,6,3), squeeze_and_excite = True,\n lowest_resolution = 32, cardinality = 1, mode = \"regression\" )\n rnmdl.load_weights( get_data(\"simwmdisc\", target_extension='.h5' ) )\n qq = rnmdl.predict( myfeatures )\n\n lesresamb = ants.threshold_image( lesresam, wmh_thresh, 1.0 )\n lgo=ants.label_geometry_measures( lesresamb, lesresam )\n wmhsummary = pd.read_csv( get_data(\"wmh_evidence\", target_extension='.csv' ) )\n wmhsummary.at[0,'Value']=lgo.at[0,'VolumeInMillimeters']\n wmhsummary.at[1,'Value']=lgo.at[0,'IntegratedIntensity']\n wmhsummary.at[2,'Value']=float(qq)\n\n return {\n \"wmh_summary\":wmhsummary,\n \"wmh_probability_image\":lesresam,\n \"wmh_evidence_of_existence\":float(qq),\n \"wmh_max_prob\":lesresam.max(),\n \"features\":myfeatures }", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the opened camera object and the Slider object connected to the camera's exposure, adjusts the exposure to just below clipping using a binary search.
def fix_exposure(cam, slider, verbose=False):
    margin = 10
    exp_t = MAX_EXP / 2
    cam._set_exposure(exp_t * u.milliseconds)
    time.sleep(0.5)
    print("Fetching Frame")
    im = cam.latest_frame()
    x_len = len(im)

    right, left = MAX_EXP, 0
    inc = right / 10
    for _ in range(10):
        ## Determine if Clipping or Low-Exposure ##
        gap = 255
        for i in range(x_len):
            if i < margin or x_len - i < margin:
                continue
            else:
                gap = min(255 - max(im[i]), gap)

        ## Make Appropriate Adjustment ##
        if gap == 0:
            if verbose:
                print("Clipping at: ", exp_t)
            right = exp_t
        elif gap > 50:
            if verbose:
                print("Closing gap: ", gap, " w/ exposure: ", exp_t)
            left = exp_t
        else:
            if verbose:
                print("Final Exposure: ", exp_t)
            return

        if inc < 0.01:
            exp_t -= inc if gap == 0 else -inc
        else:
            exp_t = (right + left) / 2
            inc = (right - left) / 10

        slider.set_val(exp_t)
        time.sleep(1)
        im = cam.latest_frame()
[ "def fix_exposure(cam, slider, verbose=False):\n margin = 10\n print(\"Fetching Frame\")\n im = cam.latest_frame()\n x_len = len(im)\n print(\"Fetching Exposure\")\n exp_t = cam._get_exposure()\n\n right, left = exp_t*2, 0\n inc = right / 10\n for _ in range(10):\n ## Determine if Clipping or Low-Exposure ##\n gap = 1000\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n continue\n else:\n gap = min(255 - max(im[i]), gap)\n\n ## Make Appropriate Adjustment ##\n if gap == 0:\n if verbose:\n print(\"Clipping at: \", exp_t)\n right = exp_t\n elif gap > 110:\n if verbose:\n print(\"Closing gap: \", gap, \" w/ exposure: \", exp_t)\n left = exp_t\n else:\n if verbose:\n print(\"Final Exposure: \", exp_t.magnitude)\n return\n\n if inc.magnitude < 0.01:\n exp_t -= inc if gap == 0 else -inc\n else:\n exp_t = (right + left) / 2\n inc = (right - left) / 10\n\n slider.set_val(exp_t.magnitude)\n time.sleep(1)\n im = cam.latest_frame()", "def image_adjust_brightness(img, limit_left, limit_right, color_map, title):\n img_ha = exposure.rescale_intensity(img, (limit_left, limit_right))\n \n fig = plt.figure(figsize=(10, 10))\n fig.set_facecolor('white')\n plt.imshow(img_ha, cmap=color_map)\n plt.title(title)\n plt.show()\n \n return img_ha", "def set_exposure(self, expo):\n if expo == 0:\n self.exposure = 0\n elif expo == 1:\n self.exposure = min(9, self.exposure+1)\n elif expo == -1:\n self.exposure = max(-9, self.exposure-1)\n self.drone.set_exposure(self.exposure)\n log.info(f\"EXPOSURE {self.exposure}\")", "def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')", "def update_imshow(val):\n image.set_clim(vmin = image_min, # + contrastSlider.val*image_diff//8\n vmax = image_max - contrastSlider.val*image_diff) #//2", "def _update_obstacle_limits(self, low, high, obstacle_array):\n # If obstacle position is higher than the upper bound, obstacle positions will happen after the cropped fragment [low:high)\n obstacle_array[obstacle_array > high] = np.nan\n # Updates the obstacle positions\n obstacle_array = obstacle_array - low\n # If obstacle position is negative, the obstacle positions is previous to the cropped fragment [low:high).\n obstacle_array[obstacle_array < 0] = np.nan\n\n return obstacle_array", "def _fixBoundsAndDraw(self):\r\n# print \"in self.ul:\",self.ul, \"shape:\",self.shape\r\n self.ul = np.maximum(0,np.minimum(self.ul, self.imShape-self.shape))\r\n self.shape = np.minimum(np.maximum(PanAndZoomState.MIN_SHAPE,self.shape), self.imShape-self.ul)\r\n# print \"out self.ul:\",self.ul, \"shape:\",self.shape\r\n yFraction = float(self.ul[0])/max(1,self.imShape[0]-self.shape[0])\r\n xFraction = float(self.ul[1])/max(1,self.imShape[1]-self.shape[1])\r\n cv2.setTrackbarPos(self.parentWindow.H_TRACKBAR_NAME, self.parentWindow.WINDOW_NAME,int(xFraction*self.parentWindow.TRACKBAR_TICKS))\r\n cv2.setTrackbarPos(self.parentWindow.V_TRACKBAR_NAME, self.parentWindow.WINDOW_NAME,int(yFraction*self.parentWindow.TRACKBAR_TICKS))\r\n self.parentWindow.redrawImage()", "def _handler_slider2(self, event):\n if self.lungVolume == None:\n\t \treturn\n else: \n\t \tcontourValue = 
self._view_frame.lower_slider.GetValue()\n\t \tself.adjust_contour(self.lungVolume, contourValue, self.severe_mapper)\n\t \tself.create_overlay(self._view_frame.upper_slider.GetValue(),contourValue)", "def _rescale_threshold(self):\n # The current detection method\n v = self.detect_type.get()\n\n # Figure out if the current detection method \n # admits a threshold ('t') kwarg. If not, do nothing.\n try:\n # The slider index corresponding to the threshold\n s_idx = DETECT_KWARG_MAP[v].index('t')\n\n # Set the limits of the corresponding slider to \n # the minimum and maximum values of the current\n # filtered image (self.img_10)\n [self.S01, self.S11, self.S21, self.S31][s_idx].configure(\n from_=int(np.floor(self.img_10.min())),\n to=int(np.ceil(self.img_10.max()))\n )\n except ValueError:\n pass", "def apply_filter(self, src_img, slider1, slider2, slider3):\n pass", "def enable_roi_auto_exposure(xcord, ycord, image_width, image_height, hid_handle, win_size=4):\n outputLow = 0\n outputHigh = 255\n\n # Convert RoI center position to 0-255 value\n inputXLow = 0\n inputXHigh = image_width - 1\n inputXCord = xcord\n outputXCord = int(((inputXCord - inputXLow) / (inputXHigh - inputXLow)) * (outputHigh - outputLow) + outputLow)\n\n inputYLow = 0\n inputYHigh = image_height - 1\n inputYCord = ycord\n outputYCord = int(((inputYCord - inputYLow) / (inputYHigh - inputYLow)) * (outputHigh - outputLow) + outputLow)\n\n input_buffer = bytearray([0] * BUFFER_LENGTH)\n input_buffer[1] = CAMERA_CONTROL_CU20\n input_buffer[2] = SET_AE_ROI_MODE_CU20\n input_buffer[3] = AutoExpManual\n input_buffer[4] = outputXCord\n input_buffer[5] = outputYCord\n input_buffer[6] = win_size\n\n hid_write(hid_handle, input_buffer)\n output_buffer = hid_read(hid_handle)\n\n if output_buffer[6] == 0x00:\n print(\"\\nEnabling AutoExposure(RoI based) is failed\\n\")\n return False\n elif (\n output_buffer[0] == CAMERA_CONTROL_CU20\n and output_buffer[1] == SET_AE_ROI_MODE_CU20\n and output_buffer[6] == SUCCESS\n ):\n print(\"\\nAutoExposure(RoI based) is enabled\\n\")\n return True", "def reset_camera_clipping_range(self):\n self.ResetCameraClippingRange()", "def constrain_roi(self, frame):\n raise NotImplementedError", "def adjust_image(img_path: str):\n # load image and convert to greyscale, necessary for morphological filters \n img = rgb2gray(img_as_float(imread(img_path)))\n \n # kernel for morphology is a 20-pixel circle\n selem = disk(20)\n \n # black tophat filter will highlight contiguous dark regions smaller than the kernel\n # this is useful for identifying text\n img = black_tophat(img, selem)\n \n # adjust contrast by setting anything with intensity lower than 75th percentile to black\n v_min, v_max = np.percentile(img, (75,100))\n img = exposure.rescale_intensity(img, in_range=(v_min, v_max))\n \n return img_as_ubyte(img)", "def Threshold(image_array, choice):\n #opening image\n\n image_array[image_array<=choice]=0\n image_array[image_array>choice]=255\n print(image_array)\n #returning modified array of image\n return image_array", "def __uni_window(img):\n\n # initialize window and trackbars\n window = \"Mask and Applied Mask with Thresholding\"\n cv2.namedWindow(window, cv2.WINDOW_NORMAL)\n sliders = __initialize_sliders(window)\n\n # create colorspace labels to be displayed\n cspace_labels = {0:'BGR',1:'HSV',2:'HLS',3:'Lab',4:'Luv',5:'YCrCb',6:'XYZ',7:'Gray'}\n fontface = cv2.FONT_HERSHEY_SIMPLEX\n fontcolor = [0,0,0] # black text\n strokecolor = [255,255,255] # stroke around the text\n\n # initializations\n 
cspace = 0 # starting in BGR\n h, w = img.shape[:2]\n mask = np.ones((h, w), dtype=np.uint8)*255\n masked_img = img\n combo_img = np.zeros((h,2*w,3), dtype=np.uint8)\n global REDISPLAY\n\n # display window with trackbar values that can be changed\n print('Exit with [q] or [esc].')\n while(True):\n\n # display the image\n REDISPLAY = False\n\n combo_img[:h,:w] = np.repeat(mask[:,:,np.newaxis], 3, axis=2)\n combo_img[:h,w:2*w] = masked_img\n cv2.imshow(window, combo_img)\n k = cv2.waitKey(200) & 0xFF # large wait time to remove freezing\n if k == 113 or k == 27:\n break\n\n # get positions of the sliders\n slider_pos = [cv2.getTrackbarPos(sliders[i], window) for i in range(0,7)]\n cspace = slider_pos.pop(0) # take the colorspace value out of the positions\n\n # update threshold image\n if REDISPLAY: # global variable which is modified when a trackbar position moves\n mask,_,_,_ = cspaceThreshImg.main(img, cspace_labels[cspace], slider_pos)\n masked_img = cv2.bitwise_and(img, img, mask=mask)\n cv2.putText(mask, cspace_labels[cspace], (5,30), fontface, 1, strokecolor, 5) # outline\n cv2.putText(mask, cspace_labels[cspace], (5,30), fontface, 1, fontcolor, 2) # text\n\n\n cv2.destroyAllWindows()\n\n return cspace, slider_pos", "def __multi_window(img):\n\n # initialize window and trackbars\n mask_window = \"Binary mask\"\n masked_window = \"Masked image\"\n slider_window = \"Thresholding ranges\"\n cv2.namedWindow(mask_window, cv2.WINDOW_NORMAL)\n cv2.namedWindow(masked_window, cv2.WINDOW_NORMAL)\n cv2.namedWindow(slider_window, cv2.WINDOW_NORMAL)\n sliders = __initialize_sliders(slider_window)\n\n # create colorspace labels to be displayed\n cspace_labels = {0:'BGR',1:'HSV',2:'HLS',3:'Lab',4:'Luv',5:'YCrCb',6:'XYZ',7:'Gray'}\n fontface = cv2.FONT_HERSHEY_SIMPLEX\n fontcolor = [0,0,0] # black text\n strokecolor = [255,255,255] # stroke around the text\n\n # initializations\n cspace = 0 # starting in BGR\n mask = np.ones(img.shape[:2], dtype=np.uint8)\n masked_img = img\n global REDISPLAY\n\n # display window with trackbar values that can be changed\n print('Exit with [q] or [esc].')\n while(True):\n\n # display the image\n REDISPLAY = False\n cv2.imshow(mask_window, mask)\n cv2.imshow(masked_window, masked_img)\n k = cv2.waitKey(200) & 0xFF # large wait time to remove freezing\n if k == 113 or k == 27:\n break\n\n # get positions of the sliders\n slider_pos = [cv2.getTrackbarPos(sliders[i], slider_window) for i in range(0,7)]\n cspace = slider_pos.pop(0) # take the colorspace value out of the positions\n\n # update threshold image\n if REDISPLAY: # global variable which is modified when a trackbar position moves\n mask,_,_,_ = cspaceThreshImg.main(img, cspace_labels[cspace], slider_pos)\n masked_img = cv2.bitwise_and(img, img, mask=mask)\n cv2.putText(mask, cspace_labels[cspace], (5,30), fontface, 2, strokecolor, 5) # outline\n cv2.putText(mask, cspace_labels[cspace], (5,30), fontface, 2, fontcolor, 2) # text\n\n\n cv2.destroyAllWindows()\n\n return cspace, slider_pos", "def modify_exposure(self, exposure_info, test=False):\n\n if not self.is_finalized:\n warnings.warn('Data collection needs to be finalized to amend exposure information.', RuntimeWarning)\n return None\n if isinstance(exposure_info, dict):\n if 'start' in exposure_info and 'step' in exposure_info:\n raw_exposure = exposure_info['start'] + np.array(range(len(self.original_average))) * exposure_info['step']\n raw_exposure_lookup = {i: raw_exposure[n] for n, i in enumerate(self.original_index)}\n exposure = [raw_exposure_lookup[i] 
for i in self.raw_index]\n else:\n warnings.warn('Exposure information needs the keys \"start\" and \"step\".', RuntimeWarning)\n return None, None\n else:\n if len(exposure_info) == len(self.bg_parameters):\n exposure = exposure_info\n else:\n warnings.warn('Exposure information has the wrong length.', RuntimeWarning)\n return None, None\n\n order = np.argsort(exposure)\n reorder = not np.all(order[:-1] <= order[1:])\n if test:\n return exposure, reorder\n\n self.exposure = exposure\n if reorder:\n raw_images_array = np.zeros_like(self.raw_images)\n backgrounds_array = np.zeros_like(self.backgrounds)\n foregrounds_array = np.zeros_like(self.foregrounds)\n source_images = []\n meta = []\n for n, i in enumerate(order):\n raw_images_array[:, :, n] = self.raw_images[:, :, i]\n backgrounds_array[:, :, n] = self.backgrounds[:, :, i]\n foregrounds_array[:, :, n] = self.foregrounds[:, :, i]\n source_images.append(self.source_images[i])\n meta.append(self.meta_data[i])\n self.raw_images = raw_images_array\n self.backgrounds = backgrounds_array\n self.foregrounds = foregrounds_array\n self.bg_parameters = self.bg_parameters[order]\n self.raw_index = self.raw_index[order]\n self.meta_data = meta\n self.source_images = source_images\n\n self.has_exposure = True\n self.minimize_kappa()\n return self.exposure, reorder", "def sliderfunc_y(widget, event):\n i = int(widget.GetRepresentation().GetValue())\n self.pos_slider[1] = i\n self.msh = self.volume.ySlice(i).alpha(self.alpha).lighting('', la, ld, 0)\n self.msh.pointColors(cmap=self.cmap_slicer, vmin=self.rmin, vmax=self.rmax, alpha=self.alphas)\n if map2cells: self.msh.mapPointsToCells()\n self.renderer.RemoveActor(self.visibles[1])\n if i<dims[1]: self.renderer.AddActor(self.msh)\n self.visibles[1] = self.msh" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetches prediction field from prediction byte array. After TensorRT inference, prediction data is saved in byte array and returned by object detection network. This byte array contains several pieces of data about prediction; we call one such piece a prediction field. The prediction fields layout is described in TRT_PREDICTION_LAYOUT. This function, given prediction byte array returned by network, starting index of given prediction and field name of interest, returns prediction field data corresponding to given arguments.
def fetch_prediction_field(field_name, detection_out, pred_start_idx):
    return detection_out[pred_start_idx + TRT_PREDICTION_LAYOUT[field_name]]
[ "def load_predict(path=MODEL_PATH, version=VERSION, namePredictor=DEFAULT_PREDICTOR):\n logging.info(\"trying to load {}\".format(path + namePredictor + version + '.npz'))\n return np.load(path + namePredictor + version + '.npz')['pred']", "def get_fvlm_predict_fn(serving_batch_size):\n num_classes, text_dim = load_fvlm_gin_configs()\n predict_step = create_predict_step()\n anchor_boxes, image_info = generate_anchors_info()\n\n def predict_fn(params, input_dict):\n input_dict['labels'] = {\n 'detection': {\n 'anchor_boxes': anchor_boxes,\n 'image_info': image_info,\n }\n }\n output = predict_step(params, input_dict, jax.random.PRNGKey(0))\n output = output['detection']\n output.pop('rpn_score_outputs')\n output.pop('rpn_box_outputs')\n output.pop('class_outputs')\n output.pop('box_outputs')\n return output\n\n input_signatures = {\n 'image':\n tf.TensorSpec(\n shape=(serving_batch_size, _IMAGE_SIZE.value, _IMAGE_SIZE.value,\n 3),\n dtype=tf.bfloat16,\n name='image'),\n 'text':\n tf.TensorSpec(\n shape=(serving_batch_size, num_classes, text_dim),\n dtype=tf.float32,\n name='queries'),\n }\n return predict_fn, input_signatures", "def pred_reader_fn(fp_pred):\n part_pred_sample = np.array(Image.open(fp_pred), dtype=np.int32)\n pan_classes = part_pred_sample[..., 0]\n pan_inst_ids = part_pred_sample[..., 1]\n parts_output = part_pred_sample[..., 2]\n return pan_classes, pan_inst_ids, parts_output", "def predict(self, deployment_name, df):\n\n url = \"{api}/{predictions}/{name}\".format(\n api=self.inference_api, predictions=\"predictions\", name=deployment_name\n )\n if isinstance(df, pd.DataFrame):\n df = df.to_json(orient=\"records\")[1:-1].replace(\"},{\", \"} {\")\n\n if torch.is_tensor(df):\n data = json.dumps({\"data\": df.tolist()})\n else:\n try:\n data = json.loads(df)\n except TypeError as e:\n raise TypeError(\"Input data can either be dataframe or Json string: {}\".format(e))\n except json.decoder.JSONDecodeError as e:\n raise ValueError(\"Unable to parse input json string: {}\".format(e))\n\n resp = requests.post(url, data)\n if resp.status_code != 200:\n raise Exception(\n \"Unable to infer the results for the name %s. 
\"\n \"Server returned status code %s and response: %s\"\n % (deployment_name, resp.status_code, resp.content)\n )\n\n return resp.text", "def predict_one_player(player_name):\n\n cursor.execute(f\"select * from get_player_data('{player_name}')\")\n\n player_data = result_set_to_dict(cursor)\n if player_data == []:\n raise Exception(f\"No player data found for {player_name}\")\n player_data = player_data_to_array(player_data)\n player_data = scale_features(player_data)\n player_data = player_data[np.newaxis, :]\n\n prediction = model.predict(player_data)[:, -1][0][0]\n\n return prediction", "def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()", "def r_predict_index_and_label(prediction) -> str:\n index = prediction[0] - 1\n label = prediction.levels[index]\n return label", "def load_prediction(predict_name=None, metadata_only=False, predict_path=None):\n if predict_name is None:\n raise Exception(\"predict_name must be specified\")\n if predict_path is None:\n predict_path = model_output_path\n else:\n predict_path = pathlib.Path(predict_path)\n\n fq_predict = predict_path / f'{predict_name}'\n\n predict = Dataset.load(fq_predict, data_path=predict_path, metadata_only=metadata_only)\n\n return predict", "def extract_pred_from_estimator_predictions(predictions):\n # print('predictions:', predictions)\n pred = np.array([])\n for prediction in predictions:\n pred = np.append(pred, prediction['predictions'])\n num_samples = len(pred)\n pred = pred.reshape((num_samples, ))\n return pred", "def predict(self, frame: np.array) -> Tuple[List[np.array], List[str], List[float]]:\n assert isinstance(frame, np.ndarray)\n\n # return bboxes, object_bboxes, object_labels, object_scores\n return self.detector.predict_object_bbox_from_image(\n self.class_names, frame, self.detect_ids\n )", "def predict(self, x):\n\n import torchvision # lgtm [py/repeated-import]\n\n self._model.eval()\n\n # Apply preprocessing\n x, _ = self._apply_preprocessing(x[0], y=None, fit=False)\n\n transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])\n image_tensor_list: List[np.ndarray] = []\n\n if self.clip_values is not None:\n norm_factor = self.clip_values[1]\n else:\n norm_factor = 1.0\n for i in range(x.shape[0]):\n image_tensor_list.append(transform(x[i] / norm_factor).to(self.device))\n predictions = self._model(image_tensor_list)\n\n # Recreate the tracker to reset its tracks\n tracker = BYTETracker(self.tracker_args, frame_rate=self.frame_rate)\n\n results = []\n # Iterate over the batch (or timestep) of predictions and update tracker\n for frame_id, pred in enumerate(predictions):\n with torch.no_grad():\n boxes = pred[\"boxes\"]\n scores = pred[\"scores\"]\n labels = pred[\"labels\"]\n\n # Keep only predictions associated with tracked classes and whose scores is above threshold\n for tc in self.tracked_classes:\n cls_id = self.tracked_classes_map[tc]\n boxes_c = boxes[labels == cls_id].clone()\n scores_c = scores[labels == cls_id].clone()\n labels_c = labels[labels == cls_id].clone()\n\n boxes_c = boxes_c[scores_c >= self.conf_thresh]\n labels_c = labels_c[scores_c >= self.conf_thresh]\n scores_c = scores_c[scores_c >= self.conf_thresh]\n\n # Perform non-maximum suppression to remove redundant bounding boxes\n # and reformat prediction as required by tracker, which is\n # (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n nms_out_index = torchvision.ops.batched_nms(\n boxes_c,\n scores_c,\n labels_c,\n self.nms_thresh,\n )\n\n detections = torch.cat(\n (\n 
boxes_c,\n torch.unsqueeze(scores_c, 1),\n torch.ones(len(scores_c), 1).to(DEVICE),\n torch.unsqueeze(labels_c, 1),\n ),\n 1,\n )\n detections = detections[nms_out_index]\n\n # Update tracker\n if detections.size(0):\n online_targets = tracker.update(\n detections, x.shape[1:3], x.shape[1:3]\n )\n online_tlwhs = []\n online_ids = []\n online_scores = []\n for t in online_targets:\n tlwh = t.tlwh\n tid = t.track_id\n online_tlwhs.append(tlwh)\n online_ids.append(tid)\n online_scores.append(t.score)\n\n # save results\n results.append(\n (\n [\n frame_id for _ in range(len(online_ids))\n ], # Use 0-based index for MOT frame\n online_ids,\n online_tlwhs,\n online_scores,\n [cls_id for _ in range(len(online_ids))],\n [\n 1 for _ in range(len(online_ids))\n ], # visibility; not used\n )\n )\n\n # Format tracker output to format required by metrics calculation, namely,\n # tracker detections are given as 2D NDArrays with shape = (M, 9). Each row is a detection whose format is:\n # <timestep> <object_id> <bbox top-left x> <bbox top-left y> <bbox width> <bbox height> <confidence_score> <class_id> <visibility=1>\n output = [\n [f, i, *b, s, c, v]\n for result in results\n for [f, i, b, s, c, v] in zip(*result)\n ]\n output = np.asarray(output).astype(np.float32)\n output = np.expand_dims(output, 0)\n\n if self.coco_format:\n output = mot_array_to_coco(output)\n return output", "def predict(self, data):\n pass", "def _decode_record(record, name_to_features, data_type=None, debug=False, test=False):\n example = tf.io.parse_single_example(record, name_to_features)\n for name in list(example.keys()):\n # tf.Example only supports tf.int64, but the IPU only supports tf.int32.\n # So cast all int64 to int32.\n t = example[name]\n if name == \"masked_lm_weights\" and data_type is not None:\n t = tf.cast(t, dtype=data_type)\n if t.dtype == tf.int64:\n t = tf.cast(t, tf.int32)\n example[name] = t\n\n # Build labels from 'masked_lm_ids' and 'masked_lm_positions'\n masked_lm_ids = example.pop(\"masked_lm_ids\")\n masked_lm_positions = example.pop(\"masked_lm_positions\")\n masked_lm_positions_reshape = tf.reshape(masked_lm_positions, (-1, 1))\n len_seq = len(example[\"input_ids\"])\n\n mlm_long_labels = tf.scatter_nd(masked_lm_positions_reshape, masked_lm_ids, [len_seq])\n next_sentence_labels = example.pop(\"next_sentence_labels\")\n\n # Build input, targets tuple and change keys to be compatible with Hugging Face models\n inputs = {\n \"input_ids\": example.pop(\"input_ids\"),\n \"attention_mask\": example.pop(\"input_mask\"),\n \"token_type_ids\": example.pop(\"segment_ids\"),\n \"masked_lm_positions\": masked_lm_positions,\n }\n labels = (masked_lm_ids, next_sentence_labels)\n\n if not debug and not test:\n return inputs, labels\n\n if test:\n labels = (*labels, mlm_long_labels)\n return inputs, labels\n\n if debug:\n inputs.update({\"masked_lm_ids\": masked_lm_ids, \"next_sentence_labels\": next_sentence_labels})\n return inputs, labels", "def _get_model_pred(self, model_index: int, X: np.array) -> np.array:\n return self.models[model_index].predict(X)", "def get_field_from_dict(example_dict, field_name, height_m_agl=None):\n\n check_field_name(field_name)\n\n if field_name in ALL_SCALAR_PREDICTOR_NAMES:\n height_m_agl = None\n field_index = example_dict[SCALAR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_PREDICTOR_VALS_KEY][..., field_index]\n elif field_name in ALL_SCALAR_TARGET_NAMES:\n height_m_agl = None\n field_index = 
example_dict[SCALAR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[SCALAR_TARGET_VALS_KEY][..., field_index]\n elif field_name in ALL_VECTOR_PREDICTOR_NAMES:\n field_index = example_dict[VECTOR_PREDICTOR_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_PREDICTOR_VALS_KEY][..., field_index]\n else:\n field_index = example_dict[VECTOR_TARGET_NAMES_KEY].index(field_name)\n data_matrix = example_dict[VECTOR_TARGET_VALS_KEY][..., field_index]\n\n if height_m_agl is None:\n return data_matrix\n\n height_index = match_heights(\n heights_m_agl=example_dict[HEIGHTS_KEY],\n desired_height_m_agl=height_m_agl\n )\n\n return data_matrix[..., height_index]", "def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction", "def get_predictions(self, predictions_key):\n\n predictions = self.r.get(predictions_key)\n # we have the JSON with timestamp-prediction pairs.\n # Let's also return the datetime, which is encoded within the key\n dt_obj = self.get_datetime_part_of_key(predictions_key)\n # dt_str = get_non_venue_part_of_key(predictions_key)\n # dt_obj = parse(dt_str)\n return (dt_obj, predictions)", "def get_batch_prediction(BatchPredictionId=None):\n pass", "def predict(self, adata):\n tensor_data = torch.from_numpy(adata.X)\n d_preds, y_preds = self._predict(tensor_data)\n d_preds_str = np.array([self.model.domains[i] for i in d_preds])\n y_preds_str = np.array([self.model.labels[i] for i in y_preds])\n\n adata.obs['label_preds'] = y_preds_str\n adata.obs['domain_preds'] = d_preds_str" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls a web GET or POST request and returns the result as a dict
def web_request(method_name, url, dict_data, is_urlencoded=True):
    method_name = method_name.upper()  # convert the method name to uppercase
    if method_name not in ('GET', 'POST'):
        raise Exception('method_name is GET or POST plz...')

    if method_name == 'GET':  # GET request
        response = requests.get(url=url, params=dict_data)
    elif method_name == 'POST':  # POST request
        if is_urlencoded is True:
            response = requests.post(url=url, data=dict_data, headers={'Content-Type': 'application/x-www-form-urlencoded'})
        else:
            response = requests.post(url=url, data=json.dumps(dict_data), headers={'Content-Type': 'application/json'})

    dict_meta = {'status_code': response.status_code, 'ok': response.ok, 'encoding': response.encoding, 'Content-Type': response.headers['Content-Type']}
    if 'json' in str(response.headers['Content-Type']):  # JSON response
        return {**dict_meta, **response.json()}
    else:  # plain-text response
        return {**dict_meta, **{'text': response.text}}
[ "def GET(self):\n pass", "def request_vars(self):", "def get_dict_from_request(request):\n if request.method == 'GET':\n return request.GET\n elif request.method == 'POST':\n return request.POST\n else:\n raise NotImplemented", "def do_POST(self):\r\n self.do_GET()", "def params(self):\n if self._GETPOST is None:\n self._GETPOST = dict(self.GET)\n self._GETPOST.update(self.POST)", "def get(request_class):", "def test_simple_request(self):\n\n\t\tself.n = tracker.decode_request(\"?key=value\")\n\t\tself.assertEqual(self.n, {\"key\":[\"value\"]})", "def process_request(self, req, resp):\n pass", "def test_dict_for_request_in_method_get(self):\n self.request.GET = {\"foo\": \"bar\"}\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel GET request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def test_query_dict_for_request_in_method_get(self):\n self.request.GET = QueryDict(\"foo=bar\")\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel GET request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def post_requests():", "def process_request(self, request):\n request.is_get = self.is_get(request)\n request.is_post = self.is_post(request)\n request.post_data = self.post_data(request)\n request.get_data = self.get_data(request)\n request.file_data = self.file_data(request)\n request.urls = self.default_urls()\n\n def post_data_prefix(prefix):\n data = request.post_data\n if not data:\n return None\n for key in data.keys():\n if key.startswith(prefix):\n return data\n\n request.post_data_prefix = post_data_prefix", "def get_requests(self):", "def request_data(self, merge=True):\n if self.request.method == 'POST':\n if merge:\n data = self.request.POST.copy()\n if not isinstance(self.request.GET, NoVars):\n data.update(self.request.GET)\n else:\n data = self.request.POST\n else:\n data = self.request.GET\n return data", "def alarm_request():\n return dict(vars=request.vars)", "def route_dict():\n d = {\n '/weibo/add': login_required(add),\n '/weibo/delete': login_required(delete),\n '/weibo/edit': login_required(edit),\n '/weibo/update': login_required(update),\n '/weibo/index': login_required(index),\n # 评论功能\n '/comment/add': login_required(comment_add),\n '/comment/delete': login_required(comment_delete),\n '/comment/edit': login_required(comment_edit),\n '/comment/update': login_required(comment_update),\n }\n return d", "def post_params(self, request):\n return Response(request.data)", "def get_data_from_request():\n return {\n 'request': {\n 'url': '%s://%s%s' % (web.ctx['protocol'], web.ctx['host'], web.ctx['path']),\n 'query_string': web.ctx.query,\n 'method': web.ctx.method,\n 'data': web.data(),\n 'headers': dict(get_headers(web.ctx.environ)),\n 'env': dict(get_environ(web.ctx.environ)),\n }\n }", "def rquest(self, strUrl, strType, dicData):\n\n if strType == 'POST':\n res = self.rquests.post(strUrl, dicData)\n return res.text\n\n if strType == 'GET':\n res = self.rquests.get(strUrl, dicData)\n return res.text\n\n return 'type error, must POST or GET'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dictionary with the important tags for DAGMC geometries inputs
def get_dagmc_tags(my_core):
    dagmc_tags = {}
    dagmc_tags['geom_dim'] = my_core.tag_get_handle('GEOM_DIMENSION', size=1, tag_type=types.MB_TYPE_INTEGER,
                                                    storage_type=types.MB_TAG_SPARSE, create_if_missing=True)  # geometric dimension
    dagmc_tags['category'] = my_core.tag_get_handle('CATEGORY', size=32, tag_type=types.MB_TYPE_OPAQUE,
                                                    storage_type=types.MB_TAG_SPARSE, create_if_missing=True)  # the category
    dagmc_tags['global_id'] = my_core.tag_get_handle('GLOBAL_ID', size=1, tag_type=types.MB_TYPE_INTEGER,
                                                     storage_type=types.MB_TAG_SPARSE, create_if_missing=True)  # id
    return dagmc_tags
[ "def getTag(self, inputs, tag):\n result = {}\n for into in inputs:\n for i in into:\n if i in self.sim.agents:\n agentTags = self.sim.agents[i].access[\"tags\"]\n if tag in agentTags:\n result[i] = agentTags[tag]\n return result", "def material_tags(self):\n values = []\n for shape_or_component in self.shapes_and_components:\n if not isinstance(\n shape_or_component,\n (paramak.Plasma,\n paramak.PlasmaFromPoints,\n paramak.PlasmaBoundaries)):\n values.append(shape_or_component.material_tag)\n return values", "def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())", "def get_tag_list_with_attrs(cls) -> Dict[str, List[str]]:\n # TODO(sll): Cache this computation and update it on each refresh.\n # Better still, bring this into the build process so it doesn't have\n # to be manually computed each time.\n component_list = list(cls.get_all_rte_components().values())\n\n component_tags = {}\n for component_specs in component_list:\n tag_name = 'oppia-noninteractive-%s' % (\n utils.camelcase_to_hyphenated(component_specs['backend_id']))\n\n component_tags[tag_name] = [\n '%s-with-value' % ca_spec['name']\n for ca_spec in component_specs['customization_arg_specs']]\n\n return component_tags", "def comando_gne(self):\r\n if args.tag:\r\n\t if args.value:\r\n tags = self.alterar_gne_framework(args.tag, args.value)\r\n\t else:\r\n tags = self.ler_gne_framework(args.tag)\r\n\t return {args.tag:tags[args.tag]} # Ex: {\"nnf\":115}\r", "def _extra_input_signature_def(self):\n feed_dict = self.extra_compute_action_feed_dict()\n return {\n k.name: tf1.saved_model.utils.build_tensor_info(k) for k in feed_dict.keys()\n }", "def _tags(self):\n tags = self.properties.get(self.TAGS) or []\n for t in tags:\n if t[self.TAG_KEY].startswith('metering.'):\n # the user has added one, don't add another.\n return tags\n return tags + [{self.TAG_KEY: 'metering.groupname',\n self.TAG_VALUE: self.FnGetRefId()}]", "def get_all_node_tags(self):\n\n return np.unique(self.connectivity)", "def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict", "def get_tag_dict(self):\n return self.tag_dict", "def get_boundary_tags(self):\n\n tags = {}\n for v in list(self.boundary.values()):\n tags[v] = 1\n\n return list(tags.keys())", "def get_tags_gff(tagline):\n\n tags = dict()\n for t in tagline.split(';'):\n tt = t.split('=')\n tags[tt[0]] = tt[1]\n return tags", "def _getTags(self, elem):\n res = {}\n for tag in elem.iter(\"tag\"):\n res[tag.attrib[\"k\"]] = tag.attrib[\"v\"]\n return res", "def tags(self) -> dict:\n return make_tags(self.system_tag_defs)", "def dict_of_inputs(self):\n return {var.name: var for var in self.inputs}", "def feature_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationFeatureTagArgs']]]]:\n return pulumi.get(self, \"feature_tags\")", "def get_input_map(pch):\n inp_map = defaultdict(list)\n for pc in list(pch.nodes.values()):\n i_s = tuple(sorted(pc.valid_inputs(), key=lambda x: x.tdl_order))\n if len(i_s) > 0:\n inp_map[i_s] += [pc]\n return inp_map", "def getMapSymbolKeyNames(self):\n keyNames = []\n for cat in self.getCategories():\n for thmg in cat.getThematicgroups():\n for tg in thmg.getTaggroups():\n for t in tg.getTags():\n if t.getMapSymbol() is not None:\n keyNames.append([\n t.getKey().getTranslatedName(),\n t.getKey().getName(), t.getMapSymbol()])\n return keyNames", "def 
simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dictionary with MOAB ranges for each of the requested entity types inputs
def get_native_ranges(my_core, meshset, entity_types): native_ranges = {} for entity_type in entity_types: native_ranges[entity_type] = my_core.get_entities_by_type( meshset, entity_type) return native_ranges
[ "def get_entityset_ranges(my_core, meshset, geom_dim):\n\n entityset_ranges = {}\n entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n for dimension, set_type in enumerate(entityset_types):\n entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n [dimension])\n return entityset_ranges", "def get_ranges(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n ranges = {}\n\n # add all range triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.range, None)):\n if subject in property_to_id and object in entity_type_to_id:\n ranges[property_to_id[subject]] = entity_type_to_id[object]\n return ranges", "def _get_ranges_dict(self):\n\n wlr = self.wl_range\n llm = self.ll_mean\n llr = self.ll_range\n pll = self.pl_lines\n if pll > 80:\n pll_min = 45\n pll_max = pll + 10\n else:\n pll_min = pll - 10\n pll_max = pll + 10\n\n ranges = {'plength': {'val': pll,\n 'max': pll_max,\n 'min': pll_min,\n 'down_adj': 2,\n 'up_adj': 1},\n 'mean_ll': {'val': llm,\n 'max': llm + 20,\n 'min': llm - 20,\n 'down_adj': 4,\n 'up_adj': 2.5},\n 'llrange': {'val': llr,\n 'max': llr + 20,\n 'min': llr - 20,\n 'down_adj': 4,\n 'up_adj': 2},\n 'wlrange': {'val': wlr,\n 'max': wlr + 4,\n 'min': wlr - 4,\n 'down_adj': 1,\n 'up_adj': 1}}\n\n return ranges", "def make_range(chain_range_dic):\n chain_ranges = {}\n for chain in chain_range_dic:\n min_idx = min(chain_range_dic[chain])\n max_idx = max(chain_range_dic[chain])\n chain_ranges[chain] = (min_idx, max_idx)\n return chain_ranges", "def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range", "def get_etype_2_minmax_funcEnum(entitytype_arr):\n etype_2_minmax_funcEnum = {}\n s = pd.Series(entitytype_arr)\n for name, group in s.groupby(s):\n etype_2_minmax_funcEnum[name] = (min(group.index), max(group.index))\n return etype_2_minmax_funcEnum", "def _get_range_data(list_of_numbers, set_max=None):\n\n labels = []\n if set_max:\n for i in range(set_max):\n labels.append(float(i))\n else:\n for i in range(int(max(list_of_numbers) + 1)):\n labels.append(float(i))\n\n range_count = []\n\n for l in labels:\n range_count.append(list_of_numbers.count(l))\n\n data = {\"labels\": labels,\n \"range\": range_count}\n\n return data", "def list_to_dict(entities: List[Entity]) -> dict:\n 
ranges = defaultdict(list)\n for ent in entities:\n ranges[ent.name].append(ent.range)\n\n return ranges", "def _get_grouped_range(list_of_numbers, set_max, range_num):\n\n labels = []\n range_count = []\n for i in range(0, set_max - range_num, range_num):\n\n label = str(i) + \" -- \" + str(i + range_num)\n labels.append(label)\n range_list = [n for n in list_of_numbers\n if n >= i and n < (i + range_num)]\n range_count.append(len(range_list))\n\n data = {\"labels\": labels,\n \"range\": range_count}\n\n return data", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def test_get_meta_range(self):\n pass", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"entity_major\": [\n self.from_entity(entity=\"entity_major\", intent=[\"intent_major_info\", \"inform\"])],\n }", "def getRangeMM(self) -> float:\n ...", "def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"product\": [\n self.from_entity(entity=\"product\", intent=[\"inform\"]),\n ],\n \"applicant_name\": [\n self.from_entity(entity=\"applicant_name\", intent=[\"inform\"]),\n ],\n \"applicant_dob\": [\n self.from_entity(entity=\"applicant_dob\", intent=[\"inform\"]),\n ],\n \"applicant_phoneno\": [\n self.from_entity(entity=\"applicant_phoneno\", intent=[\"inform\"]),\n ],\n \"applicant_address\": [\n self.from_entity(entity=\"applicant_address\", intent=[\"inform\"]),\n ]\n }", "def getRange(self):\n \n pass", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out", "def create_prob_ranges(norm_probs):\n accum_val = 0\n ranges = []\n for pair in norm_probs:\n accum_val += pair[1]\n ranges.append(accum_val)\n return ranges" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a dictionary with MOAB Ranges that are specific to the types.MBENTITYSET type inputs
def get_entityset_ranges(my_core, meshset, geom_dim): entityset_ranges = {} entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes'] for dimension, set_type in enumerate(entityset_types): entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim, [dimension]) return entityset_ranges
[ "def getRangeMM(self) -> float:\n ...", "def _get_range_data(list_of_numbers, set_max=None):\n\n labels = []\n if set_max:\n for i in range(set_max):\n labels.append(float(i))\n else:\n for i in range(int(max(list_of_numbers) + 1)):\n labels.append(float(i))\n\n range_count = []\n\n for l in labels:\n range_count.append(list_of_numbers.count(l))\n\n data = {\"labels\": labels,\n \"range\": range_count}\n\n return data", "def _get_grouped_range(list_of_numbers, set_max, range_num):\n\n labels = []\n range_count = []\n for i in range(0, set_max - range_num, range_num):\n\n label = str(i) + \" -- \" + str(i + range_num)\n labels.append(label)\n range_list = [n for n in list_of_numbers\n if n >= i and n < (i + range_num)]\n range_count.append(len(range_list))\n\n data = {\"labels\": labels,\n \"range\": range_count}\n\n return data", "def get_native_ranges(my_core, meshset, entity_types):\n\n native_ranges = {}\n for entity_type in entity_types:\n native_ranges[entity_type] = my_core.get_entities_by_type(\n meshset, entity_type)\n return native_ranges", "def _get_ranges_dict(self):\n\n wlr = self.wl_range\n llm = self.ll_mean\n llr = self.ll_range\n pll = self.pl_lines\n if pll > 80:\n pll_min = 45\n pll_max = pll + 10\n else:\n pll_min = pll - 10\n pll_max = pll + 10\n\n ranges = {'plength': {'val': pll,\n 'max': pll_max,\n 'min': pll_min,\n 'down_adj': 2,\n 'up_adj': 1},\n 'mean_ll': {'val': llm,\n 'max': llm + 20,\n 'min': llm - 20,\n 'down_adj': 4,\n 'up_adj': 2.5},\n 'llrange': {'val': llr,\n 'max': llr + 20,\n 'min': llr - 20,\n 'down_adj': 4,\n 'up_adj': 2},\n 'wlrange': {'val': wlr,\n 'max': wlr + 4,\n 'min': wlr - 4,\n 'down_adj': 1,\n 'up_adj': 1}}\n\n return ranges", "def getMassRange(brand):\n return mass_range[brand]", "def make_range(chain_range_dic):\n chain_ranges = {}\n for chain in chain_range_dic:\n min_idx = min(chain_range_dic[chain])\n max_idx = max(chain_range_dic[chain])\n chain_ranges[chain] = (min_idx, max_idx)\n return chain_ranges", "def test_get_meta_range(self):\n pass", "def range_(self):\n return self.bset.range_", "def get_param_ranges(line_model):\n\n line_models = ['voigt', 'rosato', 'stehle', 'stehle_param', ]\n n_upper_range = [(np.nan, np.nan), (3, 7), (3, 30), (3, 9)]\n e_dens_range = [(np.nan, np.nan), (1e19, 1e22), (1e16, 1e25), (0., 1e22)]\n temp_range = [(np.nan, np.nan), (0.32, 32), (0.22, 110), (0., 1000)]\n b_field_range = [(np.nan, np.nan), (0, 5), (0, 5), (0, 5)]\n\n param_ranges = list(zip(line_models, n_upper_range, e_dens_range, temp_range, b_field_range))\n columns = ['line_model_name', 'n_upper_range', 'e_dens_range', 'temp_range', 'b_field_range']\n param_ranges = pd.DataFrame(data=param_ranges, columns=columns)\n\n n_upper_range = param_ranges['n_upper_range'][param_ranges['line_model_name'] == line_model].values[0]\n e_dens_range = param_ranges['e_dens_range'][param_ranges['line_model_name'] == line_model].values[0]\n temp_range = param_ranges['temp_range'][param_ranges['line_model_name'] == line_model].values[0]\n b_field_range = param_ranges['b_field_range'][param_ranges['line_model_name'] == line_model].values[0]\n\n return n_upper_range, e_dens_range, temp_range, b_field_range", "def range_params(self, ran, kw):\n specs = {\"range\": (SchemaNode(\"value\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\"),\n SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")),\n \"length\": (SchemaNode(\"param\").set_attr(\"name\",\"length\"),\n SchemaNode(\"param\").set_attr(\"name\",\"minLength\"),\n 
SchemaNode(\"param\").set_attr(\"name\",\"maxLength\"))}\n (exact, min_, max_) = specs[kw]\n if (len(ran) == 1 or ran[0] == ran[1]) and ran[0][0] != \"m\":\n elem = exact\n elem.text = ran[0]\n return [elem]\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def get_bpm_range():\n return BPM_DANCE_RANGE", "def _part_group_cell_mapper(bd_type):\n js, iss = np.meshgrid(range(smt.cols), range(smt.rows)) # zero indexed to agree with python interpretation\n idx = bd_type.flatten() != -1\n out = dict(zip(range(1, idx.sum() + 1), list(zip(iss.flatten()[idx], js.flatten()[idx]))))\n return out", "def get_ranges(graph: Graph, property_to_id: Dict[str, int], entity_type_to_id: Dict[str, int]) -> Dict[int, int]:\n # dictionary pointing from object property id to an entity type id\n ranges = {}\n\n # add all range triples for which the subject is an object property and the object is an entity type\n for subject, predicate, object in graph.triples((None, RDFS.range, None)):\n if subject in property_to_id and object in entity_type_to_id:\n ranges[property_to_id[subject]] = entity_type_to_id[object]\n return ranges", "def get_objective_bank_mdata():\n return {\n }", "def __rangeSet(self, group):\r\n\t\trangeSet = set()\t\r\n\r\n\t\tfor block in group[\"group\"]:\r\n\t\t\tblockRange = range(block[0], block[1] + 1)\r\n\r\n\t\t\tfor element in blockRange:\r\n\t\t\t\trangeSet.add(element)\r\n\r\n\r\n\t\treturn rangeSet", "def get_allowed_ranges(csvfile):\n from csv import DictReader\n ranges = {}\n with open(csvfile, 'r') as infile:\n # Remove spaces from field headers\n firstline = infile.readline()\n headers = [k.strip() for k in firstline.split(',')]\n if not len(headers) == 11:\n headers = [k.strip() for k in firstline.split(' ')]\n opfield = 'CSVv2;OperatingPoint'\n if not opfield in headers: opfield = 'cMVAv2;OperatingPoint'\n if not opfield in headers: opfield = 'CSV;OperatingPoint'\n\n reader = DictReader(infile, fieldnames=headers)\n for row in reader:\n key = (int(row[opfield].strip()),\n row['measurementType'].strip(),\n row['sysType'].strip(),\n int(row['jetFlavor'].strip()))\n ranges.setdefault(key, {})\n for var in ['eta', 'pt', 'discr']:\n mini = float(row['%sMin'%var].strip())\n maxi = float(row['%sMax'%var].strip())\n ranges[key]['%sMin'%var] = min(ranges[key].setdefault('%sMin'%var, mini), mini)\n ranges[key]['%sMax'%var] = max(ranges[key].setdefault('%sMax'%var, maxi), maxi)\n return ranges", "def list_to_dict(entities: List[Entity]) -> dict:\n ranges = defaultdict(list)\n for ent in entities:\n ranges[ent.name].append(ent.range)\n\n return ranges" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get side lengths of triangle inputs
def get_tri_side_length(my_core, tri): side_lengths = [] s = 0 coord_list = [] verts = list(my_core.get_adjacencies(tri, 0)) for vert in verts: coords = my_core.get_coords(vert) coord_list.append(coords) for side in range(3): side_lengths.append(np.linalg.norm(coord_list[side]-coord_list[side-2])) # The indices of coord_list includes the "-2" because this way each side will be matched up with both # other sides of the triangle (IDs: (Side 0, Side 1), (Side 1, Side 2), (Side 2, Side 0)) return side_lengths
[ "def calc_side_lengths(triangles):\n first_vec = [2, 0, 1]\n second_vec = [1, 2, 0]\n sides = triangles[:, first_vec] - triangles[:, second_vec]\n lengths = np.sqrt(np.sum(sides**2, axis=2))\n return lengths", "def triangle_area():\r\n edge = float(input(\"\"))\r\n return (math.sqrt(3)/4) * edge ** 2", "def square_triangle(sides: list) -> float:\n h_per = (sides[0] + sides[1] + sides[2]) / 2 #half-perimetr\n square = math.sqrt (h_per * (h_per- sides[0]) * (h_per - sides[1]) * (h_per - sides[2]))\n return square", "def side_length(self,k):\n #assert isinstance(k, Integer), \"Argument must be an integer\"\n return self.side_as_vector(k).norm()", "def triad_length(self):\n return self._triad_length", "def triangle_area(side1: number, side2: number, side3: number) -> number:\n s = (side1+side2+side3)/2\n area = sqrt(s*(s-side1)*(s-side2)*(s-side3))\n return sqrt(s*(s-side1)*(s-side2)*(s-side3))", "def count_triangles(A):\r\n \r\n A_3 = matrix_power(A, 3)\r\n A_trace = np.trace(A_3)\r\n num_triangles = A_trace /6\r\n return num_triangles", "def triangle (x,y,n):", "def sidelength(self) -> int:\n\n chars = self.get_char(\"border\")\n style = self.get_style(\"border\")\n if not isinstance(chars, list):\n return 0\n\n left_border, _, right_border, _ = chars\n return real_length(style(left_border) + style(right_border))", "def triangle(self, freq: int, /) -> None:", "def side_length():\n length = input(\"What is the side length of the hexagons?\")\n return float(length)", "def sides(self):\n return len(self)", "def rightangletriangles(i):\r\n lmin=int(i/(1+2**0.5))\r\n lmax=int(i/2)\r\n lt=[]\r\n for l in range(lmin, lmax):\r\n #\r\n for s in range( int((i-l)/(2**0.5)) ,l):\r\n t=i-l-s\r\n if l*l==s*s+t*t:\r\n lt.append((l,s,t))\r\n #\r\n if lt: print(i,lt)\r\n return lt", "def triangles(G):\n\n result = triangle_count_wrapper.triangles(G)\n\n return result", "def triangle(n):\n return n*(n+1)/2", "def triangle_numbers():\n return (n * (n + 1) / 2 for n in count(1))", "def getlen(self):\n if self.onlydiag():\n return self.lendiag()\n else:\n return len(self)", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - (a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def calc_triangle(triangle_attributes):\n\n # Convert attributes to float\n side_b = float(triangle_attributes[\"line_ac\"])\n side_a = float(triangle_attributes[\"line_bc\"])\n # Convert degree attribute to a radian\n radians = float(triangle_attributes[\"angle_c\"]) * (math.pi / 180)\n #Find lenght of Line C with Law of Cosines\n side_c = math.sqrt((side_b ** 2) + (side_a ** 2) - (2 * side_b * side_a * math.cos(radians)))\n\n # Output the calculated triangle leg\n print(\"\\nThe length of Side C (Line A - B) is: %.2f\" % side_c)\n input(\"\\nPress ENTER to proceed.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans the line from geometrical shape characters and replaces these with space.
def clean_text_from_geometrical_shape_unicode(line): line = re.sub(r"([\u25A0-\u25FF])", " ", line) return line
[ "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def _fix_line(line):\n line = line.strip(\"\\n\\r\\f\")\n # This is a hack to make the tagger work, but it loses information\n # TODO: could replace \"~\" with \"&tilde;\" or find the real solution\n line = line.replace('~', '')\n # backspace characters break the sdp tagger code\n line = line.replace(unichr(8), '')\n return line", "def _stripstuffing(self, line):\n if line.startswith(u' '):\n return line[1:]\n return line", "def clean_setting_line(line):\n temp_line = line.replace(' ', '')\n return temp_line", "def clean_line(line):\n return line[:72].ljust(72)", "def clean_atom(atom_line):\r\n return atom_line[:60] + ' 0 0 0'", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! 
or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def strip_specials_and_whitespace(self, line):\n newline = self.regex_sub(REGEX_SPECIAL,'',line)\n newline = self.regex_sub(REGEX_WHITESPACE, ' ', newline)\n return newline", "def _stripflow(self, line):\n if self.delete_space and line.endswith(u' '):\n return line[:-1]\n return line", "def _remove_trailing_spaces(line):\n while line.endswith(\" \") and not line.endswith(\"\\\\ \"):\n line = line[:-1]\n return line.replace(\"\\\\ \", \" \")", "def normalize_line(self, line):\n newline = line.lower()\n for item in REGEX_EXPRESSIONS:\n newline = self.regex_sub(item[0], item[1], newline)\n newline = self.strip_specials_and_whitespace(newline)\n return newline", "def get_clean_line(self):\n string = self.pos_string\n\n for index, token in reversed(list(enumerate(self.tokens))):\n if not token[2] is None and len(token[2]) > 0:\n string = string.replace(\"%\"+str(index), token[2].keys()[0])\n else: # Inline correction is not available\n if not token[1] is None:\n string = string.replace(\"%\"+str(index), token[1])\n else: # Clean token does not exist, use the original token\n string = string.replace(\"%\"+str(index), token[0])\n\n return re.sub(\" +\", \" \", string).strip()", "def remove_space(line):\n split_line = line.split()\n return \"\".join(split_line)", "def squash_crs(string):\n if isinstance(string, str):\n return re.sub('\\n[^\\n]+\\r', '\\n', string)\n else:\n return re.sub(b'\\n[^\\n]+\\r', b'\\n', string)", "def clean_text_from_private_unicode(line):\n line = re.sub(r\"([\\uE000-\\uF8FF]|\\uD83C[\\uDF00-\\uDFFF]|\\uD83D[\\uDC00-\\uDDFF])\", \" \", line)\n return line", "def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")", "def handle_line(line):\n\n # remove [] and ()\n while (\"[\" in line and \"]\" in line) or (\"(\" in line and \")\" in line):\n cutted_line = re.split(\"\\[*\\]*\", line)\n if len(cutted_line) > 1:\n del cutted_line[1]\n line = \" \".join(cutted_line)\n cutted_line = re.split(\"\\(*\\)*\", line)\n if len(cutted_line) > 1:\n del cutted_line[1]\n line = \" \".join(cutted_line)\n\n for i in range(len(line)):\n if line[i].lower() in ENGLISH_LETTERS or line[i].lower() in RUSSIAN_LETTERS:\n break\n line = line[i:]\n\n return line.strip()", "def cleanLine(line):\r\n sio = StringIO(line)\r\n lexer = shlex.shlex(sio)\r\n lexer.wordchars = wordchars\r\n # lexer.debug = 1\r\n gt = lexer.get_token\r\n # in non-posix shlex returns None for EOF, so kludge with {or ''}\r\n cmd, arg = gt() or '', gt() or ''\r\n if len(arg)>=1 and arg[0] in lexer.quotes:\r\n arg = 
arg[1:-1]\r\n return ' '.join((cmd, arg))", "def clean_string(line):#filename, raw_line=False):\n return re.sub(r\"[\\n\\t\\s]*\", \"\", line)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans the line from private unicode characters and replaces these with space.
def clean_text_from_private_unicode(line): line = re.sub(r"([\uE000-\uF8FF]|\uD83C[\uDF00-\uDFFF]|\uD83D[\uDC00-\uDDFF])", " ", line) return line
[ "def _fix_line(line):\n line = line.strip(\"\\n\\r\\f\")\n # This is a hack to make the tagger work, but it loses information\n # TODO: could replace \"~\" with \"&tilde;\" or find the real solution\n line = line.replace('~', '')\n # backspace characters break the sdp tagger code\n line = line.replace(unichr(8), '')\n return line", "def clean_text_from_geometrical_shape_unicode(line):\n line = re.sub(r\"([\\u25A0-\\u25FF])\", \" \", line)\n return line", "def clean(line):\n line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "def prepare_text_line(line):\n\n re_sub = re.sub\n # FIXME: maintain the original character positions\n\n # strip whitespace\n line = line.strip()\n\n # strip comment markers\n # common comment characters\n line = line.strip('\\\\/*#%;')\n # un common comment line prefix in dos\n line = re_sub('^rem\\s+', ' ', line)\n line = re_sub('^\\@rem\\s+', ' ', line)\n # un common comment line prefix in autotools am/in\n line = re_sub('^dnl\\s+', ' ', line)\n # un common comment line prefix in man pages\n line = re_sub('^\\.\\\\\\\\\"', ' ', line)\n # un common pipe chars in some ascii art\n line = line.replace('|', ' ')\n\n # normalize copyright signs and spacing aournd them\n line = line.replace('(C)', ' (c) ')\n line = line.replace('(c)', ' (c) ')\n # the case of \\251 is tested by 'weirdencoding.h'\n line = line.replace(u'\\251', u' (c) ')\n line = line.replace('&copy;', ' (c) ')\n line = line.replace('&#169;', ' (c) ')\n line = line.replace('&#xa9;', ' (c) ')\n line = line.replace(u'\\xa9', ' (c) ')\n # FIXME: what is \\xc2???\n line = line.replace(u'\\xc2', '')\n\n # TODO: add more HTML entities replacements\n # see http://www.htmlhelp.com/reference/html40/entities/special.html\n # convert html entities &#13;&#10; CR LF to space\n line = line.replace(u'&#13;&#10;', ' ')\n line = line.replace(u'&#13;', ' ')\n line = line.replace(u'&#10;', ' ')\n\n # normalize (possibly repeated) quotes to unique single quote '\n # backticks ` and \"\n line = line.replace(u'`', \"'\")\n line = line.replace(u'\"', \"'\")\n line = re.sub(MULTIQUOTES_RE(), \"'\", line)\n # quotes to space? but t'so will be wrecked\n # line = line.replace(u\"'\", ' ')\n\n # some trailing garbage ')\n line = line.replace(\"')\", ' ')\n\n\n # note that we do not replace the debian tag by a space: we remove it\n line = re_sub(DEBIAN_COPYRIGHT_TAGS_RE(), '', line)\n\n line = re_sub(IGNORED_PUNCTUATION_RE(), ' ', line)\n\n # tabs to spaces\n line = line.replace('\\t', ' ')\n\n # normalize spaces around commas\n line = line.replace(' , ', ', ')\n\n # remove ASCII \"line decorations\"\n # such as in --- or === or !!! 
or *****\n line = re_sub(ASCII_LINE_DECO_RE(), ' ', line)\n line = re_sub(ASCII_LINE_DECO2_RE(), ' ', line)\n\n # Replace escaped literal \\0 \\n \\r \\t that may exist as-is by a space\n # such as in code literals: a=\"\\\\n some text\"\n line = line.replace('\\\\r', ' ')\n line = line.replace('\\\\n', ' ')\n line = line.replace('\\\\t', ' ')\n line = line.replace('\\\\0', ' ')\n\n # TODO: Why?\n # replace contiguous spaces with only one occurrence\n # line = re.sub(WHITESPACE_RE(), ' ', text)\n\n # normalize to ascii text\n line = commoncode.text.toascii(line)\n # logger.debug(\"ascii_only_text: \" + text)\n\n # strip verbatim back slash and comment signs again at both ends of a line\n # FIXME: this is done at the start of this function already\n line = line.strip('\\\\/*#%;')\n\n # normalize to use only LF as line endings so we can split correctly\n # and keep line endings\n line = commoncode.text.unixlinesep(line)\n # why?\n line = lowercase_well_known_word(line)\n\n return line", "def _stripstuffing(self, line):\n if line.startswith(u' '):\n return line[1:]\n return line", "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "def clean_setting_line(line):\n temp_line = line.replace(' ', '')\n return temp_line", "def clean_line(line):\n return line[:72].ljust(72)", "def remove_unicode(text):\n regex = r\"(\\\\u....)\"\n text = re.sub(regex, ' ', text)\n return text", "def remove_special_characters(self, txt: str) -> str:", "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "def clean_string(line):#filename, raw_line=False):\n return re.sub(r\"[\\n\\t\\s]*\", \"\", line)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "def _replace_unicode_with_space(text):\n returnme = ''.join([i if ord(i) < 128 else ' ' for i in text])\n returnme = ' '.join(returnme.split()) # Change all space/newline to one space\n return returnme", "def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. 
For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()", "def strip_specials_and_whitespace(self, line):\n newline = self.regex_sub(REGEX_SPECIAL,'',line)\n newline = self.regex_sub(REGEX_WHITESPACE, ' ', newline)\n return newline" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a model as defined in model_search.yaml
def get_model_from_yaml(name): filename = pkg_resources.resource_filename('empirical_lsm', 'data/model_search.yaml') with open(filename) as f: model_dict = yaml.load(f)[name] return get_model_from_dict(model_dict)
[ "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. {}\".format(all_models))\n return\n\n return all_models[0]", "def get_search_model_string(jazzmin_settings: Dict) -> str:\n\n app, model_name = jazzmin_settings[\"search_model\"].split(\".\")\n return \"{app}.{model_name}\".format(app=app, model_name=model_name.lower())", "def get_model(model_name):\n return models.get_model('askbot', model_name)", "def get_model_definition(request):\n modelname = request.matchdict['modelname']\n results = db_model_definition(request.db)[modelname]\n for result in results:\n return result.value\n raise NotFound(\"Unknown model %s\" % modelname)", "def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj", "def get_model_by_name(cls, name):\n model_name = inflection.camelize(name) # class name of the model to use\n model = cls.models[model_name]\n return model", "def get_model(self):\n return None", "def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )", "def model_get_from_url(self, url):\n s = j.data.schema.get_from_url_latest(url=url)\n return self.model_get_from_schema(s)", "def get_model(cls, key: str) -> Optional[BaseModelParamsT]:\n key = cls._class_path_prefix() + '.' + key\n if key not in cls._registry:\n for k in cls._registry:\n logging.info('Known model: %s', k)\n return cls._registry.get(key)", "def get_model(name, **kwargs):\n name = name.lower()\n if name not in models:\n raise ValueError('%s\\n\\t%s' % (str(name), '\\n\\t'.join(sorted(models.keys()))))\n net = models[name](**kwargs)\n return net", "def get_model(self):\n return self.model", "def model_class(self):\n model_name = self.model_name()\n\n if not model_name:\n return None\n\n try:\n (app, mdl) = model_name.strip().split('.')\n except ValueError:\n logger.error(f\"Invalid 'model' parameter for setting {self.key} : '{model_name}'\")\n return None\n\n app_models = apps.all_models.get(app, None)\n\n if app_models is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no app named '{app}'\")\n return None\n\n model = app_models.get(mdl, None)\n\n if model is None:\n logger.error(f\"Error retrieving model class '{model_name}' for setting '{self.key}' - no model named '{mdl}'\")\n return None\n\n # Looks like we have found a model!\n return model", "def find_model_using_name(model_name):\n model_filename = \"models.\" + model_name + \"_model\"\n modellib = importlib.import_module(model_filename)\n model = None\n target_model_name = model_name.replace('_', '') + 'model'\n for name, cls in modellib.__dict__.items():\n if name.lower() == target_model_name.lower() \\\n and issubclass(cls, BaseModel):\n model = cls\n\n if model is None:\n print(\"In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.\" % (model_filename, target_model_name))\n exit(0)\n\n return model", "def get_model(name, *args, **kwargs):\n assert name in models.keys(), \\\n \"Unknown model name \" + name\n return models[name](*args, **kwargs)", "def get_model(name, **kw):\n if isinstance(name, 
_SubstitutionModel):\n # already a substitution model\n return name\n if name not in models:\n msg = f'Unknown model \"{name}\". Model names are case sensitive!'\n raise ValueError(msg)\n\n return _all_models[name](**kw)", "def default_model():\n return \"teenytweetynet\"", "def get_model(self):\n if self.acquiring:\n raise RuntimeError(\"Illegal operation. Scanivalve is currently acquiring data!\")\n return self.list_any_map(\"I\")[\"MODEL\"]", "def get_model_reference(self, model_name):\n\n print_debug(\"Geting model :\" + model_name)\n model = ModelsFactory.get(model_name=model_name)\n return model" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a sklearn model pipeline from a model_dict
def get_model_from_dict(model_dict): pipe_list = [] if 'transforms' in model_dict: # For basic scikit-learn transforms transforms = model_dict['transforms'].copy() if 'scaler' in transforms: scaler = transforms.pop('scaler') pipe_list.append(get_scaler(scaler)) if 'pca' in transforms: transforms.pop('pca') pipe_list.append(get_pca()) if 'poly' in transforms: args = transforms.pop('poly') pipe_list.append(get_poly(args)) if len(transforms) > 0: raise Exception("unknown transforms: %s" % repr(transforms)) if 'args' in model_dict: model = get_model_class(model_dict['class'], model_dict['args']) else: model = get_model_class(model_dict['class']) if 'clusterregression' in model_dict: from empirical_lsm.clusterregression import ModelByCluster clusterer = model_dict['clusterregression']['class'] cluster_args = model_dict['clusterregression']['args'] model = ModelByCluster( get_clusterer(clusterer, cluster_args), model) pipe_list.append(model) pipe = make_pipeline(*pipe_list) if 'lag' in model_dict: params = model_dict['lag'] pipe = get_lagger(pipe, params) elif 'markov' in model_dict: params = model_dict['markov'] pipe = get_markov_wrapper(pipe, params) if 'forcing_vars' in model_dict: pipe.forcing_vars = model_dict['forcing_vars'] else: logger.warning("Warning: no forcing vars, using defaults (all)") pipe.forcing_vars = get_config(['vars', 'met']) if 'description' in model_dict: pipe.description = model_dict['description'] return pipe
[ "def get_pipeline(model):\n \n cat=['workclass', 'education','marital-status','relationship', 'occupation', 'native-country' ]\n num=['age', 'education-num','capital-gain', 'capital-loss','hours-per-week' ]\n \n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('categoricals', Pipeline([\n ('extract', ColumnExtractor(cat)),\n ('WorkImpute', WorkImputer('workclass')),\n ('occupation_Impute', WorkImputer('occupation')),\n ('country_convert', CountryConverter(col=\"native-country\")),\n ('marital_status', MaritalStatusConverter(col= \"marital-status\")),\n ('labelEncoding', labelEnc())\n \n ])),\n ('numerics', Pipeline([\n ('extract', ColumnExtractor(num)),\n ('zero_fill', ZeroFillTransformer()),\n ('log', Log1pTransformer()),\n ('scale', StandardScalerCustom()),\n ]))\n ])),\n \n ('impute', CustomImputer()),\n model\n ])\n return pipeline\n\n pass", "def build_model():\n\n \n pipeline = Pipeline(steps=[\n ('count_vector', CountVectorizer()),\n ('tfidf',TfidfTransformer()),\n ('clf', RandomForestClassifier())\n ])\n\n # params dict to tune a model\n parameters = {\n \"clf__n_estimators\":[5,10,20,50,100]\n }\n # instantiate a gridsearchcv object with the params defined\n cv = GridSearchCV(pipeline, param_grid=parameters) \n return cv\n\n return pipeline", "def build_model():\n pipeline = Pipeline([\n ('features', FeatureUnion([\n ('text_pipeline', Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer())\n ])),\n ('starting_verb', StartingVerbExtractor())])),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n\n parameters = {\n 'features__text_pipeline__vect__max_df': (0.5, 0.75, 1.0),\n 'features__text_pipeline__tfidf__use_idf': (True, False),\n 'clf__estimator__random_state':[42],\n 'clf__estimator__max_depth': [3, 10, None]\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters, n_jobs=-1, verbose=99)\n\n return cv", "def create_pipeline(clf):\n return Pipeline([('scaler', MinMaxScaler()), ('clf', clf)])", "def sklearn_model_from_param(param, _copy=True):\n\n if _copy:\n param = copy.deepcopy(param) # Internal copy\n\n model_node, model_klass = _is_model(param)\n\n if model_node and param[0] == SpecialModels.GraphPipeline:\n\n ##########################\n ### GraphPipeline node ###\n ##########################\n\n rest_param = param[1:]\n list_args = []\n\n for i, arg in enumerate(rest_param[:-1]):\n if i == 0:\n list_args.append(sklearn_model_from_param(arg, _copy=False))\n else:\n # Second argument is edges => I don't want to translate it\n list_args.append(arg)\n\n # If last attribute is a dict, it is named arguments\n if isinstance(rest_param[-1], dict):\n dict_args = rest_param[-1]\n for k, v in dict_args.items():\n if k != \"edges\":\n dict_args[k] = sklearn_model_from_param(v, _copy=False)\n else:\n # Otherwise : just a regular param\n dict_args = {}\n if len(rest_param) == 1:\n list_args.append(sklearn_model_from_param(rest_param[-1], _copy=False))\n else:\n list_args.append(rest_param[-1])\n\n return model_klass(*list_args, **dict_args)\n\n elif model_node and param[0] != SpecialModels.GraphPipeline:\n\n ############################\n ### Classical model node ###\n ############################\n\n rest_param = param[1:]\n\n # If last attribute is a dict, it is named arguments\n if isinstance(rest_param[-1], dict):\n list_args = list(rest_param[:-1])\n dict_args = rest_param[-1]\n else:\n list_args = list(rest_param)\n dict_args = {}\n\n return model_klass(\n *sklearn_model_from_param(list_args, _copy=False), 
**sklearn_model_from_param(dict_args, _copy=False)\n )\n\n elif isinstance(param, dict):\n\n ###################\n ### Dictionnary ###\n ###################\n\n res = param.__class__()\n for k, v in param.items():\n res[k] = sklearn_model_from_param(v, _copy=False)\n\n return res\n\n elif isinstance(param, list):\n\n ############\n ### List ###\n ############\n\n return [sklearn_model_from_param(v, _copy=False) for v in param]\n\n elif isinstance(param, tuple):\n\n #############\n ### Tuple ###\n #############\n\n return tuple([sklearn_model_from_param(v, _copy=False) for v in param])\n else:\n return param", "def build_model():\n # Classifier to predict categories based on the vectorized text. The default kernel function of the SVC is RBF\n svc = SVC(class_weight=\"balanced\", gamma=\"scale\")\n\n # We have a multiple classifications per input text, that is, we have a two-dimensional target variable.\n # Use the MultiOutputClassifier to train a different Classifier per target variable\n clsfr = Pipeline([\n (\"vctzr\", CountVectorizer(tokenizer=tokenize)),\n (\"tfidf\", TfidfTransformer(use_idf=True, smooth_idf=True)),\n (\"clsfr\", MultiOutputClassifier(svc))\n ])\n\n return clsfr", "def build_model_pipeline(self):\n self.build_feature_extraction_pipeline()\n model_pipeline = Pipeline(\n [('feature_extraction_pipeline', self.feat_ext),\n ('reg', self.model_type)], verbose=True)\n return model_pipeline", "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier(class_weight = 'balanced'))) ])\n print(pipeline.get_params())\n\n parameters = {'clf__estimator__n_estimators' : [40,100], 'clf__estimator__min_samples_split' : [2,3] }\n print ('Training pipeline in GridSearhCV')\n cv = GridSearchCV(pipeline, param_grid=parameters, cv=3, scoring = 'f1_weighted', verbose = 3)\n \n return cv", "def create_pipeline(self, model_name: str, model: Any) -> Pipeline:\n # Preprocessing\n preprocessing_pipeline = self.predefined_preprocessing()\n\n # Model\n model_pipeline = Pipeline(steps=[(model_name, model)])\n\n # Merge all the steps\n pipelines = [preprocessing_pipeline, model_pipeline]\n steps = reduce(operator.add, [p.steps for p in pipelines if not (p is None)])\n return Pipeline(steps=steps)", "def create_model_pipe(self, preprocess, model):\n #---------------------------------------------------------\n #--------------------------------------------------------- \n return make_pipeline(preprocess, model)", "def build_model():\n \n # text processing and model pipeline\n pipeline = Pipeline([\n (\"vect\",CountVectorizer(tokenizer = tokenize)),\n (\"TfidfVect\",TfidfTransformer()),\n (\"clf\",MultiOutputClassifier(DecisionTreeClassifier\n (random_state=42))) ])\n \n # define parameters for for GridSearchCV\n parameters = {'clf__estimator__max_depth': [10,20]}\n \n # define scoring metrics\n scoring = {'accuracy': make_scorer(accuracy_score),\n 'precision': make_scorer(precision_score, average = 'macro'),\n 'recall': make_scorer(recall_score, average = 'macro'),\n 'f1_score': make_scorer(f1_score, average = 'macro')}\n \n # create grid search object and return as final model pipeline\n CV = GridSearchCV(estimator = pipeline,scoring = scoring ,\n param_grid = parameters,refit =\"f1_score\"\n ,cv=3,return_train_score=True)\n return CV", "def load_model():\n\n def model(row):\n raw_title = row[\"job_title_raw\"]\n clean_title = clean_raw_job_title(raw_title)\n return 
predict_soc_and_title(clean_title)\n\n return model", "def make_regressor_pipeline(X: pd.DataFrame) -> Pipeline:\n numerics = [\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"]\n num_features = list(X.dropna(axis=1, how=\"all\").select_dtypes(include=numerics).columns)\n\n # This example model only uses numeric features and drops the rest\n num_transformer = Pipeline(\n steps=[(\"imputer\", SimpleImputer(strategy=\"mean\")), (\"standardize\", StandardScaler())]\n )\n preprocessor = ColumnTransformer(transformers=[(\"num\", num_transformer, num_features)])\n\n # create model\n estimator = create_regression_model()\n\n # pipeline with preprocessor and estimator bundled\n regressor_pipeline = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"estimator\", estimator)])\n return regressor_pipeline", "def build_model():\n \n # pipeline to process num_words column\n num_words = Pipeline([\n ('selector', NumColumnSelector(key = 'num_words')),\n ('scaler', StandardScaler())\n ])\n\n # pipeline to process number of non_stopwords column\n num_non_stopwords = Pipeline([\n ('selector', NumColumnSelector(key = 'non_stopwords')),\n ('scaler', StandardScaler())\n ])\n\n # pipeline to process avg_word_len column\n avg_word_length = Pipeline([\n ('selector', NumColumnSelector(key = 'avg_word_len')),\n ('scaler', StandardScaler())\n ])\n\n # pipeline to process processed_text column\n message_processing = Pipeline([\n ('selecor', TextColumnSelector(key = 'processed_text')),\n ('tfidf', TfidfVectorizer(stop_words = 'english'))\n ])\n\n # pipeline to process length column\n length = Pipeline([\n ('selector', NumColumnSelector(key = 'length')),\n ('scaler', StandardScaler())\n ])\n\n # pipeline to process genre column\n # uncomment the lines below if genre column is provided at inference\n# genre = Pipeline([\n# ('selector', TextColumnSelector(key = 'genre')),\n# ('scaler', CustomLabelBinarizer())\n# ])\n \n # process all the pipelines in parallel using feature union\n feature_union = FeatureUnion([\n ('num_words', num_words),\n ('num_non_stopwords', num_non_stopwords),\n ('avg_word_length', avg_word_length),\n ('message_processing', message_processing),\n ('length', length)\n # ('genre_ohe', genre)\n ])\n\n # create final pipeline using Random Forest classifier\n final_pipeline = Pipeline([\n ('feature_union', feature_union),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n # use GridSearch to find best hyperparameters for the model\n # prepare dictionary of parameters\n parameters = {'feature_union__message_processing__tfidf__max_df': [0.75, 1],\n 'feature_union__message_processing__tfidf__ngram_range': [(1, 1), (1, 2)],\n 'feature_union__message_processing__tfidf__use_idf': [True, False],\n 'clf__estimator__n_estimators': [200, 300]\n }\n \n # create grid search object \n grid_cv = GridSearchCV(final_pipeline, parameters, cv = 3, n_jobs = -1)\n \n return grid_cv", "def _get_pipeline(self, params_dict):\n p = Pipeline(steps=[('normalise', StandardScaler()),\n ('add_noise', NoiseAdder()),\n ('dim_reduce', PCA()),\n ('cluster', KMeans())])\n p.set_params(**params_dict)\n return p", "def create_pipelines(seed, verbose=1):\n\n models = [\n ('LR', LogisticRegression()),\n ('LDA', LinearDiscriminantAnalysis()),\n ('KNN', KNeighborsClassifier()),\n ('CART', DecisionTreeClassifier(random_state=seed)),\n ('NB', GaussianNB()),\n ('SVM', SVC(random_state=seed, probability=True)),\n ('RF', RandomForestClassifier(max_depth=3, random_state=seed)),\n ('MLP', 
MLPClassifier(random_state=seed))\n ]\n scalers = [('StandardScaler', StandardScaler()),\n ('MinMaxScaler', MinMaxScaler()),\n ('MaxAbsScaler', MaxAbsScaler()),\n ('RobustScaler', RobustScaler()),\n ('QuantileTransformer-Normal', QuantileTransformer(output_distribution='normal')),\n ('QuantileTransformer-Uniform', QuantileTransformer(output_distribution='uniform')),\n ('PowerTransformer-Yeo-Johnson', PowerTransformer(method='yeo-johnson')),\n ('Normalizer', Normalizer())\n ]\n additions = [('PCA', PCA(n_components=4)),\n ]\n # Create pipelines\n pipelines = []\n for model in models:\n # Append only model\n model_name = \"_\" + model[0]\n pipelines.append((model_name, Pipeline([model])))\n\n # Append model+scaler\n for scalar in scalers:\n model_name = scalar[0] + \"_\" + model[0]\n pipelines.append((model_name, Pipeline([scalar, model])))\n\n # To easier distinguish between with and without Additions (i.e: PCA)\n # Append model+addition\n for addition in additions:\n model_name = \"_\" + model[0] + \"-\" + addition[0]\n pipelines.append((model_name, Pipeline([addition, model])))\n\n # Append model+scaler+addition\n for scalar in scalers:\n for addition in additions:\n model_name = scalar[0] + \"_\" + model[0] + \"-\" + addition[0]\n pipelines.append((model_name, Pipeline([scalar, addition, model])))\n\n if verbose:\n print(\"Created these pipelines:\")\n for pipe in pipelines:\n print(pipe[0])\n\n return pipelines", "def stacked_model(models):\n\n stack_m = [] \n for model, m in models.items(): \n stack_m.append((model, m))\n stack_model = StackingClassifier(estimators = stack_m, final_estimator = LogisticRegression(), cv = 3)\n models['stacked'] = stack_model\n \n return models", "def model_pipeline_run(index, model, params, X_train, y_train, X_test, y_test, model_name, pre_process_time, type):\n n_jobs = -1\n n_iter = 100\n if model is None:\n return\n try:\n row = {\"dataset_index\": index}\n if type == \"classification\":\n steps = [(\"classifier\", model)]\n else:\n steps = [(\"regressor\", model)]\n pipeline = MLPipeline(steps=steps)\n if type == \"classification\":\n if model_name == \"rf\":\n params[\"classifier__max_features\"] = [min([x, X_train.shape[1]]) for x in\n params[\"classifier__max_features\"]]\n elif \"dl\" in model_name:\n n_jobs = None\n params[\"classifier__shape\"] = [X_train.shape[1]]\n if isinstance(y_test[0], (str)):\n try:\n y_train = np.asarray(list(map(lambda x: int(re.search(\"[0-9]+\", x).group()), y_train)))\n y_test = np.asarray(list(map(lambda x: int(re.search(\"[0-9]+\", x).group()), y_test)))\n except Exception as e:\n le = LabelEncoder()\n y_train = le.fit_transform(y_train)\n y_test = le.transform(y_test)\n grid = RandomizedSearchCV(estimator=pipeline, param_distributions=params, cv=KFold(3), refit=True,\n verbose=0, n_jobs=n_jobs, n_iter=n_iter,\n scoring=\"f1\" if len(set(y_train)) == 2 else \"f1_weighted\")\n else:\n if model_name == \"rf\":\n params[\"regressor__max_features\"] = [min([x, X_train.shape[1]]) for x in\n params[\"regressor__max_features\"]]\n elif \"dl\" in model_name:\n n_jobs = None\n params[\"regressor__shape\"] = [X_train.shape[1]]\n grid = RandomizedSearchCV(estimator=pipeline, param_distributions=params, cv=KFold(3), refit=True,\n verbose=0, n_jobs=n_jobs, n_iter=n_iter, error_score=np.nan)\n model_time = time.time()\n columns = X_train.columns\n if \"dl-rnn\" in model_name:\n X_train = np.reshape(X_train.astype(\"float32\").values, (X_train.shape[0], 1, X_train.shape[1]))\n X_test = 
np.reshape(X_test.astype(\"float32\").values, (X_test.shape[0], 1, X_test.shape[1]))\n else:\n X_train = X_train.astype(\"float32\").values\n X_test = X_test.astype(\"float32\").values\n grid = grid.fit(X_train.astype(\"float32\"), y_train)\n row[\"time\"] = (time.time() - model_time) / 60\n row[\"pre_process_time\"] = pre_process_time\n return scoring(grid, X_train, X_test, y_train, y_test, columns, row=row, model_name=model_name, type=type)\n except Exception as e:\n print(e)", "def create(pdef):\n from sklearn.pipeline import Pipeline\n return [Pipeline(p) for p in pdef]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a Lag wrapper for a pipeline.
def get_lagger(pipe, kwargs): from .transforms import LagWrapper return LagWrapper(pipe, **kwargs)
[ "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def make_pipeline():\n pipeline = Pipeline(\n columns={\n \"1y_returns\": Returns(window_length=252),\n },\n screen=AverageDollarVolume(window_length=30) > 10e6\n )\n return pipeline", "def pipeline(self):\n # gotta avoid circular imports by deferring\n from .pipeline import Pipeline\n return Pipeline().from_source(self._collection)", "def pipeline_factory(pipeline_name):\n ThisPipelineClass = getattr(pipelines, pipeline_name)\n\n if not ThisPipelineClass and issubclass(ThisPipelineClass, pipelines.PipelinesBase):\n logger.error(\"Invalid pipeline: {}\".format(pipeline_name))\n sys.exit(1)\n\n return ThisPipelineClass", "def load_pipeline(pipeline_path):\n from axolotl.utils import pipeline as pipeline_utils\n pipeline = pipeline_utils.load_pipeline(pipeline_path)\n\n return pipeline", "def make_pipeline(self, steps=None):\n pipe_steps = self.custom_steps + steps if steps else self.custom_steps\n\n return Pipeline(pipe_steps)", "def get_pipeline(cls, pipeline_name: Text) -> PassPipeline:\n if pipeline_name not in cls._PIPELINE_NAME_TO_PASSES:\n raise ValueError(\n f\"There is no pipeline for `{pipeline_name}`. \"\n f\"Available pipelines: {cls._PIPELINE_NAME_TO_PASSES.keys()}\"\n )\n return PassPipeline(cls._PIPELINE_NAME_TO_PASSES[pipeline_name], pipeline_name)", "def from_pipeline(cls, pipeline, proba=None, repeat=None):\n if proba is None:\n if repeat is None:\n new_p = cls(pipeline=pipeline)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_proba() is None:\n new_p = cls(pipeline=pipeline, repeat=repeat)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, repeat=repeat)\n else:\n if pipeline.num_actions == 1 and pipeline.get_last_action_repeat() is None:\n new_p = cls(pipeline=pipeline, proba=proba)\n else:\n new_p = cls()\n new_p.append_pipeline(pipeline, proba=proba)\n return new_p", "def getPipeline(self, version):\n try:\n return Vistrail.getPipelineDispatcher[type(version)](self, version)\n except Exception, e:\n raise InvalidPipeline([e])", "def ParsePipeline(self):\n negated = False\n\n self._Peek()\n if self.token_type == Id.KW_Bang:\n negated = True\n self._Next()\n\n child = self.ParseCommand()\n assert child is not None\n\n children = [child]\n\n self._Peek()\n if self.token_type not in (Id.Op_Pipe,):\n if negated:\n node = oil_cmd.Pipeline(children, negated)\n return node\n else:\n return child\n\n while True:\n self._Next() # skip past Id.Op_Pipe or Id.Op_PipeAmp\n\n self._NewlineOk()\n\n child = self.ParseCommand()\n assert child is not None\n children.append(child)\n\n self._Peek()\n if self.token_type not in (Id.Op_Pipe,):\n break\n\n node = oil_cmd.Pipeline(children, negated)\n return node", "def pipelines(self):\r\n return pipelines.Pipelines(self)", "def process_pipeline(frame, keep_state=True):\n\n global line_lt, line_rt, processed_frames\n\n # undistort the image using coefficients found in calibration\n undistorted_img = undistort(frame, mtx, dist)\n\n # binarize the frame and highlight lane lines\n binarized_img = binarize(undistorted_img)\n\n # perspective transform to obtain bird's eye view\n birdeye_img, matrix, inversed_matrix = birdeye(binarized_img, visualise=False)\n\n # 2 order polynomial curve fit onto lane lines found\n if processed_frames > 0 and keep_state and line_lt.detected and line_rt.detected:\n 
find_lane_by_previous_fits(birdeye_img, line_lt, line_rt, visualise=False)\n else:\n find_lane_by_sliding_windows(birdeye_img, line_lt, line_rt, n_windows=9, visualise=False)\n\n # compute offset in meter from center of the lane\n offset_meter = offset_from_lane_center(line_lt, line_rt, frame_width=frame.shape[1])\n\n # draw the surface enclosed by lane lines back onto the original frame\n blend_on_road = draw_back_onto_the_road(undistorted_img, inversed_matrix, line_lt, line_rt, keep_state)\n mean_curvature_meter = np.mean([line_lt.curvature_meter, line_rt.curvature_meter])\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(blend_on_road, 'Curvature radius: {:.02f}m'.format(mean_curvature_meter), (60, 60), font, 1,\n (255, 255, 255), 2)\n cv2.putText(blend_on_road, 'Offset from center: {:.02f}m'.format(offset_meter), (60, 90), font, 1,\n (255, 255, 255), 2)\n\n processed_frames += 1\n\n return blend_on_road", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def make_pipeline():\r\n\r\n # Custom universe containing only desired assets (stocks with flag data)\r\n universe = StaticSids(my_stocks)\r\n\r\n return Pipeline(\r\n columns={\r\n #'flag_type': algo_data_full.flag_type.latest,\r\n #'flag_price': algo_data_full.flag_price.latest,\r\n #'end_flag_date': algo_data_full.end_flag_date.latest,\r\n #'end_flag_price': algo_data_full.end_flag_price.latest,\r\n 'up_flags': flag_counts.up.latest,\r\n 'down_flags': flag_counts.down.latest,\r\n 'up_ratio': up_ratios_2.up_ratio.latest,\r\n 'close': USEquityPricing.close.latest,\r\n },\r\n screen=universe\r\n )", "def pe_pipeline_cli(**kwargs): # pragma: no cover\n\n _ = pe_pipeline(**kwargs)", "def pipeline(self):\n return self._pipeline", "def make_ensemble_pipeline(pipelines):\n pipeline = StackingRegressor(regressors=pipelines, meta_regressor=MeanRegressor())\n return pipeline", "def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n \"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )", "def pipeline_hook(at: Text) -> Callable[[Any], Any]:\n def _hook(f: Callable[[Any], Any]) -> Callable[[Any], Any]:\n setattr(f, '__hook__', at)\n return cast(Callable[[Any], Any], staticmethod(f))\n return _hook" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a Markov wrapper for a pipeline.
def get_markov_wrapper(pipe, kwargs):
    from .transforms import MarkovWrapper
    return MarkovWrapper(pipe, **kwargs)
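A self-contained sketch of the call pattern only: the real MarkovWrapper lives in the package's .transforms module and is not shown in this row, so a stand-in class is defined here and the keyword arguments are invented for illustration.

class MarkovWrapper:                        # placeholder for the real .transforms class
    def __init__(self, pipe, **kwargs):
        self.pipe, self.kwargs = pipe, kwargs

def _get_markov_wrapper(pipe, kwargs):      # same shape as the factory above
    return MarkovWrapper(pipe, **kwargs)

wrapper = _get_markov_wrapper(object(), {'order': 2})   # 'order' is an assumed kwarg
print(wrapper.kwargs)                                   # {'order': 2}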
[ "def create_pipelines_lingspam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. \n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])", "def create_pipeline_for_kfold(self, args):\n return ClassificationPipeline(args=args)", "def create_pipelines(seed, verbose=1):\n\n models = [\n ('LR', LogisticRegression()),\n ('LDA', LinearDiscriminantAnalysis()),\n ('KNN', KNeighborsClassifier()),\n ('CART', DecisionTreeClassifier(random_state=seed)),\n ('NB', GaussianNB()),\n ('SVM', SVC(random_state=seed, probability=True)),\n ('RF', RandomForestClassifier(max_depth=3, random_state=seed)),\n ('MLP', MLPClassifier(random_state=seed))\n ]\n scalers = [('StandardScaler', StandardScaler()),\n ('MinMaxScaler', MinMaxScaler()),\n ('MaxAbsScaler', MaxAbsScaler()),\n ('RobustScaler', RobustScaler()),\n ('QuantileTransformer-Normal', QuantileTransformer(output_distribution='normal')),\n ('QuantileTransformer-Uniform', QuantileTransformer(output_distribution='uniform')),\n ('PowerTransformer-Yeo-Johnson', PowerTransformer(method='yeo-johnson')),\n ('Normalizer', Normalizer())\n ]\n additions = [('PCA', PCA(n_components=4)),\n ]\n # Create pipelines\n pipelines = []\n for model in models:\n # Append only model\n model_name = \"_\" + model[0]\n pipelines.append((model_name, Pipeline([model])))\n\n # Append model+scaler\n for scalar in scalers:\n model_name = scalar[0] + \"_\" + model[0]\n pipelines.append((model_name, Pipeline([scalar, model])))\n\n # To easier distinguish between with and without Additions (i.e: PCA)\n # Append model+addition\n for addition in additions:\n model_name = \"_\" + model[0] + \"-\" + addition[0]\n pipelines.append((model_name, Pipeline([addition, model])))\n\n # Append model+scaler+addition\n for scalar in scalers:\n for addition in additions:\n model_name = scalar[0] + \"_\" + model[0] + \"-\" + addition[0]\n pipelines.append((model_name, Pipeline([scalar, addition, model])))\n\n if verbose:\n print(\"Created these pipelines:\")\n for pipe in pipelines:\n print(pipe[0])\n\n return pipelines", "def make_pipeline():\n pipeline = Pipeline(\n columns={\n \"1y_returns\": Returns(window_length=252),\n },\n screen=AverageDollarVolume(window_length=30) > 10e6\n )\n return pipeline", "def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)", "def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n 
common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n \"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )", "def pipen():\n index = Pipen.PIPELINE_COUNT + 1\n pipen_simple = Pipen(\n name=f\"simple_pipeline_{index}\",\n desc=\"No description\",\n loglevel=\"debug\",\n cache=True,\n outdir=TEST_TMPDIR / f\"pipen_simple_{index}\",\n )\n\n return pipen_simple", "def make_pipeline(*steps, **kwargs):\n memory = kwargs.pop('memory', None)\n if kwargs:\n raise TypeError('Unknown keyword arguments: \"{}\"'\n .format(list(kwargs.keys())[0]))\n return Pipeline(_name_estimators(steps))", "def pipeline(self) -> Pipeline:\n if self._to_pipeline is None:\n raise AttributeError(\n \"pipeline not available because `to_pipeline` was not set on __init__.\"\n )\n return self._to_pipeline(self)", "def test_pipeline_instance(self):\n model = Pipeline([\n ('reduce_dim', PCA()),\n ('linreg', LinearRegression())\n ])\n\n self.assertTrue(isestimator(model))", "def pipeline(*args, **kwargs):\n kwargs.setdefault(\"_task_type\", DecoratedPipelineTask)\n kwargs.setdefault(\"_task_default_result\", parameter.output)\n return build_task_decorator(*args, **kwargs)", "def run(self):\n pipeline = set_pipeline()\n pipeline.fit(self.X_train, self.y_train)\n return pipeline", "def create_pipeline(path):\n\n pipeline = import_file(path)\n # Perform Wigner-Seitz analysis:\n ws = WignerSeitzAnalysisModifier(\n output_displaced=False, # Output sites\n per_type_occupancies=True, # Output occupancies per atom type\n affine_mapping=ReferenceConfigurationModifier.AffineMapping.ToReference)\n pipeline.modifiers.append(ws)\n # Calculate total and elementwise occupancies\n pipeline.modifiers.append(total_occupancy_modifier)\n # Select all defect sites\n pipeline.modifiers.append(select_defects_modifier)\n # Delete all non-defect sites\n pipeline.modifiers.append(InvertSelectionModifier())\n pipeline.modifiers.append(DeleteSelectedModifier())\n # Find defect clusters\n pipeline.modifiers.append(ClusterAnalysisModifier(\n cutoff=CLUSTER_CUTOFF,\n sort_by_size=False))\n # Classify defect clusters\n pipeline.modifiers.append(classify_defect_clusters_modifier)\n\n return pipeline", "def make_pipeline():\r\n \r\n value = Fundamentals.ebit.latest / Fundamentals.enterprise_value.latest\r\n quality = Fundamentals.roe.latest\r\n sentiment_score = SimpleMovingAverage(\r\n inputs=[stocktwits.bull_minus_bear],\r\n window_length=3,\r\n )\r\n \r\n input_columns= {\r\n 'value': value,\r\n 'quality': quality,\r\n 'sentiment_score': sentiment_score,\r\n } \r\n \r\n predictor_columns = OrderedDict()\r\n \r\n predictor_columns['Returns'] = Returns(\r\n inputs=(USEquityPricing.open,),\r\n mask=universe, window_length=PRED_N_FORWARD_DAYS + 1,\r\n )\r\n\r\n # rank all the factors and put them after returns\r\n predictor_columns.update({\r\n k:v.winsorize(min_percentile=PERCENT, max_percentile=1.0-PERCENT).zscore() for k, v in input_columns.items()\r\n })\r\n # 
v.rank(mask=universe)\r\n \r\n print predictor_columns.keys()\r\n \r\n # Create our ML pipeline factor. The window_length will control how much\r\n # lookback the passed in data will have.\r\n combined_factor = ML(\r\n inputs=predictor_columns.values(),\r\n window_length=TRAINING_WINDOW_DAYS,\r\n mask=universe,\r\n )\r\n \r\n # Build Filters representing the top and bottom baskets of stocks by our\r\n # combined ranking system. We'll use these as our tradeable universe each\r\n # day.\r\n longs = combined_factor.top(TOTAL_POSITIONS//2, )#mask=universe)\r\n shorts = combined_factor.bottom(TOTAL_POSITIONS//2,)# mask=universe)\r\n\r\n # The final output of our pipeline should only include\r\n # the top/bottom 300 stocks by our criteria\r\n long_short_screen = (longs | shorts)\r\n\r\n # Create pipeline\r\n pipe = Pipeline(\r\n columns={\r\n 'longs': longs,\r\n 'shorts': shorts,\r\n 'combined_factor': combined_factor\r\n },\r\n screen=long_short_screen\r\n )\r\n return pipe", "def create_model_pipe(self, preprocess, model):\n #---------------------------------------------------------\n #--------------------------------------------------------- \n return make_pipeline(preprocess, model)", "def make_pipeline(estimator, transform=None, **kwargs) -> ModifiedPipeline:\n\n memory = kwargs.pop('memory', None)\n verbose = kwargs.pop('verbose', None)\n n_jobs = kwargs.pop('n_jobs', None)\n target_index = kwargs.pop('target_index', None)\n target_type = kwargs.pop('target_type', None)\n base_boosting_options = kwargs.pop('base_boosting_options', None)\n random_state = kwargs.pop('random_state', None)\n n_quantiles = kwargs.pop('n_quantiles', None)\n cv = kwargs.pop('cv', None)\n chain_order = kwargs.pop('chain_order', None)\n if kwargs:\n raise TypeError('Unknown keyword arguments: %s'\n % (list(kwargs.keys())[0]))\n\n if transform is not None:\n if isinstance(transform, tuple):\n steps = [transform]\n elif isinstance(transform, list):\n steps = transform\n elif transform in _PIPELINE_TRANSFORM_CHOICE:\n # Utulize the in-built transforma options.\n if transform == 'standardscaler':\n transform = sklearn.preprocessing.StandardScaler()\n elif transform == 'boxcox':\n transform = sklearn.preprocessing.PowerTransformer(method='box-cox')\n elif transform == 'yeojohnson':\n transform = sklearn.preprocessing.PowerTransformer(method='yeo-johnson')\n elif transform == 'quantileuniform':\n transform = sklearn.preprocessing.QuantileTransformer(n_quantiles=n_quantiles,\n output_distribution='uniform',\n random_state=random_state)\n elif transform == 'quantilenormal': \n transform = sklearn.preprocessing.QuantileTransformer(n_quantiles=n_quantiles,\n output_distribution='normal',\n random_state=random_state)\n steps = [('tr', transform)]\n else:\n raise TypeError('The transform: %s was not a default str option, '\n 'tuple with (name, transform), or a list of such '\n 'tuple(s).'\n % (transform))\n else:\n steps = []\n\n # Distinguishes between single-target and multi-target regression.\n if target_type in _MULTI_TARGET:\n if chain_order is not None:\n estimator = sklearn.multioutput.RegressorChain(base_estimator=estimator,\n order=chain_order,\n cv=cv,\n random_state=random_state)\n steps.append(('reg', estimator))\n else:\n estimator = sklearn.multioutput.MultiOutputRegressor(estimator=estimator,\n n_jobs=n_jobs)\n steps.append(('reg', estimator))\n else:\n steps.append(('reg', estimator))\n\n return ModifiedPipeline(steps=steps,\n memory=memory,\n verbose=verbose,\n n_jobs=n_jobs,\n target_index=target_index,\n 
base_boosting_options=base_boosting_options)", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def build_pipe(self):\n graphs = list(zip(*self.dataset_a_train))[0]\n features = dgl.batch(graphs).ndata['node_attr'].numpy()\n pipe = Pipeline([('impute', SimpleImputer()), ('scale', StandardScaler())])\n pipe.fit(features)\n return pipe" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a scikit-learn clusterer from name and args.
def get_clusterer(name, kwargs):
    if name == 'KMeans':
        from sklearn.cluster import KMeans
        return KMeans(**kwargs)
    if name == 'MiniBatchKMeans':
        from sklearn.cluster import MiniBatchKMeans
        return MiniBatchKMeans(**kwargs)
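A hedged usage sketch for the factory above, assuming get_clusterer is in scope; the toy data and hyperparameters are invented, while KMeans and MiniBatchKMeans are the real scikit-learn estimators.

import numpy as np

X = np.random.RandomState(0).rand(100, 4)                          # toy feature matrix
km = get_clusterer('KMeans', {'n_clusters': 3, 'random_state': 0})
labels = km.fit_predict(X)                                         # one cluster id per row
mbk = get_clusterer('MiniBatchKMeans', {'n_clusters': 3, 'batch_size': 20})
mbk.fit(X)
print(labels[:10], mbk.cluster_centers_.shape)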
[ "def get_cluster(self, name):\n return clusters.get_cluster(self, name)", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def get_cluster(name: str) -> dict:\n return ECS.get_clusters([name])[0]", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ClusterArgs.__new__(ClusterArgs)\n\n __props__.__dict__[\"aggregator_or_single_rack_definition\"] = None\n __props__.__dict__[\"analytics_workspace_id\"] = None\n __props__.__dict__[\"available_upgrade_versions\"] = None\n __props__.__dict__[\"cluster_capacity\"] = None\n __props__.__dict__[\"cluster_connection_status\"] = None\n __props__.__dict__[\"cluster_extended_location\"] = None\n __props__.__dict__[\"cluster_location\"] = None\n __props__.__dict__[\"cluster_manager_connection_status\"] = None\n __props__.__dict__[\"cluster_manager_id\"] = None\n __props__.__dict__[\"cluster_service_principal\"] = None\n __props__.__dict__[\"cluster_type\"] = None\n __props__.__dict__[\"cluster_version\"] = None\n __props__.__dict__[\"compute_deployment_threshold\"] = None\n __props__.__dict__[\"compute_rack_definitions\"] = None\n __props__.__dict__[\"detailed_status\"] = None\n __props__.__dict__[\"detailed_status_message\"] = None\n __props__.__dict__[\"extended_location\"] = None\n __props__.__dict__[\"hybrid_aks_extended_location\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"managed_resource_group_configuration\"] = None\n __props__.__dict__[\"manual_action_count\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"network_fabric_id\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"support_expiry_date\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"workload_resource_ids\"] = None\n return Cluster(resource_name, opts=opts, __props__=__props__)", "def create_marker_cluster(name: str):\n return MarkerCluster(name=name)", "def launch_example_cluster_cmd(*args, **kwargs):\n return launch_example_cluster(*args, **kwargs)", "def cluster(args):\n\n # if not (args.coverage or args.index):\n # logging.error('Must specify a coverage file or contigs + reference index.')\n\n logging.info('Starting clustering process')\n perform_clustering(args)", "def __init__(self, cluster_calc, cluster_label):\n \n ## calculated clustering results\n self.cluster_calc = cluster_calc\n\n ## expected cluster results\n self.cluster_label = cluster_label", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def cluster(self) -> Cluster:", "def get_cluster_name(self):\n raise NotImplementedError(\"The method not implemented\")", "def is_sklearn_clusterer(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"clusterer\"", "def kozakov2015(*args, **kwargs):\n clusters = []\n for sel in args:\n cluster = Cluster(\"\", sel, pm.get_coords(sel))\n clusters.append(cluster)\n\n ensemble = Kozakov2015Ensemble(clusters)\n print(\n textwrap.dedent(\n f\"\"\"\n {ensemble}\n Class {ensemble.klass}\n S {ensemble.strength}\n S0 {ensemble.strength0}\n CD {ensemble.max_center_to_center}\n MD 
{ensemble.max_dist}\n \"\"\"\n )\n )", "def _GetKubernetesEngine(args):\n\n def External():\n return kubernetes.ExternalClusterContext(args.kube_context)\n\n def Minikube():\n if args.IsSpecified('minikube_profile'):\n cluster_name = args.minikube_profile\n else:\n cluster_name = kubernetes.DEFAULT_CLUSTER_NAME\n\n return kubernetes.Minikube(cluster_name, args.stop_cluster,\n getattr(args, 'minikube_vm_driver', 'docker'),\n _IsDebug())\n\n if args.IsSpecified('kube_context'):\n return External()\n else:\n return Minikube()", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_version: Optional[pulumi.Input[str]] = None,\n component_version: Optional[pulumi.Input[pulumi.InputType['SparkClusterComponentVersionArgs']]] = None,\n compute_isolation: Optional[pulumi.Input[pulumi.InputType['SparkClusterComputeIsolationArgs']]] = None,\n disk_encryptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SparkClusterDiskEncryptionArgs']]]]] = None,\n encryption_in_transit_enabled: Optional[pulumi.Input[bool]] = None,\n extension: Optional[pulumi.Input[pulumi.InputType['SparkClusterExtensionArgs']]] = None,\n gateway: Optional[pulumi.Input[pulumi.InputType['SparkClusterGatewayArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n metastores: Optional[pulumi.Input[pulumi.InputType['SparkClusterMetastoresArgs']]] = None,\n monitor: Optional[pulumi.Input[pulumi.InputType['SparkClusterMonitorArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n network: Optional[pulumi.Input[pulumi.InputType['SparkClusterNetworkArgs']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n roles: Optional[pulumi.Input[pulumi.InputType['SparkClusterRolesArgs']]] = None,\n security_profile: Optional[pulumi.Input[pulumi.InputType['SparkClusterSecurityProfileArgs']]] = None,\n storage_account_gen2: Optional[pulumi.Input[pulumi.InputType['SparkClusterStorageAccountGen2Args']]] = None,\n storage_accounts: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SparkClusterStorageAccountArgs']]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n tls_min_version: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def get_cluster(alias: str, fname: str = __cluster_config_file__, get_client: bool = True):\n if get_client:\n return ClusterList().load(fname).get_client(alias)\n else:\n return ClusterList().load(fname).select(alias)", "def run_cluster(*data):\n sample = data[0][0]\n tools = dd.get_expression_caller(data[0][0])\n work_dir = dd.get_work_dir(sample)\n out_dir = op.join(work_dir, \"seqcluster\", \"cluster\")\n out_dir = op.abspath(safe_makedir(out_dir))\n prepare_dir = op.join(work_dir, \"seqcluster\", \"prepare\")\n bam_file = data[0][0][\"work_bam\"]\n if \"seqcluster\" in tools:\n sample[\"seqcluster\"] = _cluster(bam_file, data[0][0][\"seqcluster_prepare_ma\"], out_dir, dd.get_ref_file(sample), dd.get_srna_gtf_file(sample))\n sample[\"report\"] = _report(sample, dd.get_ref_file(sample))\n\n out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, \"mirbase\"))\n if out_mirna:\n sample = dd.set_mirna_counts(sample, out_mirna[0])\n sample = dd.set_isomir_counts(sample, out_mirna[1])\n\n out_novel = _make_isomir_counts(data, \"seqbuster_novel\", op.join(work_dir, \"mirdeep2\"), \"_novel\")\n if out_novel:\n sample = dd.set_novel_mirna_counts(sample, out_novel[0])\n sample = dd.set_novel_isomir_counts(sample, 
out_novel[1])\n data[0][0] = sample\n data = spikein.combine_spikein(data)\n return data", "def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)", "def _with_cluster(fn):\n\n def wrapper(args: dict,\n project_id: str,\n creds: Credentials,\n zone: str = k.ZONE_DEFAULT):\n cluster_name = args.get('cluster_name')\n\n cluster = Cluster.get(name=cluster_name,\n project_id=project_id,\n zone=zone,\n creds=creds)\n\n return fn(args, cluster=cluster) if cluster else None\n\n return wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a sklearn scaler from a scaler name.
def get_scaler(scaler):
    if scaler == 'standard':
        from sklearn.preprocessing import StandardScaler
        return StandardScaler()
    if scaler == 'minmax':
        from sklearn.preprocessing import MinMaxScaler
        return MinMaxScaler()
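A usage sketch for the scaler factory above, assuming get_scaler is in scope; the toy array is invented, and the two scalers behave as documented in scikit-learn.

import numpy as np

X = np.array([[1.0, 200.0], [2.0, 300.0], [3.0, 400.0]])
X_std = get_scaler('standard').fit_transform(X)   # zero mean, unit variance per column
X_mm = get_scaler('minmax').fit_transform(X)      # each column rescaled to [0, 1]
print(X_std.mean(axis=0), X_mm.min(axis=0), X_mm.max(axis=0))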
[ "def get_scaler(name):\n return _autoscaling_components[SCALER_KEY][name]", "def load_scaler(fname):\n return Scaler(joblib.load(fname))", "def __create_scaler_type(self):\n\n if self.scalertype == \"standard\":\n return StandardScaler()\n if self.scalertype == \"minmax\":\n return MinMaxScaler(feature_range=self.featureRange)\n assert True, \"An error occured when creating a scaler of type '{}'\".format(self.scalertype)", "def load_scaler(self, filename: str):\n self._scaler_file = filename\n self._scaler = joblib.load(self._scaler_file)\n self.log.info(f\"Scaler loaded from {filename}\")\n return self._scaler", "def get_scaler(df, missing=\"zeros\", scaler=\"standard\", **kwargs):\n\n scalers = {'standard':'StandardScaler', 'minmax':'MinMaxScaler', 'maxabs':'MaxAbsScaler',\\\n 'robust':'RobustScaler', 'quantile':'QuantileTransformer'}\n\n s = getattr(preprocessing, scalers[scaler])\n s = s(**kwargs)\n\n df = Preprocessor.fillna(df, missing=missing)\n \n return s.fit(df)", "def _load_scaler(self, scaler_file):\n assert isinstance(scaler_file, str),\\\n \"scaler_file not entered as string.\"\n self.scaler = joblib.load(file_path(scaler_file))\n return", "def fit_scaler(self, train_dir):\n if not self.word2vec_model:\n raise ValueError('word2vec model is not trained. ' + \\\n 'Run train_word2vec() first.')\n\n if self.scaler:\n print('WARNING! Overwriting already fitted scaler.',\n file=sys.stderr)\n\n self.scaler = fit_scaler(train_dir, word2vec_model=self.word2vec_model)\n\n return self.scaler", "def test_scaler_attribute_type(self, scaler, scaler_type):\n\n x = ScalingTransformer(columns=\"b\", scaler=scaler)\n\n assert (\n type(x.scaler) is scaler_type\n ), f\"unexpected scaler set in init for {scaler}\"", "def load_scaler(self, mode='train', fold_idx=1):\n with h5py.File(self.dev_scaler_h5, 'r') as f:\n fold_str = 'fold' + str(fold_idx)\n mu = f[fold_str][mode]['mu'].value\n sigma = f[fold_str][mode]['sigma'].value\n return mu, sigma", "def get_norm(name):\n if name in _metrics.keys():\n return _metrics[name]\n raise ValueError(\"Name '{}' does not stand for any known norm\", name)", "def compute_scaler(args):\n workspace = args.workspace\n data_type = args.data_type\n dir_name = args.dir_name \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(x2d)\n print(scaler.mean_)\n print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, dir_name, \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! 
%s s\" % (time.time() - t1,))", "def train_scaler(dset, varname=None, row_dim='time', transform=True):\n \n try: \n from dask_ml.preprocessing import StandardScaler\n except: \n from sklearn.preprocessing import StandardScaler\n \n dset = dset[varname]\n space_dims = tuple(x for x in dset.dims if x != row_dim)\n dset_stack = dset.stack(z=space_dims) \n scaler = StandardScaler()\n if transform: \n data_std = scaler.fit_transform(dset_stack.data)\n dset_stack.data = data_std\n dset = dset_stack.unstack()\n return dset, scaler\n else:\n return None, scaler", "def compute_scaler(data_type):\n workspace = config.workspace\n\n if data_type == 'train':\n snr = config.Tr_SNR\n \n # Load data. \n t1 = time.time()\n hdf5_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"data.h5\")\n with h5py.File(hdf5_path, 'r') as hf:\n x = hf.get('x') \n x = np.array(x) # (n_segs, n_concat, n_freq)\n \n # Compute scaler. \n (n_segs, n_concat, n_freq) = x.shape\n x2d = x.reshape((n_segs * n_concat, n_freq))\n scaler = StandardScaler(with_mean=True, with_std=True).fit(x2d)\n# print(scaler.mean_)\n# print(scaler.scale_)\n \n # Write out scaler. \n out_path = os.path.join(workspace, \"packed_features\", \"spectrogram\", data_type, \"%ddb\" % int(snr), \"scaler.p\")\n create_folder(os.path.dirname(out_path))\n pickle.dump(scaler, open(out_path, 'wb'))\n \n print(\"Save scaler to %s\" % out_path)\n print(\"Compute scaler finished! %s s\" % (time.time() - t1,))", "def pick_scale(self, scale=None):\n if not scale:\n return SCALE_NAMES[random.randint(0, len(SCALE_NAMES) - 1)]\n else:\n return scale", "def get_scaler(env):\n low = [0] * (env.n_stock * 2 + 1)\n\n high = []\n max_price = env.stock_price_history.max(axis=1)\n min_price = env.stock_price_history.min(axis=1)\n max_cash = env.init_invest * 3 # 3 is a magic number...\n max_stock_owned = max_cash // min_price\n for i in max_stock_owned:\n high.append(i)\n for i in max_price:\n high.append(i)\n high.append(max_cash)\n\n scaler = StandardScaler()\n scaler.fit([low, high])\n return scaler", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def build_scale_controller(name: str, kwargs=None) -> Union[ScaleControllerBase, None]:\n if not name or name == 'none':\n return None\n controller_choices = {\n 'learn': LearnableScaleController,\n 'fix': FixedScaleController,\n 'relu': ReluScaleController,\n 'exp': ExpScaleController,\n 'softmax': SoftmaxScaleController,\n 'norm': NormalizeScaleController,\n }\n if name not in controller_choices:\n raise KeyError('Wrong scale controller name.')\n controller_type = controller_choices[name]\n return controller_type(**kwargs) if kwargs else controller_type()", "def save_scaler(scaler:object,name:str) -> object:\n joblib.dump(scaler,name+'.joblib')", "def load_X_scaler(self, out_tag='lstm_scaler'): \n\n print ('loading X scaler: models/{}_X_scaler.pkl'.format(out_tag))\n self.X_scaler = load(open('models/{}_X_scaler.pkl'.format(out_tag),'rb'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a PCA decomposition.
def get_pca():
    from sklearn.decomposition import PCA
    return PCA()
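A short usage sketch for the PCA object returned above, assuming get_pca is in scope; the random data is for illustration only.

import numpy as np

X = np.random.RandomState(0).rand(50, 5)
pca = get_pca()                          # default PCA keeps all components
X_low = pca.fit_transform(X)             # rows projected onto the principal axes
print(pca.explained_variance_ratio_)     # variance explained by each component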
[ "def principalComponents(self):\n\n\t\tpca = PCA()\n\t\tpca.fit(self.training_set)\n\t\treturn pca", "def pca(X = Math.array([]), no_dims = 50):\n\n\tprint (\"Preprocessing the data using PCA...\")\n\t(n, d) = X.shape;\n\tX = X - Math.tile(Math.mean(X, 0), (n, 1));\n\t(l, M) = Math.linalg.eig(Math.dot(X.T, X));\n\tY = Math.dot(X, M[:,0:no_dims]);\n\treturn Y;", "def pca(X = Math.array([]), no_dims = 50):\n\n print \"Preprocessing the data using PCA...\"\n (n, d) = X.shape;\n X = X - Math.tile(Math.mean(X, 0), (n, 1));\n (l, M) = Math.linalg.eig(Math.dot(X.T, X));\n Y = Math.dot(X, M[:,0:no_dims]);\n return Y;", "def pca_decomposition(data, dept, n_components=12):\n try:\n df_svd = pivot_df(data, dept)\n pca = PCA(n_components=n_components)\n df_low = pca.fit_transform(df_svd)\n df_inverse = pca.inverse_transform(df_low)\n\n # re-frame\n df_inverse = reframe_df(previous_df=df_svd, processed_data=df_inverse)\n return df_inverse\n\n except:\n # if pca fail,\n return pivot_df(data, dept)", "def pca(X,k):\n m = X.shape[0]\n covariance = np.dot(X.T, X) / (m-1)\n # Eigen decomposition\n eigenvals, eigenvecs = np.linalg.eig(covariance)\n #project in K dimensions\n print(\"First two Principal Components from PCA:\")\n print(eigenvecs[:,:k])\n pca_result = np.dot(X, eigenvecs[:,:k])\n return pca_result", "def do_pca(data, nr_components):\n from sklearn.decomposition import PCA\n M, frames, joi, xy = data.shape\n res = data.reshape(M,frames,joi*xy)\n res = res.reshape(M*frames, joi*xy)\n #print(data[1,3])\n #print(res[170])\n print(res.shape)\n pca = PCA(n_components=nr_components)\n new_data = pca.fit_transform(res)\n # new_data = pca.transform(res)\n print(new_data.shape)\n new = new_data.reshape(M,frames, nr_components, 1)\n return new", "def pcapply(X):\n data = PCA(n_components=40).fit_transform(X)\n _, b, _ = np.linalg.svd(X.transpose().dot(X)) # Demo mejor con 40\n plt.title('Perdida Explicación variabilidad en base al número de variables')\n plt.plot(range(10, 75), b[10:75], 'bx-')\n plt.show()\n return data", "def doPCA(self):\n data = [l.points for l in self.preprocessedLandmarks]\n data.append(data[0])\n\n S = np.cov(np.transpose(data))\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n sorted_values = np.flip(eigenvalues.argsort(), 0)[:self.pcaComponents]\n\n self.eigenvalues = eigenvalues[sorted_values]\n self.eigenvectors = eigenvectors[:, sorted_values]\n # print(self.eigenvalues)\n return self", "def pca_decomposition(\n X,\n y=None,\n ax=None,\n features=None,\n classes=None,\n scale=True,\n projection=2,\n proj_features=False,\n colors=None,\n colormap=None,\n alpha=0.75,\n random_state=None,\n colorbar=True,\n heatmap=False,\n show=True,\n **kwargs\n):\n # Instantiate the visualizer\n visualizer = PCA(\n ax=ax,\n features=features,\n classes=classes,\n scale=scale,\n projection=projection,\n proj_features=proj_features,\n colors=colors,\n colormap=colormap,\n alpha=alpha,\n random_state=random_state,\n colorbar=colorbar,\n heatmap=heatmap,\n **kwargs\n )\n\n # Fit and transform the visualizer (calls draw)\n visualizer.fit(X, y)\n visualizer.transform(X, y)\n\n if show:\n visualizer.show()\n else:\n visualizer.finalize()\n\n # Returns the visualizer object.\n return visualizer", "def pca(self):\n self.pca_mean = self.X.mean(axis=1)\n X_meanC = self.X - self.pca_mean[:, None]\n (self.pca_U, self.pca_S, self.pca_V) = np.linalg.svd(X_meanC, full_matrices=False)\n self.pc_weights = np.dot(np.diag(self.pca_S), self.pca_V)\n self.pc_stdevs = np.std(self.pc_weights, axis=1)", "def 
perform_pca(A):\r\n # First subtract the mean\r\n M = (A-numpy.mean(A.T, axis=1)).T\r\n # Get eigenvectors and values of covariance matrix\r\n return numpy.linalg.eig(numpy.cov(M))", "def _get_PCA(self, comps=2, max_rows=1000):\n\n rnd = np.random.RandomState(self.number)\n\n if len(self) > max_rows:\n rows = rnd.choice(self.elements_idx, max_rows, replace=False)\n else:\n rows = self.elements_idx\n\n m = toArray(self.ensemble.data[rows])\n P = sklearn.decomposition.PCA(comps, random_state=rnd, svd_solver='randomized')\n P.fit(m)\n norm = sklearn.preprocessing.normalize(P.components_)\n return norm", "def classic_PCA(Input_Data, standarize = True):\n if standarize:\n Data = standarize_data(Input_Data)\n else:\n Data = np.copy(Input_Data)\n eigenvectors_cols,eigenvalues,eigenvectors_rows = np.linalg.svd(np.cov(Data))\n idx = eigenvalues.argsort()\n eigenvalues = eigenvalues[idx[::-1]]\n eigenvectors_cols = eigenvectors_cols[:,idx[::-1]]\n eigenvectors_rows = eigenvectors_rows[idx[::-1],:]\n # Return: V matrix, eigenvalues and the principal components.\n return eigenvectors_rows,eigenvalues,np.dot(eigenvectors_rows,Data)", "def pca(x):\n\t\n\tx = (x - x.mean(axis = 0)) # Subtract the mean of column i from column i, in order to center the matrix.\n\t\n\tnum_observations, num_dimensions = x.shape\n\t\n\t# Often, we have a large number of dimensions (say, 10,000) but a relatively small number of observations (say, 75). In this case, instead of directly computing the eigenvectors of x^T x (a 10,000 x 10,000 matrix), it's more efficient to compute the eigenvectors of x x^T and translate these into the eigenvectors of x^T x by using the transpose trick. \n\t# The transpose trick says that if v is an eigenvector of M^T M, then Mv is an eigenvector of MM^T.\n\t# We arbitrarily select \"100\" as the switching threshold. 
Another approach is to switch by comparing num_observations and num_dimensions.\n\tif num_dimensions > 100:\n\t\teigenvalues, eigenvectors = linalg.eigh(dot(x, x.T))\n\t\tv = (dot(x.T, eigenvectors).T)[::-1] # Unscaled, but the relative order is still correct.\n\t\ts = sqrt(eigenvalues)[::-1] # Unscaled, but the relative order is still correct.\n\telse:\n\t\tu, s, v = linalg.svd(x, full_matrices = False)\n\t\t\n\treturn v, s", "def doPCA(pairs, embedding, num_components=10):\n matrix = []\n for a, b in pairs:\n center = (embedding.v(a) + embedding.v(b)) / 2\n matrix.append(embedding.v(a) - center)\n matrix.append(embedding.v(b) - center)\n matrix = np.array(matrix)\n pca = PCA(n_components=num_components)\n pca.fit(matrix)\n # bar(range(num_components), pca.explained_variance_ratio_)\n return pca", "def compute_pca(A, num_dimensions):\n\n from sklearn import decomposition\n\n pca = decomposition.PCA(n_components=num_dimensions)\n return pca.fit_transform(A)", "def _apply_pca(self, X):\n newX = np.reshape(X, (-1, X.shape[2]))\n pca = sklearnPCA(n_components=self.num_components, whiten=True)\n newX = pca.fit_transform(newX)\n newX = np.reshape(newX, (X.shape[0], X.shape[1], self.num_components))\n return newX", "def pca(X: np.array, k: int) -> np.array:\n n, d = X.shape\n X = X - np.mean(X, 0) # mean value of each dimension\n C = np.dot(np.transpose(X), X) # covariance matrix\n if not PCA._check_real_symmetric(C):\n raise ArithmeticError('Covariance matrix is not real symmetric')\n eig_val, eig_vec = np.linalg.eig(C) # eigenvalue, eigenvector\n eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(d)] # eigen-value-vector tuples\n topk_pairs = heapq.nlargest(k, eig_pairs) # retrieve top-k eigenvalue pairs\n P = np.array([pair[1] for pair in topk_pairs]) # permutation matrix\n return np.dot(np.real(P), np.transpose(X)).T", "def train_pca(train_file, dimension=100):\n print 'PCA for grasp training data...\\n'\n f_feat = open(train_file, \"r\")\n first_line = f_feat.readline()\n tags = first_line.split(',')\n idx_f = -1\n idx_s = -1\n for i in range(len(tags)):\n if tags[i].find(\"feature\") == 0:\n idx_f = i\n if tags[i] == \"side\":\n idx_s = i\n assert(idx_f != -1 and idx_s != -1)\n features = f_feat.readlines()\n feature_listOfList = []\n for item in features:\n item_split = item.split(',')\n #only process right hand; modify this if double hands are to be process\n if \"left\" == item_split[idx_s]:\n continue\n feature_listOfList.append(item_split[idx_f:len(item_split)-1])\n dataMatrix = np.array(feature_listOfList, dtype=np.float32)\n print dataMatrix.shape, dataMatrix.dtype\n mean, eigenvectors = cv2.PCACompute(data=dataMatrix, maxComponents=dimension)\n print mean.shape, eigenvectors.shape\n \n return [mean, eigenvectors]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a PolynomialFeatures transform.
def get_poly(kwargs):
    from sklearn.preprocessing import PolynomialFeatures
    return PolynomialFeatures(**kwargs)
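A usage sketch for the PolynomialFeatures factory above, assuming get_poly is in scope; the degree and the tiny input matrix are assumptions for illustration.

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
poly = get_poly({'degree': 2, 'include_bias': False})
X_poly = poly.fit_transform(X)           # columns: x1, x2, x1^2, x1*x2, x2^2
print(X_poly)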
[ "def make_poly_features(x):\n poly = PolynomialFeatures(degree=5)\n new_x = poly.fit_transform(x)\n return new_x", "def polyfeatures(X: np.ndarray, degree: int) -> np.ndarray:\n poly = PolynomialFeatures(degree=degree)\n X = poly.fit_transform(X.reshape(-1,1))\n \n return(X[:,1:])", "def polyFeatures(X, p):\n # You need to return the following variables correctly.\n X_poly = np.zeros((X.shape[0], p))\n\n for i in range(1,p+1):\n X_poly[:,i-1] = X[:,0]**i \n return X_poly", "def create_polynomial_features(x, polynomial_order):\n # Skip the first order features\n polynomial_features = [x[:, 0:6] ** (i + 1) for i in range(0, polynomial_order)]\n return np.hstack(polynomial_features + [x[:, 6:7]])", "def polynomize(X, p):\n poly = preprocessing.PolynomialFeatures(p)\n return poly.fit_transform(X) # añade automaticamente la columna de 1s", "def polyfeatures(self, X, degree):\n #TODO\n \n for d in range(2,degree+1):\n X = np.append(X,X[:,[0]]**d,1)\n \n return X", "def polynomialize(features, power):\n poly = PolynomialFeatures(degree = power)\n featpoly = poly.fit_transform(features)\n poly_columns = poly.get_feature_names(features.columns)\n return pd.DataFrame(featpoly, columns=poly_columns)", "def polynomial_features(X, degree):\n if X.ndim == 1:\n # Treat the numpy arrays as mathematical column vectors\n X = X.reshape(-1, 1)\n features = [np.ones(X.shape[0])]\n for degree in range(1, degree + 1):\n for items in itertools.combinations_with_replacement(X.T, degree):\n features.append(functools.reduce(lambda x, y: x * y, items))\n return np.asarray(features).T", "def make_features(x):\n x = x.unsqueeze(1)\n return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)", "def make_features(x):\n x = x.unsqueeze(1)\n return torch.cat([x ** i for i in range(1, POLY_DEGREE+1)], 1)", "def polynomial_features(df, columns, degree=2, include_bias=False,\n copy=True):\n from sklearn.preprocessing import PolynomialFeatures\n poly = PolynomialFeatures(degree, include_bias=include_bias)\n\n X = df[columns].get_values()\n X_poly = poly.fit_transform(X)\n\n target_feature_names = []\n power_columns = [zip(df[columns], p) for p in poly.powers_]\n\n for power_column in power_columns:\n powers = []\n for pair in power_column:\n if pair[1] != 0:\n if pair[1] == 1:\n powers.append('{}'.format(pair[0]))\n else:\n powers.append('{}^{}'.format(pair[0], pair[1]))\n target_feature_names.append('x'.join(powers))\n\n df_poly = pd.DataFrame(X_poly, columns=target_feature_names)\n\n if copy:\n df_output = df.copy()\n else:\n df_output = df\n\n df_output.drop(columns, axis=1, inplace=True)\n df_output.reset_index(inplace=True)\n\n return pd.concat((df_output, df_poly), axis=1)", "def build_poly(self, x, degree):\n # send features in high dimension using polynomial expansion \n poly = np.ones((len(x), 1))\n for deg in range(1, degree+1):\n poly = np.c_[poly, np.power(x, deg)]\n return poly", "def _make_features(self, x):\n\t\tx = x.unsqueeze(1)\n\t\treturn torch.cat([x ** i for i in range(1, self._degree+1)], 1)", "def gen_features(self, X):", "def polynomial_basis(X, degree):\n n_samples, n_features = X.shape\n\n # The number of monomials is (n + d) choose d\n n_monomials = int(factorial(n_features + degree)/(factorial(n_features)*factorial(degree)))\n features = np.ones((n_monomials, n_samples))\n col = 1\n x_T = X.T\n\n for deg in range(1, degree + 1):\n for combs in combinations_with_replacement(x_T, deg):\n features[col, :] = reduce(lambda x, y: x * y, combs)\n col += 1\n return features.T", "def 
toyData(w,sigma,N): \n #Degree of polynomial \n degree=w.size; \n \n #generate x values \n x=np.linspace(0, 1,N);\n \n poly=preprocessing.PolynomialFeatures(degree-1,include_bias=True)\n \n PHI=poly.fit_transform(x.reshape(N,1)) \n \n y=np.dot(PHI,w);\n \n target=y+np.random.normal(0, sigma, N);\n \n Out=[x,y,PHI, target]\n\n return Out", "def polynomial_features(y_in, order=3, include_labels=False):\n assert order in (1, 2, 3), \"Polynomial order 1, 2 or 3 only\"\n\n if len(y_in.shape) == 1:\n y_in = y_in.reshape(-1, 1) # Reshape vector to matrix\n n = y_in.shape[1]\n y_out_cols = []\n\n # Poly order 0\n y_out_cols.append(np.ones((len(y_in), 1)))\n\n # Poly order 1\n y_out_cols.append(y_in)\n\n # Poly order 2\n if order >= 2:\n for i in range(n):\n y_out_cols.append(y_in[:, i:] * y_in[:, i].reshape(-1, 1))\n\n # Poly order 3\n if order >= 3:\n # Use poly order 2 results\n results = y_out_cols[-n:]\n for j in range(0, n):\n for result in results[j:]:\n y_out_cols.append(result * y_in[:, j].reshape(-1, 1))\n\n theta = np.hstack(y_out_cols)\n if include_labels is False:\n return theta\n else:\n return theta, polynomial_feature_labels(n, order)", "def add_polynomial_features(x, power):\n if type(power) is int and type(x) is np.ndarray:\n return np.concatenate([x**i for i in range(1, power+1)], axis=1)\n return None", "def train_polynomialRegressionModel(X, y, degree=2, interaction_only=False,\n include_bias=True):\n model = Pipeline([('poly', PolynomialFeatures(degree=degree)),\n ('linear', LinearRegression(fit_intercept=False))])\n model = model.fit(X, y)\n return model" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check Gmail. E.g. messages, unseen = imap.check_gmail('username.com', 'password')
import imaplib
import re

def check_gmail(username, password):
    i = imaplib.IMAP4_SSL('imap.gmail.com')
    try:
        i.login(username, password)
        x, y = i.status('INBOX', '(MESSAGES UNSEEN)')
        messages = int(re.search('MESSAGES\s+(\d+)', y[0]).group(1))
        unseen = int(re.search('UNSEEN\s+(\d+)', y[0]).group(1))
        return messages, unseen
    except:
        return False, 0
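An illustrative call for the function above; the credentials are placeholders (Gmail IMAP normally requires an app password) and the call needs network access, so treat this purely as a sketch.

# Placeholder credentials -- replace with a real address and app password.
messages, unseen = check_gmail('username@example.com', 'app-password')
if messages is False:
    print('login or status check failed')
else:
    print('%d messages, %d unseen' % (messages, unseen))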
[ "def checkEmail():\n\tpop_conn = poplib.POP3_SSL('pop.gmail.com')\n\tpop_conn.user('')\n\tpop_conn.pass_('')\n\t#Get messages from server:\n\tmessages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n\t# Concat message pieces:\n\tmessages = [\"\\n\".join(mssg[1]) for mssg in messages]\n\t#Parse message intom an email object:\n\tmessages = [parser.Parser().parsestr(mssg) for mssg in messages]\n\tflag = 0\n\tsweep = None\n\tfor message in messages:\n\t\tsubject = message['subject']\n\t\tif subject is None:\n\t\t\tcontinue\n\t\telif \"CommenceSweep:\" in subject:\n\t\t\tstart = subject.find(\":\")\n\t\t\tcommand = subject[start+1:]\n\t\t\tprint command\n\t\t\tif \"Comp\"+sys.argv[1] in command:\n\t\t\t\tstart = command.find(\"-\")\n\t\t\t\tsweep = command[start+1:]\n\t\t\t\tprint sweep\n\t\t\t\tpoplist = pop_conn.list()\n\t\t\t\tmsglist = poplist[1]\n\t\t\t\tfor msgspec in msglist:\n\t\t\t\t\tdelete = int(msgspec.split(' ')[0])\n\t\t\t\t\tpop_conn.dele(delete)\n\t\t\t\tflag = 1\n\tpop_conn.quit()\n\treturn flag, sweep", "def check_email():\n\n\n client = Gmail()\n USERNAME = 'ifthisthenthatclone'\n PASSWORD = 'ifthisthenthat'\n client.login(USERNAME,PASSWORD)\n\n if client.inbox().mail(unread=True):\n\n unread = client.inbox().mail(unread=True)\n\n unread[0].fetch()\n print \"The input string is {}\".format(unread[0].body)\n print \"\"\n # FORMAT MUST BE (ARTIST, SONG NAME)\n\n input_string = unread[0].body\n input_split = input_string.split(',')\n artist=input_split[0].lower()\n song_name=input_split[1].lower()\n test = song_name.rstrip()\n unread[0].read()\n\n else:\n #print \"YOU HAVE READ EVERYTHING\"\n artist = None\n test = None\n\n return artist, test", "def check_gmailness(self):\r\n if not GIMAPFetcher.GMAIL_EXTENSION in self.get_capabilities():\r\n raise Exception(\"GIMAPFetcher is not connected to a IMAP GMAIL server. 
Please check host (%s) and port (%s)\" \\\r\n % (self.host, self.port))\r\n \r\n return True", "def ztest_retrieve_gmail_ids(self):\r\n gimap = imap_utils.GIMAPFetcher('imap.gmail.com', 993, self.login, self.passwd)\r\n \r\n gimap.connect()\r\n \r\n criteria = ['Before 1-Oct-2004']\r\n #criteria = ['ALL']\r\n ids = gimap.search(criteria)\r\n \r\n res = gimap.fetch(ids, [gimap.GMAIL_ID])\r\n \r\n self.assertEquals(res, {27362: {'X-GM-MSGID': 1147537963432096749L, 'SEQ': 14535}, 27363: {'X-GM-MSGID': 1147537994018957026L, 'SEQ': 14536}})", "def check_for_new_data():\n SCOPES = ['https://mail.google.com/']\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('creds_4.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n stamp = int(time.time()) - 3600\n # Call the Gmail API\n results = service.users().messages().list(userId='me',q=f\"from:notify@google.com after:{stamp}\").execute()\n if results[\"resultSizeEstimate\"] > 0:\n populate_database()", "def main():\n \n ####GET ALL MESSAGES FROM GMAIL###\n # gmail_usr_name = raw_input(\"Enter the gmail user name: \\n\")\n # gmail_passwrd = getpass.getpass(\"Enter the Gmail password: \\n\")\n print(\"Please wait while message IDs for Gmail are populated...\")\n gmail_accumulator = Accumulator.Accumulator(GMAIL_PATH, \"usr_name\", \"passwrd\",\n IMAP_PORT, GMAIL_FOLDER)\n gmail_msg_ids = gmail_accumulator.get_ids()\n pprint.pprint(gmail_msg_ids)\n \n ####GET ALL MESSAGES FROM IMAP###\n #IMAP2_usr_name = raw_input(\"Enter the IMAP2 user name: \\n\")\n #IMAP2_passwrd = getpass.getpass(\"Enter the IMAP2 password: \\n\")\n print(\"Please wait while message IDs for IMAP are populated\")\n \n IMAP2_accumulator = Accumulator.Accumulator(\"imap2.lbl.gov\", \"usr_name\", \"passwrd\",\n IMAP_PORT, IMAP2_FOLDER)\n IMAP2_msg_ids = IMAP2_accumulator.get_ids()\n pprint.pprint(IMAP2_msg_ids)\n \n gmail_unique_ids = gmail_accumulator.get_unique_ids()\n ###FIND THE DIFFERENCES BETWEEN IMAP AND GMAIL.####\n compare_ids = Comparator.Comparator(IMAP2_msg_ids, gmail_unique_ids)\n diff_ids = compare_ids.compare()\n \n ###FIND THE DUPLICATE IDs FROM IMAP2.###\n \n dups = IMAP2_accumulator.get_duplicate_ids()\n dup_headers = header_info(dups, IMAP2_accumulator)\n print(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n print(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n \n print(\"-------------------------------------------------------------------------------------\")\n print(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2}\\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n print(\"--------------------------------------------------------------------------------------\")\n pprint.pprint(diff_ids)\n\n 
print(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n headers = header_info(diff_ids, IMAP2_accumulator)\n\n ###print a table of the info of the missing messages.###\n table = prettytable.PrettyTable([\"TO\", \"FROM\", \"SUBJECT\"])\n table.align[\"TO\"] = \"l\"\n table.padding_width = 1\n for hdr in headers:\n table.add_row(hdr)\n print(table)\n\n\n ###write the output to OUTPUT_FILE.###\n\n output_file = open(OUTPUT_FILE, 'w')\n output_file.write(\"\\n\")\n output_file.write(\"{num_msgs} messages in IMAP2/{fldr}\\n\".format(num_msgs = IMAP2_accumulator.count_ids(), fldr = IMAP2_accumulator.folder))\n output_file.write(\"{num_msgs} messages in GMAIL/{fldr}\\n\".format(num_msgs = gmail_accumulator.count_ids(), fldr = gmail_accumulator.folder))\n output_file.write(\"There are {num} messages in IMAP2/{fldr1} which are not in Gmail/{fldr2} \\n\".format(num = len(diff_ids),\n fldr1 = IMAP2_accumulator.folder,\n fldr2 = gmail_accumulator.folder))\n output_file.write(\"Here is a list of the headers of each message ID which is not in Gmail:\\n\")\n for ids in diff_ids:\n output_file.write(str(ids))\n output_file.write(\"\\n\")\n output_file.write(\"\\n\")\n\n ###OUUTPUT THE TABLE###\n\n output_file.write(str(table)) \n output_file.write(LINE_SEPARATOR)\n\n output_file.close()\n\n ucb.interact()", "def loop(total_time=120):\n print(\"Trying to connect...\")\n server = IMAPClient(HOSTNAME, use_uid = True, ssl = True)\n server.login(USERNAME, PASSWORD)\n print(\"Logging in as \" + USERNAME)\n select_info = server.select_folder(\"Inbox\", readonly = True)\n\n messages_filter_1 = []\n other_messages = server.search([\"UNSEEN\"])\n\n for sender in FROM_FILTER_1:\n messages_filter_1 += server.search([\"UNSEEN\",\"FROM\",sender])\n\n other_messages = [x for x in other_messages if x not in messages_filter_1]\n print(\"Unread messages from filter: \" + str(len(messages_filter_1)))\n print((\"Other unread messages: \" + str(len(other_messages))))\n\n server.logout()\n print(\"Logged out\")\n \n if messages_filter_1:\n #Check if the green light is already on; the buzzer only buzzes when the unread\n #emails are seen for the first time.\n\n if GPIO.input(GREEN_LED) == 0:\n GPIO.output(GREEN_LED, True)\n buzz()\n\n else:\n GPIO.output(GREEN_LED, False)\n if other_messages:\n GPIO.output(RED_LED, True)\n else:\n GPIO.output(RED_LED, False)\n\n #Check emails every \"total_time\" seconds.\n print(\"Checking again in {} seconds...\".format(total_time))\n while total_time > 0:\n print(total_time)\n time.sleep(1)\n total_time -= 1", "def main():\n\n # start time in milliseconds to compare with last message time\n start_time = int(time.time()) * 1000\n\n # get credentials first and create gmail service object\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n while True:\n # receive email messages\n q_to_list = ['from:' + e_mail for e_mail in senders]\n q = 'in:inbox {}'.format(' OR '.join(q_to_list))\n messages = list_messages_matching_query(service, user_id,\n query=q,\n maxResults=1)\n if not messages:\n print(\"No messages to show\")\n time.sleep(seconds_between_checks)\n continue\n else:\n pprint.pprint('Messages to show: {}'.format(messages))\n\n # get thread of first document - so you can label the thread itself if need be\n thread_id = messages[0]['threadId']\n thread = get_thread(service, user_id, thread_id)\n\n msg_id = messages[0]['id']\n message = get_message(service, user_id, 
msg_id)\n\n msg_sender = field_from_message(message, 'From')\n canned_label_id = get_label_id(service, canned_label)\n thread_label_ids = thread['messages'][0][\"labelIds\"]\n\n # check that the date is later than starting, and emails match list\n if int(message[\"internalDate\"]) < start_time:\n print('internalDate earlier than start_time!')\n print(\"better luck next time\")\n # check if it's already replied to\n elif canned_label_id in thread_label_ids:\n print(\"you replied already to this one, even if it is later than startup\")\n print(\"better luck next time\")\n else:\n # check cleaned sender email in list\n sender_email = parseaddr(msg_sender)[1]\n if sender_email not in senders:\n print(\"emails don't match!!\")\n # after all tests passed, reply to message with same subject\n else:\n subject = 'Re: ' + field_from_message(message, 'Subject')\n msg = create_message(destination=msg_sender, origin=to,\n subject=subject,\n msg_txt=message_text, thr_id=thread_id)\n send_message(service, user_id, msg)\n print(\"Replied to message!\")\n start_time = int(time.time()) * 1000\n\n # then label the thread\n labels = create_msg_labels(service, addLabels=[canned_label_id])\n modify_thread(service, user_id, thread_id, labels)\n print(\"Added a label: {} \".format(canned_label))\n print('done!')\n\n # always print blank line and wait a few seconds\n print('=====\\n')\n time.sleep(seconds_between_checks)", "def verify_email(nickname, quiet):\n\n try:\n account = Account.query.filter_by(nickname=nickname).one()\n except NoResultFound:\n print(f\"Account {nickname} not found\")\n return\n gmail = GmSync.from_account(account, load_config(not quiet))\n gmail.verify()", "def connect():\n\n mailBox = IMAP4_SSL('imap.gmail.com')\n\n if TESTING:\n mailBox.login(\"sapphirephoenix\", getpass.getpass())\n else:\n mailBox.login(raw_input(\"\\nUsername: \"), getpass.getpass())\n\n result, data = mailBox.select('INBOX', True) # INBOX [Gmail]/All Mail\n\n if result == \"OK\":\n print \"\\n* Connected to mailbox! *\\n\"\n else:\n print \"\\nERROR: Could not connect to mailbox\\n\"\n print \"\\n* Exiting... 
*\\n\"\n sys.exit(1)\n\n return mailBox", "def check_for_subscribers(mail, login_info):\n ADDRESS, PASSWORD = login_info\n\n try:\n mail.select('inbox')\n data = mail.search(None, 'ALL') \n except:\n mail = imaplib.IMAP4_SSL('imap.gmail.com')\n mail.login(ADDRESS, PASSWORD)\n mail.select('inbox')\n data = mail.search(None, 'ALL')\n \n mail_ids = data[1]\n id_list = mail_ids[0].split() \n\n if not id_list:\n return []\n\n first_email_id = int(id_list[0])\n latest_email_id = int(id_list[-1])\n\n subscribers = []\n\n for i in range(latest_email_id, first_email_id-1, -1):\n data = mail.fetch(str(i), '(RFC822)')\n for response_part in data:\n arr = response_part[0]\n if isinstance(arr, tuple):\n msg = email.message_from_string(str(arr[1],'utf-8'))\n email_from = msg['from']\n subscribers.append(email_from)\n\n return subscribers", "def check_remote_mailbox_identical_to_local(the_self, gmvaulter, extra_labels = []): #pylint: disable=C0103,R0912,R0914,R0915\r\n # get all email data from gmvault-db\r\n pivot_dir = None\r\n gmail_ids = gmvaulter.gstorer.get_all_existing_gmail_ids(pivot_dir)\r\n\r\n print(\"gmail_ids = %s\\n\" % (gmail_ids))\r\n \r\n #need to check that all labels are there for emails in essential\r\n gmvaulter.src.select_folder('ALLMAIL')\r\n \r\n # check the number of id on disk \r\n imap_ids = gmvaulter.src.search({ 'type' : 'imap', 'req' : 'ALL'}) #get everything\r\n \r\n the_self.assertEquals(len(imap_ids), \\\r\n len(gmail_ids), \\\r\n \"Error. Should have the same number of emails: local nb of emails %d,\"\\\r\n \" remote nb of emails %d\" % (len(gmail_ids), len(imap_ids)))\r\n\r\n for gm_id in gmail_ids:\r\n\r\n print(\"Fetching id %s with request %s\" % (gm_id, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA))\r\n #get disk_metadata\r\n disk_metadata = gmvaulter.gstorer.unbury_metadata(gm_id)\r\n\r\n print(\"disk metadata %s\\n\" % (disk_metadata))\r\n\r\n #date = disk_metadata['internal_date'].strftime('\"%d %b %Y\"')\r\n subject = disk_metadata.get('subject', None)\r\n msgid = disk_metadata.get('msg_id', None)\r\n received = disk_metadata.get('x_gmail_received', None)\r\n\r\n req = \"(\"\r\n has_something = False\r\n\r\n #if date:\r\n # req += 'HEADER DATE {date}'.format(date=date)\r\n # has_something = True\r\n\r\n if subject:\r\n #split on ' when contained in subject to keep only the first part\r\n subject = subject.split(\"'\")[0]\r\n subject = subject.split('\"')[0]\r\n if has_something: #add extra space if it has a date\r\n req += ' ' \r\n req += 'SUBJECT \"{subject}\"'.format(subject=subject.strip().encode('utf-8'))\r\n has_something = True\r\n\r\n if msgid:\r\n if has_something: #add extra space if it has a date\r\n req += ' ' \r\n req += 'HEADER MESSAGE-ID {msgid}'.format(msgid=msgid.strip())\r\n has_something = True\r\n \r\n if received:\r\n if has_something:\r\n req += ' '\r\n req += 'HEADER X-GMAIL-RECEIVED {received}'.format(received=received.strip())\r\n has_something = True\r\n \r\n req += \")\"\r\n\r\n print(\"Req = %s\\n\" % (req))\r\n\r\n imap_ids = gmvaulter.src.search({ 'type' : 'imap', 'req': req, 'charset': 'utf-8'})\r\n\r\n print(\"imap_ids = %s\\n\" % (imap_ids))\r\n\r\n if len(imap_ids) != 1:\r\n the_self.fail(\"more than one imap_id (%s) retrieved for request %s\" % (imap_ids, req))\r\n\r\n imap_id = imap_ids[0]\r\n \r\n # get online_metadata \r\n online_metadata = gmvaulter.src.fetch(imap_id, \\\r\n imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA) \r\n\r\n print(\"online_metadata = %s\\n\" % (online_metadata))\r\n print(\"disk_metadata = %s\\n\" % 
(disk_metadata))\r\n\r\n header_fields = online_metadata[imap_id]['BODY[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]']\r\n \r\n subject, msgid, received = gmvault_db.GmailStorer.parse_header_fields(header_fields)\r\n\r\n #compare metadata\r\n the_self.assertEquals(subject, disk_metadata.get('subject', None))\r\n the_self.assertEquals(msgid, disk_metadata.get('msg_id', None))\r\n the_self.assertEquals(received, disk_metadata.get('x_gmail_received', None))\r\n\r\n # check internal date it is plus or minus 1 hour\r\n online_date = online_metadata[imap_id].get('INTERNALDATE', None) \r\n disk_date = disk_metadata.get('internal_date', None) \r\n\r\n if online_date != disk_date:\r\n min_date = disk_date - datetime.timedelta(hours=1)\r\n max_date = disk_date + datetime.timedelta(hours=1)\r\n \r\n if min_date <= online_date <= max_date:\r\n print(\"online_date (%s) and disk_date (%s) differs but \"\\\r\n \"within one hour. This is OK (timezone pb) *****\" % (online_date, disk_date))\r\n else:\r\n the_self.fail(\"online_date (%s) and disk_date (%s) are different\" % (online_date, disk_date))\r\n\r\n #check labels\r\n disk_labels = disk_metadata.get('labels', None)\r\n #add extra labels\r\n for x_lab in extra_labels:\r\n disk_labels.append(x_lab)\r\n\r\n online_labels = imap_utils.decode_labels(online_metadata[imap_id].get('X-GM-LABELS', None)) \r\n\r\n #clean potential labels with multiple spaces\r\n disk_labels = [ gmvault_utils.remove_consecutive_spaces_and_strip(label) for label in disk_labels ]\r\n online_labels = [ gmvault_utils.remove_consecutive_spaces_and_strip(label) for label in online_labels ]\r\n\r\n if not disk_labels: #no disk_labels check that there are no online_labels\r\n the_self.assertTrue(not online_labels)\r\n\r\n print(\"disk_labels = %s\\n\" % (disk_labels))\r\n print(\"online_labels = %s\\n\" % (online_labels))\r\n the_self.assertEquals(len(disk_labels), len(online_labels))\r\n\r\n for label in disk_labels:\r\n #change label Migrated (lower and uppercase) to gmv-migrated because reserved by Gmail\r\n if label.lower() == \"migrated\":\r\n label = \"gmv-migrated\"\r\n elif label.lower() == r\"\\muted\":\r\n label = \"gmv-muted\"\r\n if label not in online_labels:\r\n the_self.fail(\"label %s should be in online_labels %s as\"\\\r\n \" it is in disk_labels %s\" % (label, online_labels, disk_labels))\r\n\r\n # check flags\r\n disk_flags = disk_metadata.get('flags', None)\r\n online_flags = online_metadata[imap_id].get('FLAGS', None) \r\n\r\n if not disk_flags: #no disk flags\r\n the_self.assertTrue(not online_flags)\r\n\r\n the_self.assertEquals(len(disk_flags), len(online_flags))\r\n\r\n for flag in disk_flags:\r\n if flag not in online_flags:\r\n the_self.fail(\"flag %s should be in \"\\\r\n \"online_flags %s as it is in disk_flags %s\" \\\r\n % (flag, online_flags, disk_flags))", "def get_msgs(subject=BOTNAME):\n\n logger.debug('Checking messages')\n conn = open_connection()\n conn.select('INBOX')\n typ, data = conn.search(None, '(UNSEEN SUBJECT \"%s\")' % subject)\n for num in data[0].split():\n typ, data = conn.fetch(num, '(RFC822)')\n msg = email.message_from_string(data[0][1])\n typ, data = conn.store(num, 'FLAGS', '\\\\Seen')\n yield msg\n logger.debug('Logging out')\n conn.logout()", "def __checkGoogleMail(self):\n self.googleMailInfoLabel.hide()\n self.googleInstallButton.show()\n self.googleCheckAgainButton.show()\n self.googleHelpButton.setEnabled(True)\n self.googleMailCheckBox.setEnabled(True)\n \n try:\n import E5Network.E5GoogleMail # __IGNORE_WARNING__\n 
from E5Network.E5GoogleMailHelpers import (\n isClientSecretFileAvailable\n )\n \n self.googleInstallButton.hide()\n if not isClientSecretFileAvailable():\n # secrets file is not installed\n self.googleMailCheckBox.setChecked(False)\n self.googleMailCheckBox.setEnabled(False)\n self.googleMailInfoLabel.setText(self.tr(\n \"<p>The client secrets file is not present.\"\n \" Has the Gmail API been enabled?</p>\"))\n self.googleMailInfoLabel.show()\n Preferences.setUser(\"UseGoogleMailOAuth2\", False)\n else:\n self.googleMailCheckBox.setChecked(\n Preferences.getUser(\"UseGoogleMailOAuth2\"))\n self.googleMailInfoLabel.hide()\n self.googleCheckAgainButton.hide()\n except ImportError:\n # missing libraries, disable Google Mail\n self.googleMailCheckBox.setChecked(False)\n self.googleMailCheckBox.setEnabled(False)\n self.googleMailInfoLabel.setText(self.tr(\n \"<p>The Google Mail Client API is not installed.\"\n \" Use <code>{0}</code> to install it.</p>\"\n ).format(getInstallCommand()))\n self.googleMailInfoLabel.show()\n self.googleHelpButton.setEnabled(False)\n Preferences.setUser(\"UseGoogleMailOAuth2\", False)", "def run_mailcheck (self):\n\t\t# TODO: add function in backend to check if all needed things are set\n\t\t# like server/pass/user/... - if not, show error\n\t\t# if it is not currently refreshing\n\t\tif not self.__mailbackend.refreshing:\n\t\t\tself.__status = mail.MailCheckStatus.REFRESH \n\t\t\tself.redraw_canvas()\n\t\t\tself.__mailbackend.start()\n\t\treturn False\t# in case we are run as a timeout", "def main():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server()\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n\r\n # Call the Gmail API\r\n # results = service.users().labels().list(userId='me').execute()\r\n # labels = results.get('labels', [])\r\n\r\n # if not labels:\r\n # print('No labels found.')\r\n # else:\r\n # print('Labels:')\r\n # for label in labels:\r\n # print(label['name'])\r\n\r\n get_messages(service)\r\n # show_chatty_threads(service)\r", "def email_startup():\n imap = imaplib.IMAP4_SSL('imap.gmail.com')\n # authenticate\n imap.login(email_credentials.email_user, email_credentials.email_pass)\n return imap", "def ztest_gmvault_retrieve_email_store_and_read(self): #pylint:disable-msg=C0103\r\n storage_dir = '/tmp/gmail_bk'\r\n gmvault_utils.delete_all_under(storage_dir)\r\n \r\n gimap = imap_utils.GIMAPFetcher('imap.gmail.com', 993, self.login, self.passwd)\r\n gstorer = gmvault.GmailStorer(storage_dir)\r\n \r\n gimap.connect()\r\n \r\n criteria = ['Before 1-Oct-2006']\r\n #criteria = ['ALL']\r\n ids = gimap.search(criteria)\r\n \r\n the_id = ids[124]\r\n \r\n res = gimap.fetch(the_id, gimap.GET_ALL_INFO)\r\n \r\n gm_id = gstorer.bury_email(res[the_id])\r\n \r\n metadata, data = gstorer.unbury_email(gm_id)\r\n \r\n 
self.assertEquals(res[the_id][gimap.GMAIL_ID], metadata['gm_id'])\r\n self.assertEquals(res[the_id][gimap.EMAIL_BODY], data)\r\n self.assertEquals(res[the_id][gimap.GMAIL_THREAD_ID], metadata['thread_ids'])\r\n \r\n labels = []\r\n for label in res[the_id][gimap.GMAIL_LABELS]:\r\n labels.append(label)\r\n \r\n self.assertEquals(labels, metadata['labels'])", "def get_unread_email_ids(gmail_client):\n response = gmail_client.users().messages().list(userId='me',q='is:unread').execute()\n\n if 'messages' in response: # messages key only exists if there are unread messages\n return [message['id'] for message in response['messages']]\n else:\n print(\"No unread messages...\")\n return [] # still return a list since that's what caller expects" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a raw packet to a dpkt packet according to the link type.
def iplayer_from_raw(raw, linktype=1):
    if linktype == 1:  # ethernet
        pkt = dpkt.ethernet.Ethernet(raw)
        ip = pkt.data
    elif linktype == 101:  # raw
        ip = dpkt.ip.IP(raw)
    else:
        raise Exception("unknown PCAP linktype")
    return ip
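A minimal usage sketch, assuming dpkt is installed and iplayer_from_raw above is in scope; the helper name and capture path are illustrative.

import dpkt

def list_ip_layers(path):  # hypothetical helper around iplayer_from_raw
    layers = []
    with open(path, "rb") as f:
        pcap = dpkt.pcap.Reader(f)
        linktype = pcap.datalink()  # link type recorded in the pcap global header
        for ts, raw in pcap:
            layers.append(iplayer_from_raw(raw, linktype))
    return layers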
[ "def parse_packet(linktype, packet):\n link_layer = parse_Ethernet(packet) if linktype == pcapy.DLT_EN10MB else parse_Cooked(packet)\n if link_layer['payload_type'] in ['IPv4', 'IPv6']:\n network_layer = parse_IPv4(link_layer['payload']) if link_layer['payload_type'] == 'IPv4' else parse_IPv6(link_layer['payload'])\n if network_layer['payload_type'] in ['UDP', 'TCP']:\n transport_layer = parse_UDP(network_layer['payload']) if network_layer['payload_type'] == 'UDP' else parse_TCP(network_layer['payload'])\n return (link_layer, network_layer, transport_layer)", "def decode_packet(packet: str) -> PacketType:\n node_id, _, protocol, attrs = packet.split(DELIM, 3)\n\n data = cast(PacketType, {\"node\": PacketHeader(node_id).name})\n\n # make exception for version response\n data[\"protocol\"] = UNKNOWN\n if \"=\" in protocol:\n attrs = protocol + DELIM + attrs\n\n # no attributes but instead the welcome banner\n elif \"RFLink Gateway\" in protocol:\n data.update(parse_banner(protocol))\n\n elif protocol == \"PONG\":\n data[\"ping\"] = protocol.lower()\n\n # debug response\n elif protocol.lower() == \"debug\":\n data[\"protocol\"] = protocol.lower()\n if attrs.startswith(\"RTS P1\"):\n data[\"rts_p1\"] = attrs.strip(DELIM).split(DELIM)[1]\n else:\n data[\"tm\"] = packet[3:5]\n\n # failure response\n elif protocol == \"CMD UNKNOWN\":\n data[\"response\"] = \"command_unknown\"\n data[\"ok\"] = False\n\n # ok response\n elif protocol == \"OK\":\n data[\"ok\"] = True\n\n # generic message from gateway\n elif node_id == \"20\" and not attrs:\n data[\"message\"] = protocol\n\n # its a regular packet\n else:\n data[\"protocol\"] = protocol.lower()\n\n # convert key=value pairs where needed\n for attr in filter(None, attrs.strip(DELIM).split(DELIM)):\n if \"=\" not in attr:\n continue\n key, value = attr.lower().split(\"=\", 1)\n if key in VALUE_TRANSLATION:\n try:\n value = VALUE_TRANSLATION[key](value)\n except ValueError:\n log.warning(\n \"Could not convert attr '%s' value '%s' to expected type '%s'\",\n key,\n value,\n VALUE_TRANSLATION[key].__name__,\n )\n continue\n name = PACKET_FIELDS.get(key, key)\n data[name] = value\n unit = UNITS.get(key, None)\n\n if unit:\n data[name + \"_unit\"] = unit\n\n # correct KaKu device address\n if data.get(\"protocol\", \"\") == \"kaku\" and len(data[\"id\"]) != 6:\n data[\"id\"] = \"0000\" + data[\"id\"]\n\n return data", "def parse_ethernet(self, raw_packet):\n header_parts = struct.unpack('!6c6cH', raw_packet[:14])\n dst_mac = ':'.join(['%.2x' % ord(c) for c in header_parts[:6]])\n src_mac = ':'.join(['%.2x' % ord(c) for c in header_parts[6:12]])\n ether_type = hex(header_parts[12])\n info = {'dst_mac': dst_mac,\n 'src_mac': src_mac,\n 'type': ether_type}\n return info, raw_packet[14:]", "def decode(cls, data):\n status = struct.unpack('B', data[1])[0]\n # Power ACK is bit 2\n power_ack = (status & 0x04) >> 2\n # Datarate ACK is bit 1\n datarate_ack = (status & 0x02) >> 1\n # Channelmask ACK is bit 0\n channelmask_ack = status & 0x01\n return LinkADRAns(power_ack, datarate_ack, channelmask_ack)", "def __str__(self):\n return '\\n%(source)s > %(type)s (0x%(type_d).2x)\\n%(data)s' % \\\n {'type': DGTL.pkt_type_str[self.type], 'type_d': self.type,\n 'data': str(self.decoded) if self.decoded else 'Unknown raw data.',\n 'source': self.source}", "def decode(cls, raw: bytes) -> \"EthernetHeader\":\n # unsigned char dmac[6];\n # unsigned char smac[6];\n # uint16_t ethertype;\n # unsigned char payload[];\n dmac = raw[:6]\n smac = raw[6:12]\n typ = 
socket.htons(struct.unpack(\"H\", raw[12:14])[0])\n payload = raw[14:]\n return EthernetHeader(dmac=dmac, smac=smac, typ=typ, payload=payload)", "def decode(self, eth):\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_ARP:\n\t\t\t# print 'arp'\n\t\t\treturn ARP(eth.data).get()\n\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP6:\n\t\t\tip = eth.data\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# multicast is just like IPv4\n\t\t\t\tif udp.dport == 5353:\n\t\t\t\t\t# print udp\n\t\t\t\t\tans = mDNS(udp).get()\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\t# pp.pprint(ans)\n\t\t\t\t\t# print 25*'='\n\t\t\t\t\treturn ans\n\n\t\t\t\t# print 'IPv6 UDP','port:',udp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# TCP not useful\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\tpass\n\t\t\t\t# tcp = ip.data\n\t\t\t\t# print 'IPv6 TCP','port:',tcp.dport,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\n\t\t\t# ICMP error msg not useful for mapping\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t\t# print 'IPv6 icmp6:',ip.data.data\n\t\t\t\tpass\n\n\t\t\t# other stuff I haven't decoded\n\t\t\telse:\n\t\t\t\tpass\n\t\t\t\t# print 'IPv6',ip.p,'src:',self.getip(ip.src,True),'dst:',self.getip(ip.dst,True)\n\t\telif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t\tip = eth.data\n\n\t\t\t# roku interface port: 1900 dst: 239.255.255.250 1900\n\t\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\t\tudp = ip.data\n\n\t\t\t\t# these aren't useful\n\t\t\t\tif udp.dport == 53: # DNS\n\t\t\t\t\t# return DNS(udp.data)\n\t\t\t\t\treturn {}\n\n\t\t\t\telif udp.dport == 5353: # mDNS\n\t\t\t\t\t# print 'mDNS'\n\t\t\t\t\t# print udp\n\t\t\t\t\treturn mDNS(udp).get()\n\n\t\t\t\telif self.getip(ip.dst) == '239.255.255.250':\n\t\t\t\t\treturn {}\n\n\t\t\t\telse:\n\t\t\t\t\t# don't print standard ports\n\t\t\t\t\t# 17500 dropbox\n\t\t\t\t\t# if not ip.data.dport in [17500]:\n\t\t\t\t\t# \tprint 'other udp','port:',udp.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst),': '\n\t\t\t\t\treturn {}\n\t\t\telif ip.p == dpkt.ip.IP_PROTO_TCP:\n\t\t\t\t# src = self.getip(ip.src)\n\t\t\t\t# if netaddr.IPAddress(src) not in netaddr.IPNetwork(\"192.168.1.0/24\"):\n\t\t\t\t# \twho = ''\n\t\t\t\t# \tif src not in self.ipMap:\n\t\t\t\t# \t\twho = WhoIs(src).record['NetName']\n\t\t\t\t# \t\tself.ipMap[src] = who\n\t\t\t\t# \telse:\n\t\t\t\t# \t\twho = self.ipMap[src]\n\t\t\t\t# \tif who in ['GOOGLE','AKAMAI','APPLE-WWNET','AMAZO-ZIAD1','DROPBOX']:\n\t\t\t\t# \t\treturn {}\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint src,who\n\t\t\t\t# don't print standard ports\n\t\t\t\t# port 58969 - XSANS Apple, why do i see that?\n\t\t\t\t# 22 ssh\n\t\t\t\t# 25 smtp\n\t\t\t\t# 80 http\n\t\t\t\t# 123 time server\n\t\t\t\t# 143 imap\n\t\t\t\t# 443 https\n\t\t\t\t# 445 smb\n\t\t\t\t# 548 afp over tcp\n\t\t\t\t# 5009 airport admin utility\n\t\t\t\t# 5222 ichat\n\t\t\t\t# 17500 dropbox\n\t\t\t\t# if not ip.data.dport in [22,25,80,123,143,443,445,548,5009,5222,17500]:\n\t\t\t\t\t# print 'other tcp','port:',ip.data.dport,'src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}\n\t\t\t# elif ip.p == dpkt.ip.IP_PROTO_ICMP6:\n\t\t\t# \tprint '?????? 
other icmp6','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telif ip.p == 2:\n\t\t\t\tpass\n\t\t\t\t# print 'IGMP','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\telse:\n\t\t\t\t# print 'other ip packet','src:',self.getip(ip.src),'dst:',self.getip(ip.dst)\n\t\t\t\treturn {}", "def packet_type(self):\r\n return self._packet_type", "def process_packet(packet):\n\t# convert netfilter queue packet ot scapy packet\n\tscapy_packet = IP(packet.get_payload())\n\tif scapy_packet.haslayer(DNSRR):\n\t\t# if the packet is a DNS Resource Record (DNS reply)\n\t\t# modify the packet\n\t\tprint(\"[Before]:\", scapy_packet.summary())\n\t\ttry:\n\t\t\tscapy_packet = modify_packet(scapy_packet)\n\t\texcept IndexError:\n\t\t\t# not UDP packet, this can be IPerror/UDPerror packets\n\t\t\tpass\n\t\tprint(\"[After ]:\", scapy_packet.summary())\n\t\t# set back as netfilter queue packet\n\t\tpacket.set_payload(bytes(scapy_packet))\n\t# accept the packet\n\tpacket.accept()", "def _prettify_link_types(conn, save_original_data=False):\n\tconn.execute(\"UPDATE links SET type = substr(type,10)\")\n\tconn.commit()", "def encode_packet(packet: PacketType) -> str:\n if packet[\"protocol\"] == \"rfdebug\":\n return \"10;RFDEBUG=%s;\" % packet[\"command\"]\n elif packet[\"protocol\"] == \"rfudebug\":\n return \"10;RFUDEBUG=%s;\" % packet[\"command\"]\n elif packet[\"protocol\"] == \"qrfdebug\":\n return \"10;QRFDEBUG=%s;\" % packet[\"command\"]\n else:\n return SWITCH_COMMAND_TEMPLATE.format(node=PacketHeader.master.value, **packet)", "def netlink_decode(command, family, nla_type, nla_data):\n\tif command.endswith(\"ADDR\"):\n\t\tname = get_netlink_constant(nla_type, \"IFA_\")\n\n\telif command.endswith(\"LINK\"):\n\t\tname = get_netlink_constant(nla_type, \"IFLA_\")\n\n\telif command.endswith(\"ROUTE\"):\n\t\tname = get_netlink_constant(nla_type, \"RTA_\")\n\n\telse:\n\t\t# Don't know what this is. 
Leave it as an integer.\n\t\tname = nla_type\n\n\tif name in [\"IFA_ADDRESS\", \"IFA_LOCAL\", \"RTA_SRC\", \"RTA_DST\", \"RTA_GATEWAY\", \"RTA_PREFSRC\", \"RTA_UID\"]:\n\t\tdata = socket.inet_ntop(family, nla_data)\n\n\telif name in [\"RTA_IIF\", \"RTA_OIF\", \"RTA_TABLE\", \"IFLA_MTU\", \"IFLA_TXQLEN\", \"IFLA_GROUP\",\n\t\t\"IFLA_PROMISCUITY\", \"IFLA_NUM_TX_QUEUES\", \"IFLA_GSO_MAX_SEGS\", \"IFLA_GSO_MAX_SIZE\",\n\t\t\"IFLA_NUM_RX_QUEUES\", \"IFLA_LINK\", \"IFLA_CARRIER_CHANGES\", \"IFA_FLAGS\"]:\n\t\tdata = struct.unpack(\"=I\", nla_data)[0]\n\n\telif name in [\"IFLA_OPERSTATE\", \"IFLA_LINKMODE\", \"IFLA_CARRIER\", \"IFLA_PROTO_DOWN\"]:\n\t\tdata = struct.unpack(\"=B\", nla_data)[0]\n\n\telif name in [\"IFLA_IFNAME\", \"IFA_LABEL\", \"IFLA_QDISC\"]:\n\t\tdata = nla_data.strip('\\x00')\n\n\telif name in [\"IFLA_ADDRESS\", \"IFLA_BROADCAST\"]:\n\t\tdata = \":\".join([\"{:02x}\".format(x) for x in struct.unpack('=6B', nla_data)])\n\n\telif name in [\"RTA_MULTIPATH\"]:\n\t\tdata = []\n\t\tfor nexthop, nexthop_attrs in nla_data:\n\t\t\tvalues = {}\n\t\t\tfor rta, v in nexthop_attrs:\n\t\t\t\trta_name, value = netlink_decode(command, socket.AF_INET, rta.rta_type, v)\n\t\t\t\tvalues[rta_name] = value\n\t\t\tdata.append((nexthop, values))\n\n\telse:\n\t\tdata = nla_data\n\n\treturn name, data", "def ingest_packet(self, dpid, in_port, packet, timestamp):\n #*** Instantiate an instance of Packet class:\n self.packet = self.Packet()\n pkt = self.packet\n\n #*** DPID of the switch that sent the Packet-In message:\n pkt.dpid = dpid\n #*** Port packet was received on:\n pkt.in_port = in_port\n #*** Packet receive time:\n pkt.timestamp = timestamp\n #*** Packet length on the wire:\n pkt.length = len(packet)\n\n #*** Read packet into dpkt to parse headers:\n eth = dpkt.ethernet.Ethernet(packet)\n\n #*** Ethernet parameters:\n pkt.eth_src = _mac_addr(eth.src)\n pkt.eth_dst = _mac_addr(eth.dst)\n pkt.eth_type = eth.type\n\n if eth.type == 2048:\n #*** IPv4 (TBD: add IPv6 support)\n ip = eth.data\n pkt.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n pkt.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n pkt.proto = ip.p\n if ip.p == 6:\n #*** TCP\n tcp = ip.data\n pkt.tp_src = tcp.sport\n pkt.tp_dst = tcp.dport\n pkt.tp_flags = tcp.flags\n pkt.tp_seq_src = tcp.seq\n pkt.tp_seq_dst = tcp.ack\n pkt.payload = tcp.data\n elif ip.p == 17:\n #*** UDP\n udp = ip.data\n pkt.tp_src = udp.sport\n pkt.tp_dst = udp.dport\n pkt.tp_flags = \"\"\n pkt.tp_seq_src = 0\n pkt.tp_seq_dst = 0\n pkt.payload = udp.data\n else:\n #*** Not a transport layer that we understand:\n # TBD: add other transport protocols\n pkt.tp_src = 0\n pkt.tp_dst = 0\n pkt.tp_flags = 0\n pkt.tp_seq_src = 0\n pkt.tp_seq_dst = 0\n pkt.payload = ip.data\n else:\n #*** Non-IP:\n pkt.ip_src = ''\n pkt.ip_dst = ''\n pkt.proto = 0\n pkt.tp_src = 0\n pkt.tp_dst = 0\n pkt.tp_flags = 0\n pkt.tp_seq_src = 0\n pkt.tp_seq_dst = 0\n pkt.payload = eth.data\n\n #*** Generate a flow_hash unique to flow for pkts in either direction:\n if pkt.proto == 6:\n self.packet.flow_hash = nethash.hash_flow((pkt.ip_src, pkt.ip_dst,\n pkt.tp_src, pkt.tp_dst,\n pkt.proto))\n else:\n self.packet.flow_hash = nethash.hash_flow((pkt.eth_src, pkt.eth_dst,\n dpid, pkt.timestamp,\n pkt.proto))\n self.flow_hash = self.packet.flow_hash\n\n #*** Generate a packet_hash unique to the packet:\n self.packet.packet_hash = nethash.hash_packet(self.packet)\n\n #*** Instantiate classification data for this flow in context:\n self.classification = self.Classification(self.flow_hash,\n 
self.classifications,\n self.classification_time_limit,\n self.logger)\n self.logger.debug(\"clasfn=%s\", self.classification.dbdict())\n db_dict = self.packet.dbdict()\n self.logger.debug(\"packet_in=%s\", db_dict)\n\n #*** Write packet-in metadata to database collection:\n self.packet_ins.insert_one(db_dict)", "def packet_to_str(packet: PacketDescription, simple_diagrams=False, force_show_frames='', show_timestamp=False) \\\n -> PacketDiagramDescription:\n protocol = packet.protocols_str\n note_color = ''\n packet_str = ''\n if 'NGAP' in protocol:\n if nas_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_nas_req)\n protocol = 'NAS req.'\n else:\n note_color = ' {0}'.format(color_nas_rsp)\n protocol = 'NGAP msg. or NAS rsp.'\n\n # Search NGAP messages\n ngap_matches = ngap_message_type_regex.finditer(packet.msg_description)\n ngap_message_types = [ngap_match.group(1) for ngap_match in ngap_matches if ngap_match is not None]\n if len(ngap_message_types) > 0:\n ngap_seen = set()\n ngap_seen_add = ngap_seen.add\n ngap_message_types = ['NGAP {0}'.format(x) for x in ngap_message_types if\n not (x in ngap_seen or ngap_seen_add(x))]\n\n # Search NAS messages\n nas_matches = nas_message_type_regex.finditer(packet.msg_description)\n nas_message_types = [nas_match.group(1) for nas_match in nas_matches if nas_match is not None]\n if len(nas_message_types) > 0:\n # Remove duplicates: https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-whilst-preserving-order\n nas_seen = set()\n nas_seen_add = nas_seen.add\n nas_message_types = ['NAS {0}'.format(x) for x in nas_message_types if\n not (x in nas_seen or nas_seen_add(x))]\n\n # Print msg. type\n joint_ngap_nas_msg_types = ngap_message_types + nas_message_types\n if len(joint_ngap_nas_msg_types) > 0:\n protocol = '{0}'.format(',\\\\n'.join(joint_ngap_nas_msg_types))\n\n elif 'HTTP' in protocol:\n # Some customized filtering based on what we have seen\n rsp_match = http_rsp_regex.search(packet.msg_description)\n req_match = http_url_regex.search(packet.msg_description)\n if ('404 page not found' in packet.msg_description) or (rsp_match is not None):\n note_color = ' {0}'.format(color_http2_rsp)\n if rsp_match is not None:\n protocol = '{0} {1} rsp.'.format(protocol, rsp_match.group(1))\n else:\n protocol = protocol + ' 404 rsp.'\n elif req_match is not None:\n note_color = ' {0}'.format(color_http2_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_http2_req)\n protocol = protocol + ' req. or rsp. 
(no HTTP/2 headers)'\n\n match = list(http_url_regex.finditer(packet.msg_description))\n if len(match) > 0:\n method = ''\n method_match_all = http_method_regex.finditer(packet.msg_description)\n protocols = []\n for idx, method_match in enumerate(method_match_all):\n method = '{0} '.format(method_match.group(1))\n url_split = match[idx].group(1).split('?')\n protocols.append('{0} {1}'.format(method, url_split[0]))\n protocol = '{0}\\\\n'.format(protocol) + '\\\\n'.join(protocols)\n\n elif 'PFCP' in protocol:\n if pfcp_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_pfcp_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_pfcp_rsp)\n protocol = protocol + ' rsp.'\n\n match = pfcp_message_type_regex.search(packet.msg_description)\n if match is not None:\n protocol = '{0}\\\\n{1}'.format(protocol, match.group(1))\n\n elif 'GTPv2' in protocol:\n if gtpv2_req_regex.search(packet.msg_description) is not None:\n note_color = ' {0}'.format(color_gtpv2_req)\n protocol = protocol + ' req.'\n else:\n note_color = ' {0}'.format(color_gtpv2_rsp)\n protocol = protocol + ' req., rsp. or notification'\n\n match = gtpv2_message_type_regex.search(packet.msg_description)\n if match is not None:\n protocol = '{0}\\\\n{1}'.format(protocol, match.group(1))\n\n elif 'Diameter' in protocol or 'RADIUS' in protocol or \"GTP'\" in protocol:\n note_color = ' {0}'.format(color_diameter_radius_gtpprime)\n protocol = get_diam_description(packet)\n\n if show_timestamp:\n try:\n dt_object = datetime.fromtimestamp(packet.timestamp)\n if dt_object.tzinfo is None:\n tz_str = ''\n else:\n tz_str = ' {0}'.format(dt_object.tzinfo)\n timestamp_hour = ' ({0}:{1}:{2}.{3}{4})'.format(dt_object.hour, dt_object.minute, dt_object.second,\n dt_object.microsecond / 1000, tz_str)\n except:\n timestamp_hour = ''\n protocol = '{0}\\\\n+{1:.3f}s{2}'.format(protocol, packet.timestamp_offsett, timestamp_hour)\n\n frame_number = packet[2]\n packet_str = packet_str + '\"{0}\" -> \"{1}\": {2}, {3}\\n'.format(packet.ip_src, packet.ip_dst, frame_number, protocol)\n packet_str = packet_str + '\\nnote right{0}\\n'.format(note_color)\n\n force_show_frames = [e.strip() for e in force_show_frames.split(',')]\n if simple_diagrams and frame_number not in force_show_frames:\n packet_payload = ''\n else:\n packet_payload = packet.msg_description\n\n if packet_payload != '':\n packet_str = packet_str + '**{0} to {1}**\\n{2}\\n'.format(packet.ip_src, packet.ip_dst, packet_payload)\n else:\n packet_str = packet_str + '**{0} to {1}**\\n'.format(packet.ip_src, packet.ip_dst)\n packet_str = packet_str + 'end note\\n'\n packet_str = packet_str + '\\n'\n return PacketDiagramDescription(packet_str, packet.ip_src, packet.ip_dst, protocol)", "def analyze(packet):\n proto = None\n sport = None\n dport = None\n if packet.haslayer(IP):\n # we skip Ethernet and go directly to IP\n packet = packet.getlayer(IP)\n src, dst = packet.src, packet.dst\n if packet.haslayer(TCP):\n proto = \"TCP\"\n packet = packet.getlayer(TCP)\n dport, sport = packet.dport, packet.sport\n elif packet.haslayer(UDP):\n proto = \"UDP\"\n packet = packet.getlayer(UDP)\n dport, sport = packet.dport, packet.sport\n return Flow(packet, src, dst, sport, dport, proto)\n else:\n # TODO: handle this better\n print \"No IP layer: \",packet\n return False", "def next_connection_packets(piter, linktype=1):\n first_ft = None\n\n for ts, raw in piter:\n ft = flowtuple_from_raw(raw, linktype)\n if not first_ft: first_ft = ft\n\n sip, dip, 
sport, dport, proto = ft\n if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):\n break\n\n yield {\n \"src\": sip, \"dst\": dip, \"sport\": sport, \"dport\": dport, \"proto\": proto,\n \"raw\": payload_from_raw(raw, linktype).encode(\"base64\"), \"direction\": first_ft == ft,\n }", "def get_packet_type(pkt: packet.Packet) -> dict:\n\n pkt_metadata = {}\n pkt_metadata[\"type\"] = \"unsupported\"\n\n for index, protocol in enumerate(pkt.protocols, start=0):\n if type(protocol) == ipv4.ipv4:\n pkt_metadata[\"ipv4\"] = index\n pkt_metadata[\"ipv4_src\"] = protocol.src\n pkt_metadata[\"ipv4_dst\"] = protocol.dst\n elif type(protocol) == tcp.tcp:\n pkt_metadata[\"type\"] = \"tcp\"\n pkt_metadata[\"tcp\"] = index\n pkt_metadata[\"transport_layer\"] = index # Works for both TCP and UDP\n pkt_metadata[\"src_port\"] = protocol.src_port\n pkt_metadata[\"dst_port\"] = protocol.dst_port\n elif type(protocol) == udp.udp:\n pkt_metadata[\"type\"] = \"udp\"\n pkt_metadata[\"udp\"] = index\n pkt_metadata[\"transport_layer\"] = index # Works for both TCP and UDP\n pkt_metadata[\"src_port\"] = protocol.src_port\n pkt_metadata[\"dst_port\"] = protocol.dst_port\n elif type(protocol) == icmp.icmp:\n pkt_metadata[\"type\"] = \"icmp\"\n pkt_metadata[\"icmp\"] = index\n pkt_metadata[\"icmp_type\"] = protocol.type\n pkt_metadata[\"icmp_code\"] = protocol.code\n\n return pkt_metadata", "def build_dht_packet(type, **kwargs):\n kwargs[\"a\"] = int(type)\n return bencode.encode(kwargs)", "def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, 
\"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract all packets belonging to the same flow from a pcap packet iterator
def next_connection_packets(piter, linktype=1):
    first_ft = None

    for ts, raw in piter:
        ft = flowtuple_from_raw(raw, linktype)
        if not first_ft: first_ft = ft

        sip, dip, sport, dport, proto = ft
        if not (first_ft == ft or first_ft == (dip, sip, dport, sport, proto)):
            break

        yield {
            "src": sip, "dst": dip, "sport": sport, "dport": dport, "proto": proto,
            "raw": payload_from_raw(raw, linktype).encode("base64"), "direction": first_ft == ft,
        }
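A short usage sketch for the generator above, with an illustrative file name; flowtuple_from_raw and payload_from_raw are helpers from the same Python 2 era module and must be in scope (str.encode("base64") only exists there).

import dpkt

with open("dump.pcap", "rb") as f:  # illustrative capture file
    pcap = dpkt.pcap.Reader(f)
    piter = iter(pcap)
    # Collects every packet belonging to the first connection in the trace;
    # iteration stops as soon as a packet from a different flow shows up.
    first_connection = list(next_connection_packets(piter, linktype=pcap.datalink()))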
[ "def iter_packets(iterable):\n prev = None\n\n for i in sorted(iterable, key=attrgetter('seq')):\n if prev is None or prev.seq != i.seq:\n prev = i\n yield i", "def packet_filter_generator(pcap_class_gen, filter_con):\n global source_mac_add\n\n for pcapfile, device_name in pcap_class_gen:\n capture = rdpcap(pcapfile) # Read the trace file using scapy rdpcap module\n mac_address_list = {}\n src_mac_address_list = {}\n\n for i, (packet) in enumerate(capture):\n if packet[0].src not in mac_address_list: # Counting the source MAC counter value\n mac_address_list[packet[0].src] = 1\n else:\n mac_address_list[packet[0].src] += 1\n\n if packet[0].dst not in mac_address_list: # Counting the Destination MAC counter value\n mac_address_list[packet[0].dst] = 1\n else:\n mac_address_list[packet[0].dst] += 1\n\n if packet[0].src not in src_mac_address_list: # keeping the source MAC address counter for capture length\n src_mac_address_list[packet[0].src] = 1\n else:\n src_mac_address_list[packet[0].src] += 1\n\n highest = max(mac_address_list.values()) # Identifying the source mac-address\n for k, v in mac_address_list.items():\n if v == highest:\n if k in src_mac_address_list:\n source_mac_add = k\n\n for i, (packet) in enumerate(capture):\n if filter_con == \"bidirectional\": # filter bidirectional traffic on source\n if packet[0].src == source_mac_add or packet[0].dst == source_mac_add:\n yield packet, device_name\n elif filter_con == \"Src_to_Other\": # filter traffic originated from source\n if packet[0].src == source_mac_add:\n yield packet, device_name\n elif filter_con == \"Other_to_Src\": # filter traffic destined to source\n if packet[0].dst == source_mac_add:\n yield packet, device_name", "def known_packets(self, serial):\n for s, pkts in self.received.items():\n if s == serial:\n for ps in pkts.values():\n for _, p in ps:\n yield p", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def packet_directions(self, test=0):\n result = 
[]\n time_limit = datetime.datetime.now() - self.flow_time_limit\n #*** Get Client IP and DPID of first switch to report flow:\n (flow_client, first_dpid) = self.origin()\n #*** Main search:\n db_data = {'flow_hash': self.packet.flow_hash,\n 'timestamp': {'$gte': time_limit},\n 'dpid': first_dpid}\n if not test:\n packet_cursor = self.packet_ins.find(db_data).sort('timestamp', 1)\n else:\n return self.packet_ins.find(db_data).sort('timestamp', 1).explain()\n #*** Iterate the packet cursor:\n for packet in packet_cursor:\n if packet['ip_src'] == flow_client:\n result.append(1)\n else:\n result.append(0)\n return result", "def ip_datagrams(frames):\n\n for f in frames:\n if dpkt.ethernet.ETH_TYPE_IP == f.type:\n yield f.data", "def extract_streams(self):\n with open(self.filepath, 'rb') as pcap_file:\n pcap_reader = dpkt.pcap.Reader(pcap_file)\n for timestamp, buf in pcap_reader:\n try:\n frame = dpkt.ethernet.Ethernet(buf)\n except dpkt.dpkt.UnpackError:\n continue\n ip_pkt = frame.data\n if (not isinstance(ip_pkt, dpkt.ip.IP)\n and not isinstance(ip_pkt, dpkt.ip6.IP6)):\n continue\n if not isinstance(ip_pkt.data, dpkt.tcp.TCP):\n continue\n ip_ver = socket.AF_INET\n if ip_pkt.v == 6:\n ip_ver = socket.AF_INET6\n tcp_pkt = ip_pkt.data\n stream_id = (\n socket.inet_ntop(ip_ver, ip_pkt.src),\n tcp_pkt.sport,\n socket.inet_ntop(ip_ver, ip_pkt.dst),\n tcp_pkt.dport\n )\n if len(tcp_pkt.data) > 0:\n timestamp = int(timestamp * 1000) # milliseconds\n self.streams[stream_id].put(\n (tcp_pkt.seq, (timestamp, tcp_pkt.data)))\n\n logging.debug(f'Streams: {len(self.streams)}')", "def packets_for_stream(fobj, offset):\n pcap = dpkt.pcap.Reader(fobj)\n pcapiter = iter(pcap)\n ts, raw = pcapiter.next()\n\n fobj.seek(offset)\n for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):\n yield p", "def analyse_packet_data(self):\n #with memory.get_lock():\n for packet in self.packets: # O(N) packet iteration\n source_private_ip = None\n if \"IPv6\" in packet or \"IPV6\" in packet:\n if self.engine == \"scapy\":\n IP = \"IPv6\"\n else:\n IP = \"IPV6\"\n\n # TODO: Fix weird ipv6 errors in pyshark engine\n # * ExHandler as temperory fix\n try:\n private_source = IPAddress(packet[IP].src).is_private()\n except:\n private_source = None\n try:\n private_destination = IPAddress(packet[IP].dst).is_private()\n except:\n private_destination = None\n elif \"IP\" in packet:# and packet[\"IP\"].version == \"4\":\n # Handle IP packets that originated from LAN (Internal Network)\n #print(packet[\"IP\"].version == \"4\")\n IP = \"IP\"\n private_source = IPAddress(packet[IP].src).is_private()\n private_destination = IPAddress(packet[IP].dst).is_private()\n\n if \"TCP\" in packet or \"UDP\" in packet:\n # Sort out indifferences in pcap engine\n if self.engine == \"pyshark\":\n eth_layer = \"ETH\"\n tcp_src = str(\n packet[\"TCP\"].srcport if \"TCP\" in packet else packet[\"UDP\"].srcport)\n tcp_dst = str(\n packet[\"TCP\"].dstport if \"TCP\" in packet else packet[\"UDP\"].dstport)\n else:\n eth_layer = \"Ether\"\n tcp_src = str(\n packet[\"TCP\"].sport if \"TCP\" in packet else packet[\"UDP\"].sport)\n tcp_dst = str(\n packet[\"TCP\"].dport if \"TCP\" in packet else packet[\"UDP\"].dport)\n\n if private_source and private_destination: # Communication within LAN\n key1 = packet[IP].src + \"/\" + packet[IP].dst + \"/\" + tcp_dst\n key2 = packet[IP].dst + \"/\" + packet[IP].src + \"/\" + tcp_src\n if key2 in memory.packet_db:\n source_private_ip = key2\n else:\n source_private_ip = key1\n # IntraNetwork Hosts list\n 
memory.lan_hosts[packet[IP].src] = {\"mac\": packet[eth_layer].src}\n memory.lan_hosts[packet[IP].dst] = {\"mac\": packet[eth_layer].dst}\n elif private_source: # Internetwork packet\n key = packet[IP].src + \"/\" + packet[IP].dst + \"/\" + tcp_dst\n source_private_ip = key\n # IntraNetwork vs InterNetwork Hosts list\n memory.lan_hosts[packet[IP].src] = {\"mac\": packet[eth_layer].src}\n memory.destination_hosts[packet[IP].dst] = {}\n elif private_destination: # Internetwork packet\n #print(packet.show())\n key = packet[IP].dst + \"/\" + packet[IP].src + \"/\" + tcp_src\n source_private_ip = key\n # IntraNetwork vs InterNetwork Hosts list\n memory.lan_hosts[packet[IP].dst] = {\"mac\": packet[eth_layer].dst}\n memory.destination_hosts[packet[IP].src] = {}\n\n elif \"ICMP\" in packet:\n key = packet[IP].src + \"/\" + packet[IP].dst + \"/\" + \"ICMP\"\n source_private_ip = key\n # Fill packetDB with generated key\n #print(packet.show())\n if source_private_ip:\n if source_private_ip not in memory.packet_db:\n memory.packet_db[source_private_ip] = {}\n # Ethernet Layer ( Mac address )\n if \"Ethernet\" not in memory.packet_db[source_private_ip]:\n memory.packet_db[source_private_ip][\"Ethernet\"] = {}\n # HTTP Packets\n if \"Payload\" not in memory.packet_db:\n # Payload recording\n memory.packet_db[source_private_ip][\"Payload\"] = []\n if self.engine == \"pyshark\":\n memory.packet_db[source_private_ip][\"Ethernet\"][\"src\"] = packet[\"ETH\"].src\n memory.packet_db[source_private_ip][\"Ethernet\"][\"dst\"] = packet[\"ETH\"].dst\n # Refer https://github.com/KimiNewt/pyshark/issues/264\n #memory.packet_db[source_private_ip][\"Payload\"].append(packet.get_raw_packet())\n else:\n memory.packet_db[source_private_ip][\"Ethernet\"][\"src\"] = packet[\"Ether\"].src\n memory.packet_db[source_private_ip][\"Ethernet\"][\"dst\"] = packet[\"Ether\"].dst\n \n if \"TCP\" in packet:\n memory.packet_db[source_private_ip][\"Payload\"].append(str(packet[\"TCP\"].payload))\n elif \"UDP\" in packet:\n memory.packet_db[source_private_ip][\"Payload\"].append(str(packet[\"UDP\"].payload))\n elif \"ICMP\" in packet:\n memory.packet_db[source_private_ip][\"Payload\"].append(str(packet[\"ICMP\"].payload))", "def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()", "def get_pcap_traffic_series(self):\n parsed_pcap_data = {}\n\n if (self.mac_address_binary is not None):\n parsed_pcap_data[self.mac_address_binary] = []\n\n with 
open(self.pcap_file_path, 'rb') as pcap_file:\n try:\n pcap = dpkt.pcap.Reader(pcap_file)\n for ts, buf in pcap:\n # Skip non ethernet frames\n try:\n eth = dpkt.ethernet.Ethernet(buf)\n except:\n continue\n\n # Skip non-IP packets\n if eth.type != 2048:\n continue\n \n # Apply eth filter\n if (self.mac_address_binary is not None):\n self.append_data(parsed_pcap_data, self.mac_address_binary, eth, ts)\n else:\n if (eth.src not in parsed_pcap_data):\n parsed_pcap_data[eth.src] = []\n if (eth.dst not in parsed_pcap_data):\n parsed_pcap_data[eth.dst] = []\n\n self.append_data(parsed_pcap_data, eth.src, eth, ts)\n self.append_data(parsed_pcap_data, eth.dst, eth, ts)\n except:\n print \"Error parsing file: %s\" % pcap_file\n \n # Remove mac addresses that didn't send data\n receivers_only = []\n for mac_addr in parsed_pcap_data:\n data_sent = False\n for data in parsed_pcap_data[mac_addr]:\n if (data[1] > 0):\n data_sent = True\n break\n if (not data_sent):\n receivers_only.append(mac_addr)\n\n for mac_addr in receivers_only:\n parsed_pcap_data.pop(mac_addr, None)\n\n # Sort the data \n for mac_addr in parsed_pcap_data:\n series = sorted(parsed_pcap_data[mac_addr], key=operator.itemgetter(0))\n parsed_pcap_data[mac_addr] = series\n\n return parsed_pcap_data", "def ethernet_frames(packets):\n\n for p in packets:\n try:\n yield dpkt.ethernet.Ethernet(p)\n except dpkt.dpkt.NeedData, e:\n continue", "def dump_as_pkt (self):\n cnt=0;\n for stream in self.streams:\n print(\"=======================\")\n print(\"Stream %d\" % cnt)\n print(\"=======================\")\n cnt = cnt +1 \n stream.to_pkt_dump()", "def parse_pkt_list(self, pkt_list):\n flow_pkts = {}\n for (t, pkt) in pkt_list:\n flowID = self.extract_flowID(pkt)\n if flowID not in flow_pkts.keys():\n flow_pkts[flowID] = [(t, pkt)]\n else:\n flow_pkts[flowID].append((t,pkt))\n return flow_pkts", "def next(self) -> Optional[Packet]:\n self._check_type_ok()\n idx = min(self._index)\n stop_idx = max(self._stop_index)\n\n while idx < stop_idx:\n p = self._pkts[idx]\n\n sys.stderr.write('#%d %s' % (idx + 1, '\\n' if idx % 40 == 39 else ''))\n if self._filter_func(p):\n if p.wpan and not (self._index[0] <= idx < self._stop_index[0]): # wpan matched but not in range\n pass\n elif p.eth and not (self._index[1] <= idx < self._stop_index[1]): # eth matched but not in range\n pass\n else:\n self._on_found_next(idx, p)\n print(\"\\n>>> found packet at #%d!\" % (idx + 1,), file=sys.stderr)\n return p\n\n idx += 1\n\n return None", "async def _process(self, pkts):\n for pkt in pkts:\n for label, info in sorted(self._by_label.items()):\n if info.done:\n continue\n\n result = await self._process_pkt(pkt, label, info)\n\n if result is not None:\n yield result", "def print_packets(pcap):\n # For each packet in the pcap process the contents\n i=0\n for timestamp, buf in pcap:\n\n # Print out the timestamp in UTC\n print '[%d] Timestamp: %s' %(i,str(datetime.datetime.utcfromtimestamp(timestamp)))\n i += 1\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n ip = dpkt.ip.IP(buf)\n if type(ip.data) == UDP : # checking of type of data that was recognized by dpkg\n udp = ip.data\n print udp.sport\n else:\n print \"Not UDP\"\n #print dpkt.ethernet.ETH_TYPE_ARP\n #ip = dpkt.ip.IP(buf)\n #ip = dpkt.ip.IP(data)\n #ip = eth.data\n #ip_addr = socket.inet_ntoa(ip.src|ip.dst)\n #print 'Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type\n #print 'IP Packet type: %s' % eth.data.__class__.__name__\n #print 'IP type: %s' 
%eth.data.__class__.__name__\n #print 'ARP frame type: %s' %eth.data._class_ \n # Make sure the Ethernet frame contains an IP packet\n # EtherType (IP, ARP, PPPoE, IP6... see http://en.wikipedia.org/wiki/EtherType)\n if eth.type != dpkt.ethernet.ETH_TYPE_IP:\n print 'Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet) \n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n print 'IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n (ip_to_str(ip.src), ip_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)", "def grouped_iterator(iterator):\n return itertools.groupby(iterator, lambda x: x.track)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a PCAP, seek to a packet offset, then get all packets belonging to the same connection
def packets_for_stream(fobj, offset):
    pcap = dpkt.pcap.Reader(fobj)
    pcapiter = iter(pcap)
    ts, raw = pcapiter.next()

    fobj.seek(offset)
    for p in next_connection_packets(pcapiter, linktype=pcap.datalink()):
        yield p
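A usage sketch, assuming the byte offset of the connection's first packet record is already known (for example from an earlier indexing pass over the file); the path and offset are illustrative, and the .next() call inside the function ties it to Python 2.

with open("dump.pcap", "rb") as f:  # illustrative capture file
    # 2048 stands in for the known file offset of the connection's first packet.
    stream = list(packets_for_stream(f, 2048))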
[ "def pcap():", "def read_pcap(self, filename):\n packets = rdpcap(filename)[IP]\n sessions = packets.sessions()\n for key in sessions:\n # try:\n if key not in self.session_collection:\n parts = key.split()\n protocol = parts[0]\n ip1_parts = parts[1].split(':')\n ip1 = ip1_parts[0]\n port1 = int(ip1_parts[1]) if len(ip1_parts) > 1 else 0\n ip2_parts = parts[3].split(':')\n ip2 = ip2_parts[0]\n port2 = int(ip2_parts[1]) if len(ip1_parts) > 1 else 0\n entropy = entropy_domain_names(sessions[key])\n self.session_collection[key] = [\n hash(protocol),\n entropy,\n # int(ipaddress.ip_address(ip1)),\n port1,\n # int(ipaddress.ip_address(ip2)),\n port2,\n MinHash()\n ]\n self.session_collection[key] = calculate_hash(self.session_collection[key], sessions[key])\n # except Exception as e:\n # print(str(e))\n # pass", "def __read_pcap(self):\n try:\n # use rdpcap function of scapy and return a list\n packets = rdpcap(self.read_file)\n except BaseException as e:\n self.logger.error(e)\n else:\n for pkt in packets:\n if self.verbose:\n self.logger.info(pkt.show())\n else:\n self.logger.info(pkt.summary())", "def print_packets(pcap):\n # For each packet in the pcap process the contents\n for timestamp, buf in pcap:\n\n # Print out the timestamp in UTC\n print('Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp)))\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n\n # Make sure the Ethernet data contains an IP packet\n if not isinstance(eth.data, dpkt.ip.IP):\n print('Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__)\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet)\n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n # help(eth)\n\n\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n (inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset))", "def extract_streams(self):\n with open(self.filepath, 'rb') as pcap_file:\n pcap_reader = dpkt.pcap.Reader(pcap_file)\n for timestamp, buf in pcap_reader:\n try:\n frame = dpkt.ethernet.Ethernet(buf)\n except dpkt.dpkt.UnpackError:\n continue\n ip_pkt = frame.data\n if (not isinstance(ip_pkt, dpkt.ip.IP)\n and not isinstance(ip_pkt, dpkt.ip6.IP6)):\n continue\n if not isinstance(ip_pkt.data, dpkt.tcp.TCP):\n continue\n ip_ver = socket.AF_INET\n if ip_pkt.v == 6:\n ip_ver = socket.AF_INET6\n tcp_pkt = ip_pkt.data\n stream_id = (\n socket.inet_ntop(ip_ver, ip_pkt.src),\n tcp_pkt.sport,\n socket.inet_ntop(ip_ver, ip_pkt.dst),\n tcp_pkt.dport\n )\n if len(tcp_pkt.data) > 0:\n timestamp = int(timestamp * 1000) # milliseconds\n self.streams[stream_id].put(\n (tcp_pkt.seq, (timestamp, tcp_pkt.data)))\n\n logging.debug(f'Streams: {len(self.streams)}')", "def print_packets(pcap):\n # For each packet in the pcap process the contents\n i=0\n for timestamp, buf in pcap:\n\n # Print out the timestamp in UTC\n print '[%d] Timestamp: %s' %(i,str(datetime.datetime.utcfromtimestamp(timestamp)))\n i += 1\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n ip = dpkt.ip.IP(buf)\n if type(ip.data) 
== UDP : # checking of type of data that was recognized by dpkg\n udp = ip.data\n print udp.sport\n else:\n print \"Not UDP\"\n #print dpkt.ethernet.ETH_TYPE_ARP\n #ip = dpkt.ip.IP(buf)\n #ip = dpkt.ip.IP(data)\n #ip = eth.data\n #ip_addr = socket.inet_ntoa(ip.src|ip.dst)\n #print 'Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type\n #print 'IP Packet type: %s' % eth.data.__class__.__name__\n #print 'IP type: %s' %eth.data.__class__.__name__\n #print 'ARP frame type: %s' %eth.data._class_ \n # Make sure the Ethernet frame contains an IP packet\n # EtherType (IP, ARP, PPPoE, IP6... see http://en.wikipedia.org/wiki/EtherType)\n if eth.type != dpkt.ethernet.ETH_TYPE_IP:\n print 'Non IP Packet type not supported %s\\n' % eth.data.__class__.__name__\n continue\n\n # Now unpack the data within the Ethernet frame (the IP packet) \n # Pulling out src, dst, length, fragment info, TTL, and Protocol\n ip = eth.data\n\n # Pull out fragment information (flags and offset all packed into off field, so use bitmasks)\n do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)\n more_fragments = bool(ip.off & dpkt.ip.IP_MF)\n fragment_offset = ip.off & dpkt.ip.IP_OFFMASK\n\n # Print out the info\n print 'IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\\n' % \\\n (ip_to_str(ip.src), ip_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset)", "def pcap(self, fname):\n\t\tcap = pcapy.open_offline(fname)\n\n\t\tself.map = []\n\t\tself.p = PacketDecoder()\n\t\tcap.loop(0, self.process)\n\n\t\treturn self.map", "def test():\n #f = open('/home/andyp/Documents/Studies/CONCORDIA/IoT_project/IoT_Sentinel/src/captures_IoT_Sentinel/captures_IoT-Sentinel/Aria/Setup-A-1-STA.pcap')\n with open('capture_test.pcap') as f:\n pcap = dpkt.pcap.Reader(f)\n print_packets(pcap)", "def read_packets(environment_id):\n if not environment_id:\n return None\n\n packets_path = os.path.join(RUN_DIRECTORY, \"packets\", \"original_\" + str(environment_id) + \".pcap\")\n if not os.path.exists(packets_path):\n return None\n\n parsed = []\n try:\n packets = rdpcap(packets_path)\n parsed = [layers.packet.Packet(p) for p in packets]\n except Exception as e:\n print(e)\n print(\"FAILED TO PARSE!\")\n\n return parsed", "def packet_directions(self, test=0):\n result = []\n time_limit = datetime.datetime.now() - self.flow_time_limit\n #*** Get Client IP and DPID of first switch to report flow:\n (flow_client, first_dpid) = self.origin()\n #*** Main search:\n db_data = {'flow_hash': self.packet.flow_hash,\n 'timestamp': {'$gte': time_limit},\n 'dpid': first_dpid}\n if not test:\n packet_cursor = self.packet_ins.find(db_data).sort('timestamp', 1)\n else:\n return self.packet_ins.find(db_data).sort('timestamp', 1).explain()\n #*** Iterate the packet cursor:\n for packet in packet_cursor:\n if packet['ip_src'] == flow_client:\n result.append(1)\n else:\n result.append(0)\n return result", "def test():\n with open('univ1_pt8.pcap', 'rb') as f: #univ1_trace/univ1_pt8\n pcap = Reader(f)\n print_packets(pcap)\n # top_flows()\n host_pairs()", "def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = 
parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info", "def packets_from_file(self, cap_or_xml):\n beginning = cap_or_xml.read(5)\n if beginning == '<?xml':\n # It's an xml file.\n return self._packets_from_fd(cap_or_xml, previous_data=beginning, wait_for_more_data=False)\n else:\n # We assume it's a PCAP file and use tshark to get the XML.\n p = subprocess.Popen([get_tshark_path(),\n '-T', 'pdml',\n '-r', cap_or_xml.name],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return self._packets_from_fd(p.stdout, previous_data=beginning, wait_for_more_data=False)", "def analyse_packet_data(self):\n #with memory.get_lock():\n for packet in self.packets: # O(N) packet iteration\n source_private_ip = None\n if \"IPv6\" in packet or \"IPV6\" in packet:\n if self.engine == \"scapy\":\n IP = \"IPv6\"\n else:\n IP = \"IPV6\"\n\n # TODO: Fix weird ipv6 errors in pyshark engine\n # * ExHandler as temperory fix\n try:\n private_source = IPAddress(packet[IP].src).is_private()\n except:\n private_source = None\n try:\n private_destination = IPAddress(packet[IP].dst).is_private()\n except:\n private_destination = None\n elif \"IP\" in packet:# and packet[\"IP\"].version == \"4\":\n # Handle IP packets that originated from LAN (Internal Network)\n #print(packet[\"IP\"].version == \"4\")\n IP = \"IP\"\n private_source = IPAddress(packet[IP].src).is_private()\n private_destination = IPAddress(packet[IP].dst).is_private()\n\n if \"TCP\" in packet or \"UDP\" in packet:\n # Sort out indifferences in pcap engine\n if self.engine == \"pyshark\":\n eth_layer = \"ETH\"\n tcp_src = str(\n packet[\"TCP\"].srcport if \"TCP\" in packet else packet[\"UDP\"].srcport)\n tcp_dst = str(\n packet[\"TCP\"].dstport if \"TCP\" in packet else packet[\"UDP\"].dstport)\n else:\n eth_layer = \"Ether\"\n tcp_src = str(\n packet[\"TCP\"].sport if \"TCP\" in packet else packet[\"UDP\"].sport)\n tcp_dst = str(\n packet[\"TCP\"].dport if \"TCP\" in packet else packet[\"UDP\"].dport)\n\n if private_source and private_destination: # Communication within LAN\n key1 = packet[IP].src + \"/\" + packet[IP].dst + \"/\" + tcp_dst\n key2 = packet[IP].dst + \"/\" + packet[IP].src + \"/\" + tcp_src\n if key2 in memory.packet_db:\n source_private_ip = key2\n else:\n source_private_ip = key1\n # IntraNetwork Hosts list\n memory.lan_hosts[packet[IP].src] = {\"mac\": packet[eth_layer].src}\n memory.lan_hosts[packet[IP].dst] = {\"mac\": 
packet[eth_layer].dst}\n elif private_source: # Internetwork packet\n key = packet[IP].src + \"/\" + packet[IP].dst + \"/\" + tcp_dst\n source_private_ip = key\n # IntraNetwork vs InterNetwork Hosts list\n memory.lan_hosts[packet[IP].src] = {\"mac\": packet[eth_layer].src}\n memory.destination_hosts[packet[IP].dst] = {}\n elif private_destination: # Internetwork packet\n #print(packet.show())\n key = packet[IP].dst + \"/\" + packet[IP].src + \"/\" + tcp_src\n source_private_ip = key\n # IntraNetwork vs InterNetwork Hosts list\n memory.lan_hosts[packet[IP].dst] = {\"mac\": packet[eth_layer].dst}\n memory.destination_hosts[packet[IP].src] = {}\n\n elif \"ICMP\" in packet:\n key = packet[IP].src + \"/\" + packet[IP].dst + \"/\" + \"ICMP\"\n source_private_ip = key\n # Fill packetDB with generated key\n #print(packet.show())\n if source_private_ip:\n if source_private_ip not in memory.packet_db:\n memory.packet_db[source_private_ip] = {}\n # Ethernet Layer ( Mac address )\n if \"Ethernet\" not in memory.packet_db[source_private_ip]:\n memory.packet_db[source_private_ip][\"Ethernet\"] = {}\n # HTTP Packets\n if \"Payload\" not in memory.packet_db:\n # Payload recording\n memory.packet_db[source_private_ip][\"Payload\"] = []\n if self.engine == \"pyshark\":\n memory.packet_db[source_private_ip][\"Ethernet\"][\"src\"] = packet[\"ETH\"].src\n memory.packet_db[source_private_ip][\"Ethernet\"][\"dst\"] = packet[\"ETH\"].dst\n # Refer https://github.com/KimiNewt/pyshark/issues/264\n #memory.packet_db[source_private_ip][\"Payload\"].append(packet.get_raw_packet())\n else:\n memory.packet_db[source_private_ip][\"Ethernet\"][\"src\"] = packet[\"Ether\"].src\n memory.packet_db[source_private_ip][\"Ethernet\"][\"dst\"] = packet[\"Ether\"].dst\n \n if \"TCP\" in packet:\n memory.packet_db[source_private_ip][\"Payload\"].append(str(packet[\"TCP\"].payload))\n elif \"UDP\" in packet:\n memory.packet_db[source_private_ip][\"Payload\"].append(str(packet[\"UDP\"].payload))\n elif \"ICMP\" in packet:\n memory.packet_db[source_private_ip][\"Payload\"].append(str(packet[\"ICMP\"].payload))", "def plotConnectionStatisticsFromPcap(self, pcaplocation):\n pcaps = self._collectFilesToAnalyse(pcaplocation)\n logging.debug(\"Analyse the following files from %s: %s\"%(pcaplocation, pcaps))\n result = []\n\n for pcapfile in pcaps:\n currdir = os.path.join(self.outputdir, removeSuffixes(os.path.basename(pcapfile), [\".pcap\", \".pcapng\"]))\n mkdir_p(currdir)\n\n ttoutput = self._runTcpTraceOnPcap(pcapfile)\n stats = self._extractConnectionStatistics(ttoutput)\n result.append(stats)\n self._createPlotOfFailedConnections(stats, currdir)\n self._createPlotOfLoadingTimes(stats, os.path.join(currdir, \"pageload_times.pdf\"))\n\n return result", "def pull(self):\n\n # For each packet in the pcap process the contents\n for item in self.input_stream:\n\n # Print out the timestamp in UTC\n print('%s -' % item['timestamp'], end='')\n\n # Transport info\n if item['transport']:\n print(item['transport']['type'], end='')\n\n # Print out the Packet info\n packet_type = item['packet']['type']\n print(packet_type, end='')\n packet = item['packet']\n if packet_type in ['IP', 'IP6']:\n # Is there domain info?\n if 'src_domain' in packet:\n print('%s(%s) --> %s(%s)' % (net_utils.inet_to_str(packet['src']), packet['src_domain'],\n net_utils.inet_to_str(packet['dst']), packet['dst_domain']), end='')\n else:\n print('%s --> %s' % (net_utils.inet_to_str(packet['src']), net_utils.inet_to_str(packet['dst'])), end='')\n else:\n 
print(str(packet))\n\n # Only include application if we have it\n if item['application']:\n print('Application: %s' % item['application']['type'], end='')\n print(str(item['application']), end='')\n\n # Just for newline\n print()", "def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections", "def get_connections(capture):\n ip_dict = dict()\n for pkt in capture:\n\n if not hasattr(pkt, \"ip\") and not hasattr(pkt, \"ipv6\"):\n continue\n\n protocol = pkt.highest_layer\n\n tcp_dst_port = None\n tcp_src_port = None\n if hasattr(pkt, \"tcp\"):\n tcp_src_port = pkt.tcp.srcport\n tcp_dst_port = pkt.tcp.dstport\n\n if hasattr(pkt, \"ip\"):\n if pkt.ip.src.startswith(\"192.168.178\"):\n ip, dst = pkt.ip.src, pkt.ip.dst\n else:\n ip, dst = pkt.ip.dst, pkt.ip.src\n tcp_dst_port = tcp_src_port\n else:\n # TODO: how to discern src and dst in IPv6?\n ip, dst = pkt.ipv6.src, pkt.ipv6.dst\n\n ip = \"%s\" % ip\n dkey = (\n \"%s\" % protocol,\n int(tcp_dst_port) if tcp_dst_port else None,\n \"%s\" % dst\n )\n if ip not in ip_dict:\n ip_dict[ip] = {dkey: 1}\n else:\n ip_dict[ip][dkey] = ip_dict[ip].get(dkey, 0) + 1\n return ip_dict", "def process_pcap(self):\n fp = open(self.pcap, \"rb\")\n pkts = dpkt.pcap.Reader(fp)\n self.process_pkts(pkts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use SortCap class together with batch_sort to sort a pcap
def sort_pcap(inpath, outpath): inc = SortCap(inpath) batch_sort(inc, outpath, output_class=lambda path: WriteCap(path, linktype=inc.linktype)) return 0
[ "def sort(self, cmp=ProbeMapVector.compare):\n pass", "def performance_sort(tc: \"list[TransicationRecord]\", filename: str):\n tc.sort(key=performance, reverse=True)\n with open(filename, 'w+', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(['id', 'r_count', 'type', 's_mean', 's_rate_mean',\n 's_rate_variance', 'long_s_rate', 's_delta', 'score_log'])\n for target in tc:\n writer.writerow([\n target.id,\n target.requests[target.requests > 0].size,\n target.src_type.value,\n target.supply.mean(),\n target.supply_rate.mean(),\n target.supply_rate.var(),\n target.long_term_supply_rate,\n target.supply_delta,\n np.log(performance(target)),\n ])", "def sort_with(inputs: jnp.ndarray,\n criterion: jnp.ndarray,\n topk: int = -1,\n **kwargs) -> jnp.ndarray:\n num_points = criterion.shape[0]\n weights = jnp.ones(num_points, dtype=criterion.dtype) / num_points\n if 0 < topk < num_points:\n start_index = 1\n target_weights = jnp.concatenate([\n jnp.array([(num_points - topk) / num_points]),\n jnp.ones(topk, dtype=inputs.dtype) / num_points\n ])\n else:\n start_index = 0\n target_weights = jnp.ones((num_points,)) / num_points\n ot = transport_for_sort(criterion, weights, target_weights, **kwargs)\n # Applies the topk on each of the dimensions of the inputs.\n sort_fn = jax.vmap(\n lambda x: (1.0 / target_weights * ot.apply(x, axis=0))[start_index:],\n in_axes=(1,), out_axes=1)\n return sort_fn(inputs)", "def sort_bag(self, bag):\n\t\traise NotImplementedError", "def sort_with(inputs: jnp.ndarray,\n criterion: jnp.ndarray,\n topk: int = -1,\n **kwargs) -> jnp.ndarray:\n num_points = criterion.shape[0]\n weights = jnp.ones(num_points, dtype=criterion.dtype) / num_points\n if 0 < topk < num_points:\n start_index = 1\n target_weights = jnp.concatenate([\n jnp.array([(num_points - topk) / num_points]),\n jnp.ones(topk, dtype=inputs.dtype) / num_points\n ])\n else:\n start_index = 0\n target_weights = jnp.ones((num_points,)) / num_points\n ot = transport_for_sort(criterion, weights, target_weights, kwargs)\n # Applies the topk on each of the dimensions of the inputs.\n sort_fn = jax.vmap(\n lambda x: (1.0 / target_weights * ot.apply(x, axis=0))[start_index:],\n in_axes=(1,), out_axes=1)\n return sort_fn(inputs)", "def test_batch_list_sorted_by_key(self):\n controls = self.make_sort_controls('header_signature')\n response = self.make_request(sorting=controls)\n\n self.assertEqual(self.status.OK, response.status)\n self.assertEqual('b' * 127 + '2', response.head_id)\n self.assert_valid_paging(response)\n self.assertEqual(3, len(response.batches))\n self.assert_all_instances(response.batches, Batch)\n self.assertEqual('b-0', response.batches[0].header_signature)\n self.assertEqual('b-2', response.batches[2].header_signature)", "def test_batch_list_sorted_in_reverse(self):\n controls = self.make_sort_controls('header_signature', reverse=True)\n response = self.make_request(sorting=controls)\n\n self.assertEqual(self.status.OK, response.status)\n self.assertEqual('b' * 127 + '2', response.head_id)\n self.assert_valid_paging(response)\n self.assertEqual(3, len(response.batches))\n self.assert_all_instances(response.batches, Batch)\n self.assertEqual('b-2', response.batches[0].header_signature)\n self.assertEqual('b-0', response.batches[2].header_signature)", "def do_sort(self, src, dst):\r\n\r\n p = subprocess.Popen([\"sort\", \"--version\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n s_ver = p.communicate()[0]\r\n del p\r\n\r\n xenv = os.environ.copy()\r\n xenv['LANG'] = 'C'\r\n 
xenv['LC_ALL'] = 'C'\r\n\r\n cmdline = ['sort', '-T', '.']\r\n if s_ver.find(\"coreutils\") > 0:\r\n cmdline.append('-S')\r\n cmdline.append('30%')\r\n cmdline.append('-o')\r\n cmdline.append(dst)\r\n cmdline.append(src)\r\n p = subprocess.Popen(cmdline, env = xenv)\r\n if p.wait() != 0:\r\n raise Exception('sort failed')", "def test_batch_list_sorted_by_implied_header(self):\n controls = self.make_sort_controls('signer_public_key')\n response = self.make_request(sorting=controls)\n\n self.assertEqual(self.status.OK, response.status)\n self.assertEqual('b' * 127 + '2', response.head_id)\n self.assert_valid_paging(response)\n self.assertEqual(3, len(response.batches))\n self.assert_all_instances(response.batches, Batch)\n self.assertEqual('b-0', response.batches[0].header_signature)\n self.assertEqual('b-2', response.batches[2].header_signature)", "def _singlepass_sort(self) -> None:\n #print('\\n ------------------ Sort Phase: -------------------------')\n start_time = time.time()\n sort_fields = self.sort_key_config.get_sort_fields()\n primary_order = self.sort_key_config.get_primary_order()\n\n if primary_order == 'forward':\n self.keys.sort(key=itemgetter(*sort_fields))\n else:\n self.keys.sort(key=itemgetter(*sort_fields), reverse=True)\n #print(f' duration = {(time.time() - start_time):.6f}')", "def volume_sort(self):\n self.jobs_sorted = sorted(\n self.jobs,\n key=lambda job: (job['height'], job['width'] * job['height']),\n # key=lambda job: job['width'] * job['height'],\n reverse=True)", "def sort_file(self):\n self._load_file_and_prepare_data()\n\n if self.sort_key_config.multi_orders():\n self._multipass_sort()\n else:\n self._singlepass_sort()\n\n self._write_file_and_dedupe()\n\n self.stats['recs_read'] = self.input_handler.rec_cnt + self.has_header_adjustment\n self.stats['recs_written'] = self.output_handler.rec_cnt", "def sort_grouped_packets(self, grouped_packets):\n for group in grouped_packets:\n group.sort(key=lambda x: x.time, reverse=False)\n return grouped_packets", "def testSorting(self):\n mtt.makeTempDirParent()\n shuffledTargets = list(g_targetBlocks)\n for i in xrange(0, 200):\n tmpDir = os.path.abspath(mtt.makeTempDir('sorting'))\n random.shuffle(g_nonTargetBlocks)\n random.shuffle(shuffledTargets)\n shuffledBlocks = list(shuffledTargets)\n lower = 0\n for j in xrange(0, len(g_nonTargetBlocks)):\n # randomly insert the non target blocks, but keep a record\n # of their relative order.\n index = random.randint(lower, len(shuffledBlocks))\n shuffledBlocks.insert(index, g_nonTargetBlocks[j])\n lower = index + 1\n testMaf = mtt.testFile(os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n ''.join(shuffledBlocks), g_headers)\n parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafSorter'))]\n cmd += ['--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf')), \n '--seq', 'hg18.chr7']\n outpipes = [os.path.abspath(os.path.join(tmpDir, 'sorted.maf'))]\n mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)\n mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)\n self.assertTrue(mafIsSorted(os.path.join(tmpDir, 'sorted.maf')))\n mtt.removeDir(tmpDir)", "def sort(data, sort_size=500):\n\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= sort_size:\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x\n buf = []\n # The sample left over\n buf.sort(key=lambda x: x[\"feat\"].size(0))\n for x in buf:\n yield x", "def sort_bed(args):\n try:\n chroms = getattr(genome, 
args.genome)\n except AttributeError:\n logging.error(\"Genome %s not available\", args.genome)\n sys.exit(1)\n \n sortproc = start_unix_sort(args.S)\n sortproc.next()\n\n outlst = []\n n = 0\n out = \"{0}\\t{1}\"\n logging.info(\"Start feeding sort\")\n with FileOrGzip(args.infile) as fh:\n for line in fh:\n chrom, pos, _ = line.split(\"\\t\", 2)\n outlst.append(out.format(chroms.cpos(chrom, long(pos)), line))\n n += 1\n if n == 100000:\n sortproc.send(\"\".join(outlst))\n outlst = []\n n = 0\n sortproc.send(\"\".join(outlst))\n sortproc.close()\n logging.info(\"Done feeding sort; Waiting for sort to finish\")", "def test_batch_list_sorted_by_length(self):\n self.add_blocks('longest', 'long')\n controls = self.make_sort_controls(\n 'header_signature', compare_length=True)\n response = self.make_request(sorting=controls)\n\n self.assertEqual(self.status.OK, response.status)\n self.assertEqual('b' * 124 + 'long', response.head_id)\n self.assert_valid_paging(response, total=5)\n self.assertEqual(5, len(response.batches))\n self.assert_all_instances(response.batches, Batch)\n self.assertEqual('b-long', response.batches[3].header_signature)\n self.assertEqual('b-longest', response.batches[4].header_signature)", "def sort(self, candList):\n\t\tcandList.sort(key=lambda c: c.cost, reverse=False)", "def make_sort(args, db):\n script_file = \"/{}/{}/Scripts/2_{}_sort.sh\".format(\n db[\"out_dir\"], args.name, args.name\n )\n with open(script_file, \"w\") as fout:\n fout.write(\"#!/bin/bash\\n\")\n fout.write(\"set -e\\n\")\n fout.write(\"##-------------\\n\")\n fout.write(\"##Step2: Sort\\n\")\n fout.write(\"##-------------\\n\")\n fout.write(\n \"docker run --rm -v /{}:/data {} \".format(db[\"out_dir\"], db[\"PICARD\"])\n )\n fout.write(\"SortSam \")\n fout.write(\"INPUT=/data/{}/SAM/{}_aligned.sam \".format(args.name, args.name))\n fout.write(\"OUTPUT=/data/{}/BAM/{}_sorted.bam \".format(args.name, args.name))\n fout.write(\"SORT_ORDER=coordinate \")\n fout.write(\"TMP_DIR=/data/{}/tmp\\n\".format(args.name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test that the StrainData.fetch_open_frame works as expected
def test_fetch_open_frame(self): import requests pesummary_data = StrainData.fetch_open_frame( "GW190412", IFO="L1", duration=32, sampling_rate=4096., channel="L1:GWOSC-4KHZ_R1_STRAIN", format="hdf5" ) N = len(pesummary_data) np.testing.assert_almost_equal(N * pesummary_data.dt.value, 32.) np.testing.assert_almost_equal(1. / pesummary_data.dt.value, 4096.) assert pesummary_data.IFO == "L1" _data = requests.get( "https://www.gw-openscience.org/eventapi/html/GWTC-2/GW190412/v3/" "L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf" ) with open("L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf", "wb") as f: f.write(_data.content) data2 = TimeSeries.read( "L-L1_GWOSC_4KHZ_R1-1239082247-32.gwf", channel="L1:GWOSC-4KHZ_R1_STRAIN" ) np.testing.assert_almost_equal(pesummary_data.value, data2.value) np.testing.assert_almost_equal( pesummary_data.times.value, data2.times.value )
[ "def test_fetch():\n try:\n df = fetch_lingspam()\n except Exception as e:\n print(e)\n assert((2893, 2) == df.values.shape)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().sched_load_avg_sg.data_frame\n\n self.assertTrue(len(dfr) == 1)\n self.assertEqual(dfr[\"cpus\"].iloc[0], \"00000002\")\n self.assertEqual(dfr[\"load\"].iloc[0], 0)\n self.assertEqual(dfr[\"utilization\"].iloc[0], 0)", "def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)", "def test_fetch_small_data(self):\n self.ICNRepo.start_repo()\n content = self.fetch.fetch_data(Name(\"/test/data/f2\"))\n self.assertEqual(content, self.data2)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().sched_load_avg_cpu.data_frame\n\n self.assertTrue(len(dfr) == 1)\n self.assertEqual(dfr[\"cpu\"].iloc[0], 0)\n self.assertEqual(dfr[\"load\"].iloc[0], 13)\n self.assertEqual(dfr[\"utilization\"].iloc[0], 18)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().sched_wakeup.data_frame\n\n self.assertTrue(len(dfr) == 2)\n self.assertEqual(dfr[\"comm\"].iloc[0], \"rcu_preempt\")\n self.assertEqual(dfr[\"pid\"].iloc[0], 7)\n self.assertEqual(dfr[\"prio\"].iloc[0], 120)\n self.assertEqual(dfr[\"success\"].iloc[0], 1)\n self.assertEqual(dfr[\"target_cpu\"].iloc[0], 1)", "def test_fetch_big_data(self):\n self.ICNRepo.start_repo()\n content = self.fetch.fetch_data(Name(\"/test/data/f3\"))\n self.assertEqual(content, self.data3)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().sched_wakeup_new.data_frame\n\n self.assertTrue(len(dfr) == 2)\n self.assertEqual(dfr[\"comm\"].iloc[0], \"shutils\")\n self.assertEqual(dfr[\"pid\"].iloc[0], 19428)\n self.assertEqual(dfr[\"prio\"].iloc[0], 120)\n self.assertEqual(dfr[\"success\"].iloc[0], 1)\n self.assertEqual(dfr[\"target_cpu\"].iloc[0], 2)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().sched_load_avg_task.data_frame\n\n self.assertTrue(len(dfr) == 1)\n self.assertEqual(dfr[\"comm\"].iloc[0], \"sshd\")\n self.assertEqual(dfr[\"pid\"].iloc[0], 2962)\n self.assertEqual(dfr[\"load\"].iloc[0], 0)\n self.assertEqual(dfr[\"utilization\"].iloc[0], 0)\n self.assertEqual(dfr[\"runnable_avg_sum\"].iloc[0], 0)\n self.assertEqual(dfr[\"running_avg_sum\"].iloc[0], 0)\n self.assertEqual(dfr[\"avg_period\"].iloc[0], 48595)", "def test_readSongData():\n\n # check type\n assert isinstance(song_df, pd.DataFrame)\n\n # check shape\n assert song_df.shape == (1972060, 8)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().cpu_capacity.data_frame\n\n self.assertTrue(len(dfr) == 1)\n self.assertEqual(dfr[\"cpu\"].iloc[0], 3)\n self.assertEqual(dfr[\"capacity\"].iloc[0], 430)\n self.assertEqual(dfr[\"rt_capacity\"].iloc[0], 1024)", "def supports_fetch_outside_dataloader(self):\r\n return True", "def test_fetch_crime(self):\n assert isinstance(_tabular.fetch_crime_data(), \n pd.DataFrame)", "def test_get_dataframe(self):\n dfr = trappy.FTrace().cpu_frequency.data_frame\n\n self.assertTrue(len(dfr) == 1)\n self.assertEqual(dfr[\"cpu\"].iloc[0], 0)\n self.assertEqual(dfr[\"frequency\"].iloc[0], 600000)\n self.assertFalse(\"cpu_id\" in dfr.columns)", "def testCircuitFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'circuit',\n orderBy = [timeCol, 'circuit'],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are 
present.')", "def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8", "def test_get_frame(mock_source):\n frame_ingestor = FrameIngestor(mock_source)\n frame_ingestor.get_frame()\n\n mock_source.get_frame.assert_called_once()", "def test_df_comparison(self):\n self.read_container = self.roundtripContainer()\n df_obt = self.read_container.to_dataframe()\n\n tsa = self.read_nwbfile.get_acquisition('a')\n df_exp = pd.DataFrame({\n 'foo': [1, 2, 3, 4],\n 'bar': ['fish', 'fowl', 'dog', 'cat'],\n 'start_time': [0.2, 0.25, 0.30, 0.35],\n 'stop_time': [0.25, 0.30, 0.40, 0.45],\n 'timeseries': [[(2, 1, tsa)],\n [(3, 1, tsa)],\n [(3, 1, tsa)],\n [(4, 1, tsa)]],\n 'tags': [[''], [''], ['fizz', 'buzz'], ['qaz']]\n },\n index=pd.Index(np.arange(4, dtype=np.int64), name='id')\n )\n # pop the timeseries column out because ts_obt has rows of lists of tuples and ts_exp has rows of lists of lists\n ts_obt = df_obt.pop('timeseries')\n ts_exp = df_exp.pop('timeseries')\n pd.testing.assert_frame_equal(df_exp, df_obt, check_like=True, check_dtype=False)\n\n # check the timeseries columns match\n for ex, obt in zip(ts_exp, ts_obt):\n self.assertEqual(ex[0][0], obt[0][0])\n self.assertEqual(ex[0][1], obt[0][1])\n self.assertContainerEqual(ex[0][2], obt[0][2])", "def test_get_data(self):\n\n # create a DataManager from a DataFrame with random data\n data_df = create_random_dataframe()\n data_manager = DataManager(data_df)\n\n # get data from the DataManager\n results_df = data_manager.get_data()\n\n # test if the data manager returns a copy of the DataFrame\n pd.testing.assert_frame_equal(data_manager.get_data(), data_df)\n self.assertIsNot(results_df, data_df)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add v to a ordered set s and return s. >>> s = Link(1, Link(3, Link(5))) >>> add(s, 0) Link(0, Link(1, Link(3, Link(5)))) >>> add(s, 4) Link(0, Link(1, Link(3, Link(4, Link(5))))) >>> add(s, 6) Link(0, Link(1, Link(3, Link(4, Link(5, Link(6)))))) >>> t = Link(1) >>> add(t, 0) Link(0, Link(1))
def add(s, v): if empty(s): return Link(v) head = s if head.first > v: # s = Link(v, s) #error: assigment, then s will rebind to a new object # s.first, s.rest = v, s # error s.rest = s s.first, s.rest = v, Link(s.first, s.rest) return s # head.first <= v while not empty(head.rest) and head.rest.first <= v: head = head.rest if head.first == v: return s else: head.rest = Link(v, head.rest) return s
[ "def add(s, v):\n if empty(s):\n return Link(v)\n if s.first > v:\n s.first, s.rest = v, Link(s.first, s.rest)\n elif s.first < v and empty(s.rest):\n s.rest = Link(v, s.rest)\n elif s.first < v:\n add(s.rest, v)\n return s", "def adjoin_set(S, v):\n if S.label is None:\n return Tree(v, None, None)\n elif v < S.label:\n return Tree(S.label, adjoin_set(S[0], v), S[1])\n elif v == S.label:\n return S\n else:\n return Tree(S.label, S[0], adjoin_set(S[1], v))", "def _add_to_set(s, v):\n if v in s:\n return False\n else:\n s.add(v)\n return True", "def add(self, s):\n current = self.first()\n # case 1 : list is empty, add new node as first node\n if self.size() == 0:\n self.__add_first(s)\n return\n # case 2 : list is not empty, element to be added is smaller than all existing ones\n elif s < current.value():\n self.__add_first(s)\n return\n # case 3 : list is not empty, element is larger than value of current element\n else:\n self.__length += 1\n nxt = current.next()\n # loop until we are at the end to find where to insert element\n while nxt is not None:\n if s < nxt.value():\n n = self.Node(s, nxt)\n current.set_next(n)\n return\n current = nxt\n nxt = nxt.next()\n current.set_next(self.Node(s, None))\n return", "def adjoin2(s, v):\n if empty(s) or s.first > v:\n return Link(v, s)\n elif s.first == v:\n return s\n else:\n return Link(s.first, adjoin2(s.rest, v))", "def add(self, s, value):\n\t\thead, tail = s[0], s[1:]\n\t\tcur_node = self.root[head]\n\t\tif not tail:\n\t\t\tcur_node.value = value\n\t\t\treturn # No further recursion\n\t\tcur_node.add(tail, value)", "def add_to_T(self, s, e, value):\n if e not in self.T[s]:\n self.T[s][e] = set()\n self.T[s][e].add(value)", "def add_edge(self, u, v):\r\n keys = self.d.keys()\r\n #if nodes are not in graph, add them\r\n if u not in keys:\r\n self.add_node(u)\r\n if v not in keys:\r\n self.add_node(v)\r\n #add each node to the value set of each other\r\n u_old = self.d[u]\r\n u_new = u_old.union(set(str(v)))\r\n v_old = self.d[v]\r\n v_new = v_old.union(set(str(u)))\r\n self.d.update({u:u_new, v:v_new})", "def add(self, s):\n for i in self._HashIndices(s): \n self.bv[i] = 1\n self.n += 1\n self.bits_in_inserted_values += 8 * len(s)", "def add(self, vector, weight):\n vector = tuple(vector)\n mins = (weight, vector)\n vecs = [vector]\n new_set = []\n for set_tup in self._set:\n smin, svecs = set_tup\n if any(self._similar(vector, v) for v in svecs):\n mins = min(smin, mins)\n vecs.extend(svecs)\n else:\n new_set.append(set_tup)\n\n bisect.insort(new_set, (mins, vecs))\n self._set = new_set\n return len(vecs) == 1", "def __radd__( self, v ) :\n\n return self + v", "def connect(self, u, v):\n self.e[u].add(v)\n self.e[v].add(u)", "def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s", "def add(self, item):\n # Raise value error if set already contains input value.\n if self.has(item):\n # raise ValueError('Set already contains {}'.format(item))\n print('Set already contains {}'.format(item))\n return\n \n # Add item to set using LL.append() method\n self.list.append(item)\n print(self.list)", "def add_this_many(x, el, s):\r\n count = 0\r\n for i in range(len(s)):\r\n if s[i] == x:\r\n count +=1\r\n while count > 0:\r\n s.append(el)\r\n count -= 1", "def add(self, val):\n self.head = self.rec_add(self.head, val)", "def union_add(this, that):\n return this.add(that, fill_value=0)", "def add(self, *items):\n for item in items:\n self.unsorted.append(item)\n key = item[0]\n self.index[key] = item\n return self", "def 
__add_to_nodes(self, V, a, b, keys):\n if a in V:\n V[a].append(b)\n else:\n V[a] = [b]\n keys.append(a)\n if b in V:\n V[b].append(a)\n else:\n V[b] = [a]\n keys.append(b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Saved news object data in serializable format
def serialize(self): return { "id": self.id, "headline": self.headline, "url": self.url, "image": self.image, "shortDescription": self.shortDescription, "saved": True, "date": self.date, "savedDate": self.savedDate }
[ "def serialize_news(self):\n return {\n 'category': self.category,\n 'datetime': self.datetime,\n 'headline': self.headline,\n 'image': self.image,\n 'related': self.related,\n 'source': self.source,\n 'summary': self.summary,\n 'url': self.url,\n }", "def to_save_data(self):\n raw_data = self.story.raw_data._raw_data\n data = {'version': 1,\n 'checksum': self._get_save_checksum(),\n 'routines': [r.to_dict() for r in self.routines],\n 'state': self.state,\n 'pc': self.pc,\n 'text_buffer_addr': self._text_buffer_addr,\n 'parse_buffer_addr': self._parse_buffer_addr,\n 'dynamic_memory': [int(x) for x in self.story.raw_data._raw_data[0:self.story.header.static_memory_address]],\n }\n return data", "def serialized_data(self):\n return {\n 'id': self.id,\n 'start_time': str(self.start_time),\n 'venue_id': self.venue_id,\n 'venue_name': self.venue.name,\n 'venue_image_link': self.venue.image_link,\n 'artist_id': self.artist_id,\n 'artist_name': self.artist.name,\n 'artist_image_link': self.artist.image_link\n }", "def to_data(self):", "def serialize(self):\n return {\n \"id\": self.id,\n \"title\": self.title,\n \"price\": str(self.price),\n \"description\": self.description,\n \"location\": self.location,\n \"listing_owner\": self.listing_owner,\n \"photos\": [photo.serialize() for photo in self.photos],\n }", "def serialize(self, obj):\n return obj", "def to_data(self):\n return self.data", "def GetDataAsObject(self):", "def readSerializable(self):\n pass", "def serialized_data(self):\n return {\n 'id': self.id,\n 'city': self.name,\n 'state': self.state_name\n }", "def serialize(self):\r\n return {\r\n 'name': self.name,\r\n 'id': self.id,\r\n 'description': self.description,\r\n 'price': self.price,\r\n 'type': self.type,\r\n 'shoppingmall_id': self.shoppingmall_id,\r\n 'user_id':self.user_id\r\n }", "def saveClassroomData():\n with open(\"ClassRoomData.txt\",\"wb\") as classroomData:\n pickle.dump(classroomEntities,classroomData)", "def _serialise(self):\n # TODO (M Foley)\n pass", "def get_data(self):\n\t\treturn self.data", "def dump_model(self):", "def get_data(self):\n return self.data", "def get_object_data(self):\n return self.object_data.copy()", "def save(self):\n d1 = {}\n with open(self.__file_path, mode=\"w\") as f:\n for k, v in self.__objects.items():\n d1[k] = v.to_dict()\n json.dump(d1, f)", "def save_state(self):\n data = {}\n data['version'] = 1.0\n data['spline'] = self.spline.get_json()\n data['L_canal_spline'] = self.L_canal_spline.get_json()\n data['R_canal_spline'] = self.R_canal_spline.get_json()\n data['selected_slice'] = self.selected_slice\n with open(os.path.join(os.path.dirname(self.dicomdir_path), self.DUMP_FILENAME), \"w\") as outfile:\n json.dump(data, outfile)\n self.history.save_()\n if self.annotation_masks is not None:\n self.annotation_masks.save_mask_splines()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method is responsible for getting the messages to respond with Also covers analytics events for those messages for e.g. click, view
def respond_to_message(self): MessageEventHandler(self.state, self.meta_data, self.message_data).handle_events(events=self.events) data = Converter(self.state).get_messages(meta_data=self.meta_data, message_data=self.message_data) outgoing_messages = data.get("messages", []) events_to_publish = data.get("publish_events", []) agent_messages = [message["message"] for message in outgoing_messages if message["sending_to"] == "AGENT"] user_messages = [message["message"] for message in outgoing_messages if message["sending_to"] == "USER"] agent_response = Util.send_messages(messages=agent_messages, sending_to="AGENT") user_response = Util.send_messages(messages=user_messages, sending_to="USER") if agent_response or user_response: Util.update_state(meta_data=self.meta_data, state=self.state) Util.log_events(meta_data=self.meta_data, state=self.state, events=events_to_publish) return 1
[ "def on_response_received(self, message):", "def process_messages(self):\n pass", "def receive_messages():\n\n # ensure that the signature on the request is valid\n if not verify_signature(request):\n return Response(status=403, response='invalid signature')\n\n messages = request.json['messages']\n responses = []\n\n for message in messages:\n print message\n # create a response to each received message just echoing the body text\n if message['type'] == 'text':\n if message['body'] == \"mirror\":\n responses.append({\n 'type': 'text',\n 'to': message['from'],\n 'body': 'You said: \"{}\"'.format(message['body']),\n 'typeTime': 0\n })\n if (message['body'] == 'Help') or (message['body'] == 'Cancel'):\n responses.append({\n 'type': 'text',\n 'to': message['from'],\n 'body': 'Welcome to the UW bot. I can get you UW related information. What would you like to know about?',\n 'suggestedResponses': defaultSuggestedResponses,\n 'typeTime': 0\n })\n if message['body'] == \"Weather\":\n addWeatherResponse(responses, message)\n if message['body'] == 'Courses':\n courseIntroResponse(responses, message)\n if message['body'] == 'Exam Schedule':\n examScheduleResponse(responses, message, 'CS', 245)\n if message['body'] == 'Info Sessions':\n infosessionsResponse(responses, message)\n if message['body'] == 'Location':\n classLocationResponse(responses, message, 'CS', 245, 1)\n if message['body'] == 'Prerequisites':\n coursePrerequisitesResponse(responses, message, 'CS', 245)\n\n\n\n if responses:\n # send the responses through the Chat Engine API\n requests.post(\n 'https://engine.apikik.com/api/v1/message',\n auth=(os.environ['USERNAME'], os.environ['API_KEY']),\n json={'messages': responses}\n )\n\n return Response(status=200)", "def received_message(self, message):\r\n pass", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def messages_processor(request):\n return {\"messages\": messages.get_messages(request)}", "def on_event():\n event = request.get_json()\n if event['type'] == 'ADDED_TO_SPACE' and not event['space']['singleUserBotDm']:\n text = 'Thanks for adding me to \"%s\"!' % (event['space']['displayName'] if event['space']['displayName'] else 'this chat')\n elif event['type'] == 'MESSAGE':\n text = 'You said: `%s`' % str(chat_service.spaces().list().execute()) #event['message']['text']\n else:\n return\n return json.jsonify({'text': text, 'thread':\"chet_cool\"})", "def get_message_handlers(self):\n return [\n (\"normal\", self.message),\n ]", "def get_message(self):\n pass", "def msgHandler():", "def test_im_chat_messages(self):\n pass", "def callback_botmessage(self, message):\n pass", "def fetch_message(self, message_id, auth):", "def _on_message(self, message):\n print(\"RECEIVED on \" + self.session_name + \":\")\n message_json = json.loads(message)\n print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))\n\n for singleMsg in message_json:\n self._process_message(singleMsg)", "def messages(request):\n ctx = {}\n messages = get_messages(request)\n if messages:\n ctx['mesgs'] = messages\n return ctx", "def _handleMessage(self):\r\n raise NotImplementedError", "def message_handler(event):\n # Se saca el id del sender. 
We get the sender id.\n sender_id = event.sender_id\n # Vemos si el mensaje es un texto o un adjunto (imagen, gif, sticker, etc)\n # We see if the message is a text or an attachment (image, GIF, sticker, etc)\n if event.is_text_message:\n # We get the message from the event variable and sent it back7\n # Obtenemos el mensaje de la variable event y se lo regresamos al usuario\n # message = check_for_greeting(TextBlob(event.message_text))\n message = obtener_respuesta(event.message_text)\n page.send(sender_id, format(message))\n elif event.is_attachment_message:\n page.send(sender_id, \"Boo, you didn't send a text. \")", "def on_message(self, msg) -> None:\n\n decoded_msg = json.loads(msg)\n message_type = decoded_msg[\"type\"]\n\n if message_type == MSG_SUBCRIPTIONS:\n\n product_ids = decoded_msg[\"channels\"]\n logging.debug(\"Subscriptions: {}\".format(product_ids))\n\n elif message_type == MSG_SNAPSHOT:\n\n product_id = decoded_msg[\"product_id\"]\n self._snapshot(decoded_msg)\n\n # Old best bid and ask doesn't exist yet, this will always set a new bbo\n self.set_if_new_bbo(product_id)\n\n elif message_type == MSG_L2UPDATE:\n\n product_id = decoded_msg[\"product_id\"]\n self.update(decoded_msg)\n\n self.set_if_new_bbo(product_id)\n\n self.event_count += 1", "def show_messages(self):\n for msg in self.messages:\n print msg['text']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the vm data from the API, create a dictionary that contains all of the necessary keys for the template The keys will be checked in the update method and not here, this method is only concerned with fetching the data that it can.
def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]: vm_id = vm_data['id'] Windows.logger.debug(f'Compiling template data for VM #{vm_id}') data: Dict[str, Any] = {key: None for key in Windows.template_keys} data['vm_identifier'] = f'{vm_data["project"]["id"]}_{vm_id}' # changes changes: Dict[str, Any] = { 'ram': False, 'cpu': False, 'storages': False, } updates = vm_data['history'][0] try: if updates['ram_quantity'] is not None: # RAM is needed in MB for the updater but we take it in in GB (1024, not 1000) changes['ram'] = vm_data['ram'] * 1024 except KeyError: pass try: if updates['cpu_quantity'] is not None: changes['cpu'] = vm_data['cpu'] except KeyError: pass # Fetch the drive information for the update try: if len(updates['storage_histories']) != 0: Windows.logger.debug(f'Fetching drives for VM #{vm_id}') child_span = opentracing.tracer.start_span('fetch_drive_updates', child_of=span) changes['storages'] = Windows.fetch_drive_updates(vm_data) child_span.finish() except KeyError: pass # Add changes to data data['changes'] = changes data['storage_type'] = vm_data['storage_type'] data['vms_path'] = settings.HYPERV_VMS_PATH # Get the host name of the server host_name = None for interface in vm_data['server_data']['interfaces']: if interface['enabled'] is True and interface['ip_address'] is not None: if IPAddress(str(interface['ip_address'])).version == 6: host_name = interface['hostname'] break if host_name is None: error = f'Host ip address not found for the server # {vm_data["server_id"]}.' Windows.logger.error(error) vm_data['errors'].append(error) return None # Add the host information to the data data['host_name'] = host_name # Determine restart data['restart'] = vm_data['restart'] return data
[ "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if 
IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data", "def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data", "def setup_template_variables(self, context, data_dict):", "def _vm_templates(self, vm, log=None):\n vm_kwargs = self._vm_kwargs(vm)\n tids = self._get_templates(vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM, log=log)\n tids.update(self._get_vm_nic_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_NIC, log=log))\n tids.update(self._get_vm_disk_templates(vm, vm_kwargs, django_settings._MON_ZABBIX_TEMPLATES_VM_DISK, log=log))\n\n return tids", "def variables_for_template(self):\n\n return {}", "def _get_template_data(self):\n self._set_meta_info()\n if self._report_key == ReportTypes.SEARCH_TOC_REPORT:\n self._set_selected()\n elif self._report_key == ReportTypes.MHR_COVER:\n self._report_data['cover'] = report_utils.set_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n elif self._report_key == ReportTypes.MHR_REGISTRATION_COVER:\n self._report_data['regCover'] = report_utils.set_registration_cover(self._report_data)\n self._report_data['createDateTime'] = Report._to_report_datetime(self._report_data['createDateTime'])\n if str(self._report_data.get('registrationType', '')).startswith('TRAN'):\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n else:\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_search_additional_message()\n elif self._report_key == ReportTypes.MHR_TRANSFER:\n self._report_data['documentDescription'] = \\\n TO_TRANSFER_DESC.get(self._report_data.get('registrationType'))\n elif self._report_data.get('registrationType', '') == MhrRegistrationTypes.REG_NOTE:\n self._report_data['documentDescription'] = self._report_data['note'].get('documentDescription', '')\n self._set_date_times()\n 
self._set_addresses()\n self._set_owner_groups()\n if self._report_key not in (ReportTypes.MHR_REGISTRATION,\n ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_TRANSPORT_PERMIT):\n self._set_notes()\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_selected()\n self._set_ppr_search()\n elif self._report_key == ReportTypes.SEARCH_BODY_REPORT:\n # Add PPR search template setup here:\n self._set_ppr_search()\n if self._report_key not in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_NOTE):\n self._set_location()\n if self._report_key != ReportTypes.MHR_TRANSPORT_PERMIT:\n self._set_description()\n return self._report_data", "def generate_data_dictionary(self) -> dict:\n return json.loads(\n jinja_env().get_template(\"data_dictionary_base.json.j2\").render(mapping=self)\n )", "def build_dict(self, user_info):\n if user_info:\n lookup_dict = {\n \"cloud_stats\": {\"title\": \"Cloud Statistics\",\n \"link\": \"/status/cloud\",\n \"is_admin_panel\": True,\n \"template\": \"status/cloud.html\"},\n \"database_stats\": {\"title\": \"Database Information\",\n \"is_admin_panel\": True,\n \"template\": \"apps/database.html\"},\n \"memcache_stats\": {\"title\": \"Global Memcache Statistics\",\n \"is_admin_panel\": True,\n \"template\": \"apps/memcache.html\"},\n \"upload_app\": {\"title\": \"Upload Application\",\n \"link\": \"/apps/new\",\n \"template\": \"apps/new.html\"},\n \"delete_app\": {\"title\": \"Delete Application\",\n \"link\": \"/apps/delete\",\n \"template\": \"apps/delete.html\"},\n \"relocate_app\": {\"title\": \"Relocate Application\",\n \"link\": \"/apps/relocate\",\n \"template\": \"apps/relocate.html\"},\n \"service_accounts\": {\"title\": \"Service Accounts\",\n \"link\": \"/service_accounts\"},\n \"manage_users\": {\"title\": \"Manage Users\",\n \"link\": \"/authorize\",\n \"is_admin_panel\": True,\n \"template\": \"authorize/cloud.html\"},\n \"logging\": {\"title\": \"Log Viewer\",\n \"link\": \"/logs\",\n \"template\": \"logs/main.html\"},\n \"taskqueue\": {\"title\": \"TaskQueue\",\n \"link\": self.get_flower_url()},\n \"pull_queue_viewer\": {\"title\": \"Pull Queue Viewer\",\n \"link\": \"/pull_queue_viewer\"},\n \"cron\": {\"title\": \"Cron\",\n \"link\": \"/cron\",\n \"template\": \"cron/console.html\"},\n \"app_console\": {\"title\": \"Application Statistics\",\n \"template\": \"apps/console.html\",\n \"link\": \"/apps/\"},\n \"datastore_viewer\": {\"title\": \"Datastore Viewer\",\n \"link\": \"/datastore_viewer\"}\n }\n if user_info.can_upload_apps:\n lookup_dict[\"app_management\"] = {\"App Management\":\n [{\"upload_app\": lookup_dict[\n \"upload_app\"]},\n {\"delete_app\": lookup_dict[\n \"delete_app\"]},\n {\"relocate_app\": lookup_dict[\n \"relocate_app\"]},\n {\"service_accounts\": lookup_dict[\n \"service_accounts\"]}]}\n if user_info.is_user_cloud_admin:\n lookup_dict[\"appscale_management\"] = {\"AppScale Management\":\n [{\"cloud_stats\": lookup_dict[\n \"cloud_stats\"]},\n {\"manage_users\": lookup_dict[\n \"manage_users\"]}]}\n if user_info.owned_apps or user_info.is_user_cloud_admin:\n sections = ['taskqueue', 'pull_queue_viewer', 'logging',\n 'app_console', 'cron', 'datastore_viewer']\n lookup_dict[\"debugging_monitoring\"] = {\n \"Debugging/Monitoring\": [{section: lookup_dict[section]}\n for section in sections]\n }\n return lookup_dict\n else:\n return {}", "def create_initial_templates_document() -> Dict[str, Any]:\n return {\n 'schema-version': 'v1', 'document-version': '',\n 'gateway-templates': [], 
'service-templates': [],\n }", "def _get_vms(self) -> Dict:\n logging.info(\"[ * ] Enumerating virtual machines in subscription\")\n\n url = f\"https://management.azure.com/subscriptions/{self.subscription_id}/providers/Microsoft.Compute/virtualMachines?api-version=2019-12-01\"\n headers = {\"Authorization\": f\"Bearer {self.access_token}\"}\n data = requests.get(url, headers=headers).json()\n\n # Build data structure\n vms = {\n count: {\n \"ResourceGroup\": [],\n \"Name\": [],\n \"Location\": [],\n \"Id\": [],\n \"ComputerName\": [],\n \"AdminUsername\": [],\n \"VMSize\": [],\n \"OS\": [],\n }\n for count in range(len(data[\"value\"]))\n }\n\n # Fill in data\n for count, value in enumerate(data[\"value\"]):\n vms[count][\"ResourceGroup\"].append(value[\"id\"].split(\"/\")[4])\n vms[count][\"Name\"].append(value[\"name\"])\n vms[count][\"Location\"].append(value[\"location\"])\n vms[count][\"Id\"].append(value[\"properties\"][\"vmId\"])\n vms[count][\"ComputerName\"].append(\n value[\"properties\"][\"osProfile\"][\"computerName\"]\n )\n vms[count][\"AdminUsername\"].append(\n value[\"properties\"][\"osProfile\"][\"adminUsername\"]\n )\n vms[count][\"VMSize\"].append(\n value[\"properties\"][\"hardwareProfile\"][\"vmSize\"]\n )\n try:\n if value[\"properties\"][\"osProfile\"][\"windowsConfiguration\"]:\n vms[count][\"OS\"].append(\"Windows\")\n except:\n vms[count][\"OS\"].append(\"Linux\")\n\n logging.info(f\"\\tVirtual machine count: {len(vms)}\")\n self.vms = vms", "def _data_for_template(self):\n data = self.config.resources\n data.update(self.template_data)\n data['variants'] = self._read_variants_file()\n return data", "def get_data(self) -> dict:\n return {\n \"pod\": {\n \"name\": self.name,\n \"ip\": self.ip,\n \"port\": self.port\n },\n \"status\": {\n \"configuration\": self.get_configuration(),\n \"health\": self.get_status_health(),\n \"ready\": self.get_status_ready()\n },\n \"hyperscan\": {\n \"db_version\": self.get_hyperscan_db_version()\n }\n }", "def get_data(self, **kwargs):\n\n self.data = {}\n #node_data = ''\n #link_data = ''\n templates_data = self.request_from_server('templates')\n self.templates = templates_data\n project_data = self.request_from_server('projects')\n for project in project_data:\n project_name = project['name']\n if 'project_name' in kwargs:\n if project_name != kwargs['project_name']:\n continue\n\n self.data[project_name] = {}\n self.data[project_name]['project_id'] = project['project_id']\n self.data[project_name]['nodes'] = {}\n node_data = self.request_from_server('projects/{}/nodes'.format(project['project_id']))\n link_data = self.request_from_server('projects/{}/links'.format(project['project_id']))\n for node in node_data:\n node_name = node['name']\n self.data[project_name]['nodes'][node_name] = {}\n self.data[project_name]['nodes'][node_name]['node_id'] = node['node_id']\n self.data[project_name]['nodes'][node_name]['template_id'] = node['template_id']\n self.data[project_name]['nodes'][node_name]['node_type'] = node['node_type']\n self.data[project_name]['nodes'][node_name]['console_port'] = node['console']\n self.data[project_name]['nodes'][node_name]['console_session'] = None\n self.data[project_name]['nodes'][node_name]['x'] = node['x']\n self.data[project_name]['nodes'][node_name]['y'] = node['y']\n self.data[project_name]['nodes'][node_name]['ports'] = {}\n if project['status'] != 'closed':\n self.data[project_name]['nodes'][node_name]['status'] = node['status']\n for port in node['ports']:\n port_name = port['short_name']\n 
self.data[project_name]['nodes'][node_name]['ports'][port_name] = {}\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['adapter_number'] = port['adapter_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['port_number'] = port['port_number']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_type'] = port['link_type']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = None\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = False\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = None\n for link in link_data:\n for link_node in link['nodes']:\n if node['node_id'] == link_node['node_id']:\n if link_node['label']['text'] == port_name:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['link_id'] = link['link_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['in_use'] = True\n if link['nodes'].index(link_node) == 0:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][1]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][1]['node_id'])\n else:\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to_id'] = link['nodes'][0]['node_id']\n self.data[project_name]['nodes'][node_name]['ports'][port_name]['connected_to'] = self.get_node_name_from_id(project_name,link['nodes'][0]['node_id'])", "def _make_vars_dict(self):\n tcl_vars = {}\n tcl_vars['chassis_ip'] = self._get_24_ip()\n tcl_vars['card_24_ip'] = self._get_24_ip_string()\n tcl_vars['card_5_ip'] = self._get_5_ip_string()\n tcl_vars['card_eth_ip'] = self._get_eth_ip_string()\n tcl_vars['card_eth_2_ip'] = tcl_vars['card_eth_ip']\n tcl_vars['vendor_name'] = self._get_dut_company()\n tcl_vars['vendor_model'] = self._get_dut_model()\n tcl_vars['vendor_firmware'] = self._get_dut_firmware_name()\n tcl_vars['num_24_antennas'] = self._get_num_ant_24()\n tcl_vars['num_5_antennas'] = self._get_num_ant_5()\n tcl_vars['test_list'] = self._get_test_case()\n tcl_vars['ssid_24'] = self._get_ssid_24()\n tcl_vars['ssid_5'] = self._get_ssid_5()\n tcl_vars['chamber_name'] = 'Chamber_{}'.format(self._get_chamber_name())\n\n ipv6_tcl = '/mnt/wifi_vol/SMART/test_suites/ixia/ipv6_defaults.tcl'\n tcl_vars['ipv6_defaults_tcl'] = ipv6_tcl\n\n return tcl_vars", "def __verify_details(self):\n if self.major[0] not in self.data[self.root]:\n self.data[self.root][self.major[0]] = {}\n for key, value in self.template_data[self.root][self.major[0]].items():\n key, value = self.__verified_details_key_value(key, value)\n self.data[self.root][self.major[0]][key] = self.__verify_values(key, value, self.data[self.root][self.major[0]])", "def _get_host_tmpl_vars(self, host_id, global_vars_dict):\n vars_dict = {}\n if global_vars_dict:\n temp = global_vars_dict[const.CLUSTER][const.DEPLOYED_PK_CONFIG]\n vars_dict[const.DEPLOYED_PK_CONFIG] = temp\n\n host_baseinfo = self.config_manager.get_host_baseinfo(host_id)\n util.merge_dict(vars_dict, host_baseinfo)\n\n pk_config = self.config_manager.get_host_package_config(host_id)\n if pk_config:\n # Get host template variables and merge to vars_dict\n metadata = self.config_manager.get_pk_config_meatadata()\n host_dict = self.get_tmpl_vars_from_metadata(metadata, pk_config)\n util.merge_dict(vars_dict[const.DEPLOYED_PK_CONFIG], host_dict)\n\n # Set role_mapping for host\n mapping = 
self.config_manager.get_host_roles_mapping(host_id)\n vars_dict[const.DEPLOYED_PK_CONFIG][const.ROLES_MAPPING] = mapping\n\n return {const.HOST: vars_dict}", "def make_entity_dict(class_reference, template, partial_dict): \n _data = class_reference.properties()\n for _key in _data:\n _data[_key] = partial_dict.get(_key, template.get(_key, '')) \n return _data", "def process_template(template, data):\n t = Template(template, data)\n t.job = get_current_job()\n t.process()\n\n result = dict(template=template, data=data, result_folder=t.resultdir, log=t.log)\n\n return result", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wcapi = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n # var_url = ''\n price = 0.0\n if variant.variant_id:\n info = {'id':variant.variant_id}\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku':variant.default_code, 'weight':str(weight),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n attributes = \\\n self.get_product_attribute(template.product_tmpl_id, instance, common_log_id,\n model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0,\n partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price':str(price), 'sale_price':str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku':variant.default_code,\n \"manage_stock\":variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price':str(price), 'sale_price':str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'update':woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % (res.content)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % (template.name))\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\":attributes})\n res = wcapi.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'create':variants_to_create})\n try:\n response = res.json()\n except Exception as e:\n message = \"Json Error : While update products to WooCommerce for instance %s.\" \\\n \" \\n%s\" % (instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n return data, 
flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n else:\n variant_id = product.get(\"id\")\n sku = product.get(\"sku\")\n variant = template.woo_product_ids.filtered(lambda x:x.default_code == sku)\n if variant:\n variant.write({\"variant_id\":variant_id, \"exported_in_woo\":True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards. It is looking for key strokes to designate ambiguous wild cards in runs. The mouse is ignored until you designate all the wilds (turn phase goes back to play).
def nextEventWildsOnBoard(self): if self.controller._state.rules.Shared_Board and self.num_wilds > 0: for self.event in pygame.event.get(): if self.event.type == pygame.QUIT: # The window crashed, we should handle this print("pygame crash, AAAHHH") pygame.quit() quit() else: # in Shared_Board games, check if there are wilds that need to be updated. # All other events are ignored until play is finished. HandManagement.wildsHiLoGetInput(self)
[ "def nextEvent(self):\n\n if self.controller._state.rules.Shared_Board:\n self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())\n if self.num_wilds > 0:\n self.nextEventWildsOnBoard()\n\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n wild_instructions = 'Use the keyboard to designate your prepared wild cards \\r\\n '\n wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'\n self.controller.note = wild_instructions\n pos = pygame.mouse.get_pos()\n\n if self.event.type == pygame.MOUSEBUTTONDOWN:\n self.RuleSetsButtons.ClickedButton(self, pos)\n for element in self.hand_info:\n # cannot select prepared cards, so not included in logic below.\n if element.img_clickable.isOver(pos):\n if element.status == 1:\n element.status = 0\n element.img_clickable.changeOutline(0)\n elif element.status == 0:\n element.status = 1\n element.img_clickable.changeOutline(2)\n\n elif self.event.type == pygame.MOUSEMOTION:\n self.RuleSetsButtons.MouseHiLight(self, pos)\n HandManagement.MouseHiLight(self.hand_info, pos)\n elif self.event.type == pygame.KEYDOWN:\n if self.controller._state.rules.Buy_Option:\n if self.controller.buying_opportunity:\n if self.event.key == pygame.K_y:\n self.controller.wantTopCard(True)\n self.controller.note = 'You have signaled you want to buy the card.'\n elif self.event.key == pygame.K_n:\n self.controller.wantTopCard(False)\n self.controller.note = 'You have signaled you do not want to buy the card.'\n if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n HandManagement.ManuallyAssign(self)", "def check_events_battle_screen(ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, action, player2):\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n sys.exit()\n\n\n elif event.type == pygame.MOUSEBUTTONDOWN:\n #check click on cards in hand\n for i in range(1,8):\n if Rect((100+145*(i-1)),610,130,180).collidepoint(pygame.mouse.get_pos()):\n battle_screen_hand_click_action('hand',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user,player2, position = str(i))\n break\n\n for i in range(1,4):\n if Rect(420,(220 + 110*(i-1)),130,80).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-monster',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2, position = str(i))\n break\n\n for i in range(4,7):\n if Rect(245, (220 + 110*(i-4)),130,80).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-monster',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2, position = str(i))\n break\n\n\n\n if Rect(20,40,130,180).collidepoint(pygame.mouse.get_pos()):\n battle_screen_battleground_click_action('player2-character',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n\n # win/lost back to main menu button\n if Rect(500, 500, 200, 40).collidepoint(pygame.mouse.get_pos()):\n if screen_status.battle_screen_action_indicator == 'game-end':\n screen_status.battle_screen_display = False\n screen_status.welcome_screen_display = True\n\n if Rect(200, 0, 50, 
30).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = True\n\n\n # When menu window is on\n if button_status.battle_screen_menu_display == True:\n\n # Turn sound on\n if Rect(447+280, 323-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = True\n # Turn sound off\n elif Rect(482+280, 323-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.sound_indicator = False\n # Turn music on\n elif Rect(447+280, 372-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = True\n # Turn music off\n elif Rect(482+280, 372-270, 28, 28).collidepoint(pygame.mouse.get_pos()):\n ai_settings.music_indicator = False\n\n # Change Theme\n elif Rect(447+280, 419-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Lith Harbor'\n change_bg_music('Lith Harbor')\n\n elif Rect(559+280, 419-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Leafre'\n change_bg_music('Leafre')\n\n elif Rect(447+280, 468-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Pantheon'\n change_bg_music('Pantheon')\n\n elif Rect(559+280, 468-270, 98, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.theme_indicator = 'Ellinia'\n change_bg_music('Ellinia')\n\n # change AI speeding\n elif Rect(475+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '1000'\n\n elif Rect(545+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '2000'\n\n elif Rect(615+280, 524-270, 56, 35).collidepoint(pygame.mouse.get_pos()):\n ai_settings.AI_speed_indicator = '3000'\n\n # Quit settings window\n elif Rect(699+280, 300-270, 21, 21).collidepoint(pygame.mouse.get_pos()):\n button_status.battle_screen_menu_display = False\n\n # Concede and back to main menu\n elif Rect(700, 310, 180, 40).collidepoint(pygame.mouse.get_pos()):\n screen_status.battle_screen_action_indicator = 'game-end'\n button_status.battle_screen_win_lost_indicator = 'lost'\n\n if button_status.rules_display == True:\n # When we click on '>'\n if Rect(640, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) < 4:\n button_status.rules_page_id = str(int(button_status.rules_page_id)+1)\n else:\n pass\n # When we click on '<'\n elif Rect(540, 37, 20, 20).collidepoint(pygame.mouse.get_pos()):\n if int(button_status.rules_page_id) > 1:\n button_status.rules_page_id = str(int(button_status.rules_page_id)-1)\n else:\n pass\n\n elif Rect(975, 35, 25, 25).collidepoint(pygame.mouse.get_pos()):\n button_status.rules_display = False\n\n\n\n\n if rect_union(buttons).collidepoint(pygame.mouse.get_pos()):\n for button in buttons:\n if button.rect.collidepoint(pygame.mouse.get_pos()):\n if button.text == 'Menu':\n button_status.battle_screen_menu_display = True\n\n elif button.text == '>':\n screen_status.battle_screen_my_hand_page_id += 1\n button_status.battle_screen_my_hand_indicator_display = False # Turn off display of buttons when change page\n\n if (screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast-and-equip'\n or screen_status.battle_screen_action_indicator == 
'stage-2-other-action-detail-spawn'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip'\n ):\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n\n elif button.text == '<':\n screen_status.battle_screen_my_hand_page_id -= 1\n button_status.battle_screen_my_hand_indicator_display = False\n\n if (screen_status.battle_screen_action_indicator == 'stage-1-level-up'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast-and-equip'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast'\n or screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip'\n ):\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n\n elif button.text == 'level up':\n battle_screen_hand_click_action('level up',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, player2)\n elif button.text == 'Yes':\n battle_screen_instruction_bar_yes_skip_action('yes',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user,action,player2)\n elif button.text == 'Skip':\n battle_screen_instruction_bar_yes_skip_action('skip',ai_settings,screen,buttons, screen_status, button_status, card_database_filter, user, action, player2)\n\n\n elif event.type == pygame.MOUSEMOTION: # Mostly for zoom in\n x = 0 # indicator helps remove zoom in.\n for i in range(1,8):\n if Rect((100+145*(i-1)),610,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'hand'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n for i in range(1,16):\n if Rect(1050,(220 + 23 * (i-1)),130,23).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 1 under'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n for i in range(1,16):\n if Rect(20,(220 + 23 * (i-1)),130,23).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 2 under'\n button_status.card_zoom_position_indicator = str(i)\n x = 1\n\n\n if Rect(1050,40,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 1'\n x = 1\n\n if Rect(20,40,130,180).collidepoint(pygame.mouse.get_pos()):\n button_status.card_zoom_active = True\n button_status.card_zoom_screen_indicator = 'battle_screen'\n button_status.card_zoom_part_indicator = 'character 2'\n x = 1\n\n if Rect(880, 5, 50, 20).collidepoint(pygame.mouse.get_pos()):\n button_status.battle_screen_history_bar_detail_display = True\n x = 1\n\n if x == 0:\n 
button_status.card_zoom_active = False\n button_status.battle_screen_history_bar_detail_display = False\n\n\n\n elif event.type == pygame.MOUSEBUTTONUP:\n pass", "def event2513():\n header(2513)\n\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit0)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit1)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit2)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit3)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit4)\n if_player_does_not_have_special_effect(7, SPEFFECT.RunicHit5)\n if_condition_true(0, 7)\n\n if_event_flag_on(1, EVENT.ScintillaRuneActive)\n\n if_player_has_special_effect(-1, SPEFFECT.RunicHit0)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit1)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit2)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit3)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit4)\n if_player_has_special_effect(-1, SPEFFECT.RunicHit5)\n if_condition_true(1, -1)\n\n if_condition_true(0, 1)\n\n # Roll d30.\n flag.disable_chunk(970, 999)\n flag.enable_random_in_chunk(970, 999)\n\n # Count appropriate flag range as success and spawn Scintilla projectile.\n if_player_has_special_effect(2, SPEFFECT.RunicHit0)\n skip_if_condition_false(4, 2)\n if_at_least_one_true_flag_in_range(-2, 970, 971) # 2/30 chance at Scintilla level 0.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(3, SPEFFECT.RunicHit1)\n skip_if_condition_false(4, 3)\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance at Scintilla level 1.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(4, SPEFFECT.RunicHit2)\n skip_if_condition_false(4, 4)\n if_at_least_one_true_flag_in_range(-2, 970, 973) # 4/30 chance at Scintilla level 2.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(5, SPEFFECT.RunicHit3)\n skip_if_condition_false(4, 5)\n if_at_least_one_true_flag_in_range(-2, 970, 974) # 5/30 chance at Scintilla level 3.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(6, SPEFFECT.RunicHit4)\n skip_if_condition_false(4, 6)\n if_at_least_one_true_flag_in_range(-2, 970, 975) # 6/30 chance at Scintilla level 4.\n restart_if_condition_false(-2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()\n\n if_player_has_special_effect(-3, SPEFFECT.RunicHit5)\n restart_if_condition_false(-3) # This shouldn't happen.\n if_at_least_one_true_flag_in_range(-2, 970, 972) # 3/30 chance of Crystal Scintilla at level 5.\n skip_if_condition_false(2, -2)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2002)\n skip(2)\n if_at_least_one_true_flag_in_range(-4, 973, 976) # 4/30 chance of normal Scintilla at level 5.\n skip_if_condition_false(1, -4)\n spawner.shoot_projectile(CHR.Player, projectile_entity_id=CHR.Player, damipoly_id=7, behavior_id=2001)\n restart()", "def beginSequenceandProbe(win, mouse, n):\n global iPattern\n squares = squarePattern[iPattern]\n for square 
in squares:\n square.setAutoDraw(True)\n iPattern += 1\n #done creating irregular square pattern\n sequence = range(NUM_SQUARES)\n random.shuffle(sequence)\n for i in sequence[:n]:\n squares[i].setFillColor(SQUARE_HIGHLIGHT_COLOR)\n win.flip()\n core.wait(HIGHLIGHT_TIME)\n squares[i].setFillColor(SQUARE_COLOR)\n #erase screen\n #for square in squares:\n # square.setAutoDraw(False)\n #win.flip()\n #core.wait(ISI_TIME)\n #begin asking for the subject to recall pattern\n for square in squares:\n square.setAutoDraw(True)\n instructions = visual.TextStim(win,text=\"Select the squares in the order they appeared. If you accidently click a square, click Back.\", pos=(0,10),wrapWidth=30)\n submitText = visual.TextStim(win,text=\"Submit\",pos=(5,-8))\n submitButton = visual.Rect(win,width=4, height=1.2, lineWidth=2)\n backText = visual.TextStim(win,text=\"Undo last click\",pos=(-5,-8))\n backButton = visual.Rect(win,width=7.3, height=1.2, lineWidth=2)\n backButton.setPos((-5,-8))\n submitButton.setPos((5,-8))\n submitButton.setAutoDraw(True)\n submitText.setAutoDraw(True)\n backButton.setAutoDraw(True)\n backText.setAutoDraw(True)\n instructions.setAutoDraw(True)\n instructions.draw()\n submitButton.draw()\n submitText.draw()\n backButton.draw()\n backText.draw()\n win.flip()\n #done rendering, time to start timer for subject response\n timer = core.Clock()\n clicked = []\n while(True):\n for i in range(len(squares)):\n if mouse.isPressedIn(squares[i]):\n clicked.append(i)\n squares[i].setFillColor(SQUARE_HIGHLIGHT_COLOR)\n win.flip()\n core.wait(0.200)\n while mouse.isPressedIn(squares[i]):\n pass\n squares[i].setFillColor(SQUARE_COLOR)\n win.flip()\n if(mouse.isPressedIn(backButton) and len(clicked) > 0):\n clicked.remove(clicked[len(clicked)-1])\n backText.setColor(SQUARE_HIGHLIGHT_COLOR)\n win.flip()\n core.wait(0.200)\n while mouse.isPressedIn(backButton):\n pass\n backText.setColor(\"White\")\n win.flip()\n if(mouse.isPressedIn(submitButton)):\n #erase display\n submitButton.setAutoDraw(False)\n submitText.setAutoDraw(False)\n backButton.setAutoDraw(False)\n backText.setAutoDraw(False)\n instructions.setAutoDraw(False)\n for i in range(len(squares)):\n squares[i].setAutoDraw(False)\n return (sequence[:n]==clicked,sequence[:n],clicked,correctness(clicked,sequence[:n]), n, timer.getTime())\n if(event.getKeys(keyList=['q','escape'])):\n quit()", "def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = 
temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = 
self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass", "def _check_keydown_play_events(self, event):\n\t\tif (event.key in (pygame.K_SPACE, pygame.K_UP)) and (\n\t\t\tself.bolan.rect.y >= self.bolan.default_y):\n\t\t\tself.bolan.is_jump = True\n\t\tif event.key == pygame.K_DOWN:\n\t\t\tself.bolan.is_duck = True", "def mouse_handler(self, event, mouse_buttons, mouse_movement, mouse_position):\n if mouse_movement and not self.msgs_shown[hash(TUTORIAL.MESSAGE_BOARD)]:\n self.show_message(TUTORIAL.MESSAGE_BOARD)\n return\n if self.console_active:\n if event.type == pygame.MOUSEBUTTONDOWN: #IF DICE: Do shit. Go activating and deactivating the spritse in the order that we want thhe player to play them.\n self.hide_messages()\n return\n super().mouse_handler(event, mouse_buttons, mouse_movement, mouse_position)", "def play_piano(self):\n def draw_regular_down(index_key):\n \"\"\"This draws whichever regular key has been pressed down\"\"\"\n @window.event\n def on_draw():\n window.clear()\n for i in range(len(self.natural_notes)):\n if i == index_key:\n white_down.blit(i*white.width,0)\n else:\n white.blit(i*white.width,0)\n for i in range(len(self.sharp_notes)):\n if i <2:\n black.blit(i*white.width+3*white.width/4,117)\n else:\n black.blit(i*white.width+7*white.width/4,117)\n def draw_sharp_down(index_key):\n \"\"\"This draws whichever sharp key has been pressed down\"\"\"\n @window.event\n def on_draw():\n window.clear()\n for i in range(len(self.natural_notes)):\n white.blit(i*white.width,0)\n for i in range(len(self.sharp_notes)):\n if i == index_key and i <2:\n black_down.blit(3*white.width/4 +white.width*i,117)\n elif i == index_key:\n black_down.blit(7*white.width/4+white.width*i,117)\n elif i<2:\n black.blit(i*white.width+3*white.width/4,117)\n else:\n black.blit(i*white.width+7*white.width/4,117)\n def draw_board():\n @window.event\n def on_draw():\n window.clear() \n for i in range(len(self.natural_notes)):\n white.blit(i*white.width,0)\n for i in range(len(self.sharp_notes)):\n if i >= 2:\n black.blit(7*white.width/4.+i*white.width,117 )\n else:\n black.blit(3*white.width/4 + white.width*i,117)\n draw_board()\n def play_song(self,somelist):\n \"\"\"Given some arbitrary list of key combos it can play a simple piano tune.\"\"\"\n if not somelist:\n return\n on_key_press(somelist[0],0)\n time.sleep(.19)\n return play_song(self,somelist[1:])\n @window.event\n def on_key_press(symbol, modifiers):\n useful_regular_keys = [key.A,key.S, key.E, key.D, key.F, key.G, key.H, key.J, key.K]\n 
useful_sharp_keys = [key.W, key.E, key.T, key.Y, key.U]\n if symbol in useful_regular_keys:\n draw_regular_down(useful_regular_keys.index(symbol))\n pyglet.resource.media(self.natural_notes[useful_regular_keys.index(symbol)])\n if symbol in useful_sharp_keys:\n draw_sharp_down(useful_sharp_keys.index(symbol))\n pyglet.resource.media(self.natural_notes[useful_sharp_keys.index(symbol)])\n @window.event\n def on_key_release(symbol, modifiers):\n \"\"\"This function redraws the board after a key has been released so it looks normal again\"\"\"\n useful_keys = [key.A, key.S, key.W, key.E, key.D, key.F,key.G,key.T,key.H,key.Y,key.U,key.J, key.K]\n if symbol in useful_keys:\n draw_board()", "def process_actions(self):\n box_x, box_y = self.board.getbox_atpixel(self.mouse_x, self.mouse_y)\n if box_x is not None and box_y is not None: # Mouse on a box\n if not self.board.states[box_x][box_y]: # Not already discovered\n self.board.draw_highlighted(box_x, box_y,\n self.board.hightlight_color)\n if not self.board.states[box_x][box_y] and self.mouse_clic:\n self.board.reveal_box([(box_x, box_y)], self.clock)\n self.board.states[box_x][box_y] = True\n # Check discovered boxes\n self.selection_state(box_x, box_y)", "def check_events(self):\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self._game_status = False\n\n # get coordinates of mouse click\n if event.type == pygame.MOUSEBUTTONDOWN:\n pos = pygame.mouse.get_pos()\n pos_tup = self.calculate_square(pos) # change x-y coord to square of row, column\n row = pos_tup[0]\n column = pos_tup[1]\n if (row == 0 or row == 9) or (column == 0 or column == 9):\n self.shoot_ray(row, column)\n elif 0 < row < 9 and 0 < column < 9:\n self.guess_atom(row, column)", "def test_040_mouse_keyboard(self):\n self.allow_service('qubes.InputMouse')\n self.allow_service('qubes.InputKeyboard')\n self.setUpDevice(mouse_events + keyboard_events)\n dev_name = '{}: {}'.format(\n self.vm.name if hasattr(self, 'vm') else 'remote',\n 'Test input device')\n self.find_device_and_start_listener('pointer:' + dev_name)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.find_device_and_start_listener('keyboard:' + dev_name)\n\n self.emit_click('KEY_A')\n self.emit_click('KEY_B')\n self.emit_click('KEY_C')\n self.emit_click('KEY_D')\n for _ in range(4):\n self.emit_click('KEY_BACKSPACE')\n\n for key in ('38', '56', '54', '40'):\n self.assertEvent(['RawKeyPress', key, {}])\n self.assertEvent(['RawKeyRelease', key, {}])\n for _ in range(4):\n self.assertEvent(['RawKeyPress', '22', {}])\n self.assertEvent(['RawKeyRelease', '22', {}])", "def bindHotkeys(self):\r\n self.root.bind(\"s\",self.pause)\r\n self.root.bind(\"p\",self.play)\r\n self.root.bind(\"x\",self.stop)\r\n self.root.bind(\"<Right>\",lambda event, t=10: self.skipFor(event,t=t))\r\n self.root.bind(\"<Left>\",lambda event, t=-10: self.skipFor(event,t=t))\r\n self.bindDPHotkeys()", "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == 
pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def process_IN_MOVE_SELF(self, event):", "def events(self):\n # Game Loop - Events\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.playing = False\n self.running = False\n\n if event.type == pg.KEYUP:\n if event.key == pg.K_SPACE:\n self.player.jump_cut()\n if event.key == pg.K_k:\n self.player.shooting_locked = False", "def test_020_mouse_keyboard_mouse_only(self):\n self.allow_service('qubes.InputMouse')\n self.setUpDevice(['BTN_LEFT', 'BTN_RIGHT', 'REL_X', 'REL_Y'] + keyboard_events)\n self.find_device_and_start_listener()\n self.emit_event('REL_X', 1)\n self.emit_event('REL_X', 1)\n self.emit_event('REL_Y', 1)\n self.emit_event('REL_Y', 1)\n self.emit_click('BTN_LEFT')\n\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'0': '1.00', '1': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawMotion', '0', {'1': '1.00', '0': '0.00'}])\n self.assertEvent(['RawButtonPress', '1', {}])\n self.assertEvent(['RawButtonRelease', '1', {}])\n\n self.emit_event('KEY_A', 1)\n self.emit_event('KEY_B', 1)\n self.emit_event('KEY_C', 1)\n self.emit_event('KEY_D', 1)\n self.assertNoEvent(msg=\"keyboard should be denied\")", "def check_events(arduino, led1, led2, led3, led4):\r\n \r\n for event in pygame.event.get():\r\n if event.type == 
pygame.QUIT:\r\n arduino.close()\r\n sys.exit()\r\n \r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n # Get Coordinates of where mouse when it was clicked\r\n mouse_x, mouse_y = pygame.mouse.get_pos()\r\n \r\n # Check if any of leds have been clicked. Becomes True\r\n # when it has been clicked and becomes False when it has \r\n # Not been clicked\r\n led1_clicked = led1.rect.collidepoint(mouse_x, mouse_y)\r\n led2_clicked = led2.rect.collidepoint(mouse_x, mouse_y)\r\n led3_clicked = led3.rect.collidepoint(mouse_x, mouse_y)\r\n led4_clicked = led4.rect.collidepoint(mouse_x, mouse_y)\r\n \r\n if led1_clicked:\r\n led_clicked(arduino, led1)\r\n \r\n elif led2_clicked:\r\n led_clicked(arduino, led2)\r\n \r\n elif led3_clicked:\r\n led_clicked(arduino, led3)\r\n \r\n elif led4_clicked:\r\n led_clicked(arduino, led4)", "def handle_events(self):\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN and event.unicode == 's':\n if self.paused:\n self.paused = False\n else:\n self.paused = True\n\n if pygame.key.get_pressed()[pygame.K_q]:\n sys.exit()\n elif pygame.key.get_pressed()[pygame.K_r]:\n self.randomize_grid()\n\n # can't be used with toggling, as your press on the key may toggle it several time,\n # and the probability with toggling it even times so it would be like it didn't toggle\n # using the KEYDOWN event like above is the way to do it\n\n # elif pygame.key.get_pressed()[pygame.K_s]:\n # print('paused')\n # if self.paused:\n # self.paused=0\n # else:\n # self.paused=1", "def _check_events(self):\n\n for event in self._event_queue.get():\n if event.type == pygame.QUIT:\n self._menu.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n self._level.player.jump()\n if event.key == pygame.K_ESCAPE:\n self.playing = False\n self._menu.show_start_view()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This submits the next user input to the controller. In games with Shared_Board = False (e.g. HandAndFoot), keystrokes do nothing unless the player is designating values for prepared wild cards, at which time the mouse is ignored unless you want to clear the prepared cards. In games with Shared_Board = True, wilds on the board might change designation when other cards are played. If the designation cannot be handled automatically (i.e. if the wild could sit at either the beginning or the end of a run), then it must be designated before play is completed. This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled.
def nextEvent(self):

    if self.controller._state.rules.Shared_Board:
        self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
        if self.num_wilds > 0:
            self.nextEventWildsOnBoard()

    for self.event in pygame.event.get():
        if self.event.type == pygame.QUIT:
            # The window crashed, we should handle this
            print("pygame crash, AAAHHH")
            pygame.quit()
            quit()

        if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
            wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
            wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
            self.controller.note = wild_instructions
        pos = pygame.mouse.get_pos()

        if self.event.type == pygame.MOUSEBUTTONDOWN:
            self.RuleSetsButtons.ClickedButton(self, pos)
            for element in self.hand_info:
                # cannot select prepared cards, so not included in logic below.
                if element.img_clickable.isOver(pos):
                    if element.status == 1:
                        element.status = 0
                        element.img_clickable.changeOutline(0)
                    elif element.status == 0:
                        element.status = 1
                        element.img_clickable.changeOutline(2)

        elif self.event.type == pygame.MOUSEMOTION:
            self.RuleSetsButtons.MouseHiLight(self, pos)
            HandManagement.MouseHiLight(self.hand_info, pos)
        elif self.event.type == pygame.KEYDOWN:
            if self.controller._state.rules.Buy_Option:
                if self.controller.buying_opportunity:
                    if self.event.key == pygame.K_y:
                        self.controller.wantTopCard(True)
                        self.controller.note = 'You have signaled you want to buy the card.'
                    elif self.event.key == pygame.K_n:
                        self.controller.wantTopCard(False)
                        self.controller.note = 'You have signaled you do not want to buy the card.'
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
                HandManagement.ManuallyAssign(self)
[ "def nextEventWildsOnBoard(self):\n\n if self.controller._state.rules.Shared_Board and self.num_wilds > 0:\n for self.event in pygame.event.get():\n if self.event.type == pygame.QUIT:\n # The window crashed, we should handle this\n print(\"pygame crash, AAAHHH\")\n pygame.quit()\n quit()\n else:\n # in Shared_Board games, check if there are wilds that need to be updated.\n # All other events are ignored until play is finished.\n HandManagement.wildsHiLoGetInput(self)", "def playGame(self, px, po):\r\n print(\"Welcome to Connect Four! \\n\") \r\n user_type = input(\"Play as a Human or Computer...or GRUTOR? \")\r\n while True: \r\n if user_type == \"Human\": \r\n print(self)\r\n user_col = input(\"Choose your column: \")\r\n try:\r\n col = int(user_col)\r\n except ValueError:\r\n user_col = input(\"Choose another column: \")\r\n while not self.allowsMove(col):\r\n print(\"That's not possible.\")\r\n user_col = input(\"Choose your column: \")\r\n col = int(user_col)\r\n self.addMove(col, px.ox)\r\n\r\n if self.winsFor(px.ox) == True:\r\n print(\"You win! Congratulations!\")\r\n break\r\n\r\n print(self)\r\n\r\n time.sleep(1)\r\n\r\n print(\"It's my turn now...thinking for a very short time...\")\r\n col = po.nextMove(self)\r\n self.addMove(col, po.ox)\r\n\r\n if self.winsFor(po.ox) == True:\r\n print(\"HAHAHAHAHA I HAVE OUTWITTED YOU, MEASLY HUMAN.\")\r\n print(\"First stop, Connect Four \\ntomorrow...THE WORLD!\")\r\n break\r\n elif user_type == \"Computer\":\r\n print(self)\r\n\r\n print(\"X's turn\")\r\n col = px.nextMove(self)\r\n self.addMove(col, px.ox)\r\n\r\n if self.winsFor(px.ox) == True:\r\n print(\"X wins!\")\r\n break\r\n\r\n print(self)\r\n\r\n time.sleep(1)\r\n\r\n print(\"O's turn\")\r\n col = po.nextMove(self)\r\n self.addMove(col, po.ox)\r\n\r\n if self.winsFor(po.ox) == True:\r\n print(\"O wins!\")\r\n break\r\n elif user_type == \"GRUTOR\":\r\n webbrowser.open_new(\"https://www.youtube.com/watch?v=dQw4w9WgXcQ\")\r\n break\r\n else:\r\n print(\"Neither!! Amazing. Here's a video you should watch: https://www.youtube.com/watch?v=PFrPrIxluWk :)\")\r\n webbrowser.open_new(\"https://www.youtube.com/watch?v=PFrPrIxluWk\")\r\n break\r\n \r\n print(self)", "def inputnextmove(self):\n possibles = self._possiblemoves()\n next = None\n if len(possibles):\n while not next in possibles: # ensure valid moves are taken by the human\n print (\"Current board position is: \")\n print (self)\n if self.mustslide():\n movetext = input(\"Slide a piece using > char - enter from > to: eg. 0,2 > 1,1: \")\n movecoords = re.search(\"(\\d+).*,.*(\\d+).*>.*(\\d+).*,.*(\\d+)\", movetext)\n if movecoords:\n frmove = [int(x) for x in movecoords.groups()[:2]]\n tomove = [int(x) for x in movecoords.groups()[2:4]]\n next = self.setslide(frmove, tomove)\n else:\n movetext = input(\"Your move - enter location coordinate pair: e.g. 
0,2: \")\n coords = re.search(\"(\\d+).*,.*(\\d+)\", movetext)\n if coords:\n move = [int(x) for x in coords.groups()]\n next = self.setmove(move)\n return next\n else:\n return None # no more moves possible", "def user_input2():\r\n\twhile states.running:\r\n\t\tif states.input_time:\r\n\t\t\tapp_cmd.cmdloop()\r\n\t\t\tstates.input_time = False\r\n\t\telse:\r\n\t\t\ttime.sleep(fps)", "def play_human_move(self):\n success, info = self.gms.play_human_move(raw_input('Make your next move\\n'.format('')))\n if success:\n print(self.gms.game.get_board_state_pretty())\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n self.play_human_move()\n elif info['status_code'] in [\n core_constants.GAME_STATUS_OVER_DRAW,\n core_constants.GAME_STATUS_OVER_HUMAN_WINNER,\n core_constants.GAME_STATUS_OVER_COMP_WINNER,\n ]:\n print(self.gms.status_code_message_map[info['status_code']])\n else:\n if info['error_code'] == core_constants.ERROR_CODE_INVALID_MOVE:\n self.play_human_move()", "def play():\r\n count = 0\r\n while True:\r\n count += 1\r\n board = create_board(6, 6, density=40)\r\n guess = create_guess_board(board)\r\n fill_empty(board, guess)\r\n print('Attempting to solve (%d) ...' % count)\r\n solver(board, guess)\r\n if solved(board, guess):\r\n guess = create_guess_board(board)\r\n fill_empty(board, guess)\r\n break\r\n\r\n cursor = [0, 0]\r\n\r\n while True:\r\n print()\r\n print_board(board, guess, cursor=cursor)\r\n if solved(board, guess):\r\n print('\\nYou solved it. Great work!')\r\n break\r\n # stuff = input('Your turn [1-9, ~, ^, s, ?, h, q]: ')\r\n # command = stuff.split()\r\n print('Your turn [1-9, ~, ^, s, ?, h, q]: ')\r\n stuff = msvcrt.getch()\r\n print('stuff = /%s/' % stuff)\r\n command = stuff.split()\r\n if len(command) == 0:\r\n continue\r\n if command[0] == 'q':\r\n break\r\n elif command[0] == 'h':\r\n print_board(board, cursor=cursor)\r\n elif command[0] == 's':\r\n solver(board, guess)\r\n elif command[0] == '?':\r\n if board[cursor[0]][cursor[1]] == BOARD_TENT:\r\n set(guess, cursor[0], cursor[1], BOARD_TENT)\r\n if board[cursor[0]][cursor[1]] == BOARD_EMPTY:\r\n set(guess, cursor[0], cursor[1], BOARD_EMPTY_GUESS)\r\n elif command[0] in ['~', '^']:\r\n if get(guess, cursor[0], cursor[1]) == BOARD_TREE:\r\n print('Please do not cut down the trees!')\r\n else:\r\n set(guess, cursor[0], cursor[1], command[0])\r\n elif command[0] in ['1', '2', '3', '4', '6', '7', '8', '9']:\r\n dir = int(command[0])\r\n adjacent = [\r\n [0, 0], #\r\n [1, -1], # 1\r\n [1, 0], # 2\r\n [1, 1], # 3\r\n [0, -1], # 4\r\n [0, 0], #\r\n [0, 1], # 6\r\n [-1, -1], # 7\r\n [-1, 0], # 8\r\n [-1, 1] # 9\r\n ]\r\n cursor[0] += adjacent[dir][0]\r\n cursor[1] += adjacent[dir][1]\r\n if cursor[0] < 0:\r\n cursor[0] = len(board) - 1\r\n if cursor[0] >= len(board):\r\n cursor[0] = 0\r\n if cursor[1] < 0:\r\n cursor[1] = len(board[0]) - 1\r\n if cursor[1] >= len(board[0]):\r\n cursor[1] = 0\r\n else:\r\n print(\"\"\"\r\nPlease type one of:\r\n # - Move the cursor in that direction\r\n ~ - Place an 'empty' marker at the current square\r\n ^ - Place a 'tent' marker at the current square\r\n h - Hint\r\n q - Quit\r\n\"\"\")\r\n\r\n print_board(board)", "def input(self, event):\n # If the window is quit.\n if event.type == pygame.QUIT:\n # Exit the game.\n return 0\n\n # If escape is hit.\n if (\n event.type == pygame.QUIT\n or event.type == pygame.KEYDOWN\n and event.key == pygame.K_ESCAPE\n ):\n # Return to the menu.\n return 1\n\n # If SPACE is hit.\n if event.type == pygame.KEYDOWN and 
event.key == pygame.K_SPACE:\n # If the player can move\n if self.background1.getMoving():\n # Jump sound effect.\n self.jumpSound.play()\n # Make the player jump.\n self.player.jump()\n\n # If game end.\n if self.gameEnd:\n # If the exit button is pressed.\n if self.exitButton.input(event):\n return 1\n # If the exit button is pressed.\n if self.retryButton.input(event):\n self.reset()\n\n # Continue the game.\n return 2", "def next_turn(self): \n if (self.moves):\n self.board = self.select_move() \n self.moves = []\n self.roll = self.roll_dice()\n self.player = not self.player\n self.generate_valid_moves()", "def inputnextmove(self):\n possibles = self._possiblemoves()\n next = None\n if len(possibles):\n while not next in possibles: # ensure valid moves are taken by the human\n print (\"Current board position is: \")\n print (self)\n move = input(\"Your move - enter location coordinate pair: e.g. 0,2: \")\n coords = re.search(\"(\\d+).*,.*(\\d+)\", move)\n if coords:\n \tmove = [int(x) for x in coords.groups()]\n \tnext = self.setmove(move)\n return next\n else:\n return None # no more moves possible", "def user_play(user_choice, checkerboard):\n print('请选择落子点')\n number = chess_limited(input(), checkerboard)\n checkerboard[number-1] = user_choice\n return checkerboard", "def play_game(word_list):\n hand = None\n while True:\n game_type = raw_input('Please choose from the following: n(new random hand), r(last hand) or e(exit the game):')\n if game_type == 'n':\n hand = deal_hand(HAND_SIZE)\n player_type = raw_input('Please choose from the following: u(user can play) or c(computer can play):')\n if player_type == 'u':\n play_hand(hand, word_list)\n elif player_type == 'c':\n comp_play_hand(hand, word_list)\n else: \n player_type = raw_input('Incorrect input. Please choose from the following: u(user can play) or c(computer can play):')\n elif game_type == 'r' and hand == None:\n print 'Incorrect input. Please first choose n.'\n elif game_type == 'r':\n player_type = raw_input('Please choose from the following: u(user can play) or c(computer can play):')\n if player_type == 'u':\n play_hand(hand, word_list)\n elif player_type == 'c':\n comp_play_hand(hand, word_list)\n else: \n player_type = raw_input('Incorrect input. 
Please choose from the following: u(user can play) or c(computer can play):') \n elif game_type == 'e':\n print \"Exited the game.\"\n break\n else: \n print 'Incorrect input.'", "def get_input(self):\n result = None\n\n try:\n while True:\n result = self.console.read_for_condition(prompt=\">>> \", condition=self.is_valid_input)\n\n if result is not None:\n break\n except KeyboardInterrupt:\n quit()\n\n # run command for next condition\n self.game_branch[result]()", "def process_input(self):\n # Gather all key inputs.\n key_pressed = False\n keys = []\n while True:\n k = self.wm.get_input()\n if k != -1:\n keys.append(k)\n else:\n break\n \n # For now, we only process individual key commands.\n # Soemthing like \"Shift + Left Arrow\" will result in multiple\n # keys and could trigger unintentional commands.\n # Disallow this until we support these kinds of key combinations.\n if len(keys) == 1:\n key_pressed = True\n key = keys[0]\n self.last_pressed_time = time.time()\n self.state.process_key(key)\n\n # If we didn't press a key, kick the state anyway.\n if not key_pressed:\n self.state.process_key(None)", "def game_start(user_choice, sequence_flag):\n checkerboard = ['_' for i in range(9)] # 棋盘列表\n compute_choice = 'O' if user_choice.lower() == 'x' else 'X'\n if sequence_flag: # 显示棋盘\n print('玩家先走')\n else:\n checkerboard = compute_play(compute_choice, checkerboard) # 电脑先走棋\n print('电脑先走')\n while True:\n display_board(checkerboard)\n checkerboard = user_play(user_choice, checkerboard)\n if referee(user_choice, checkerboard):\n print('玩家赢')\n display_board(checkerboard)\n break\n checkerboard = compute_play(compute_choice, checkerboard)\n if referee(compute_choice, checkerboard):\n print('电脑赢')\n display_board(checkerboard)\n break\n if '_' not in checkerboard:\n print('平局')\n display_board(checkerboard)\n break", "def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)", "def simulate(self):\n\t\tplt.ion()\n\t\tself.show_board()\n\t\twait = raw_input('Press enter to continue.')\n\t\tprint 'Simulating .......'\n\t\twhile self.robot.current_episode < self.number_of_episodes:\n\t\t\taction = self.robot.select_action(self.robot.coords)\n\t\t\tnew_coords = self.robot.find_destination(self.robot.coords, action)\n\t\t\tself.robot.moves += 1\n\n\t\t\treward = self.squares[new_coords[1]][new_coords[0]].reward + (\n\t\t\t\tself.squares[new_coords[1]][new_coords[0]].move_cost * self.robot.moves)\n\t\t\tself.robot.Q_Learning(action, reward, self.robot.coords, new_coords)\n\t\t\t\n\t\t\tself.robot.coords = new_coords\n\n\t\t\tif (self.squares[new_coords[1]][new_coords[0]].identifier == 'Death' or \n\t\t\t\tself.squares[new_coords[1]][new_coords[0]].identifier == 'Goal'):\n\t\t\t\tself.robot.moves = 0\n\t\t\t\tself.robot.coords = tuple(self.starting_coords)\n\t\t\t\tself.robot.current_episode += 1\n\t\tself.update_results()\n\t\twait = raw_input('Simulated. Press enter to exit.')", "def decide_next_turn(self, arg = None):\n\t\tif(self.action_queue.empty() or self.taking_input_flag):\n\t\t\treturn\n\t\telse:\n\t\t\taction = self.action_queue.dequeue_action()\n\t\t\taction.method(action.arg)\n\t\t\tself.current_level.enqueue_player_delay(self, action.delay)", "def play(self):\n intro1()\n name = input()\n player = cr.Player(name)\n intro2(name)\n gameRules()\n player.equipItem(brokenSword)\n while self.flag:\n print('\\n' + str(self.level) + '\\n')\n ctrl = input('Which way would you like to go? 
').lower()\n time.sleep(1)\n if ctrl in Game.ctrls:\n d = Game.ctrls.index(ctrl)\n self.prevPos = self.currPos[:]\n self.currPos[d <= 2] += d - (1 if d < 3 else 4)\n if self.movePlayer():\n time.sleep(1)\n self.action(player)\n else:\n self.currPos = self.prevPos[:]\n elif ctrl == Game.exit:\n self.flag = False\n else:\n print('Please enter a proper direction.')", "def perform_action(self, current_player, action):\n self.inputs_[action] = current_player\n if Config.USER['debug']['enabled']:\n print \"---\"\n print str(self.inputs_[0:3])\n print str(self.inputs_[3:6])\n print str(self.inputs_[6:9])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Confirm a user is sure about a discard and then perform it once confirmed.
def discardConfirmation(self, confirmed, wrapped_discards):

    discards = []
    for element in wrapped_discards:
        discards.append(element.card)
    if self.discards != discards:
        confirmed = False
        self.discards = discards
    if not confirmed:
        self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
        return True  # ask for confirmation
    else:
        # confirmed is True, performing discard and removing discarded wrapped cards from hand_info.
        if self.discard_confirm:
            controller_response = self.controller.discard(self.discards)
            if controller_response:
                for element in wrapped_discards:
                    self.hand_info.remove(element)
        return False  # now that this is done, we don't have anything waiting on confirmation
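The method above is a two-pass confirmation: the first call only records the pending discard, sets a "please confirm" note, and returns True; a repeat call with confirmed=True and the same selection actually performs the discard. A minimal sketch of how a caller might drive that flow — the names confirm_and_discard, view, and clicked_again are hypothetical, not taken from the source, and the selection test (status == 1) is borrowed from the nextEvent code above:

# Hypothetical driver for the two-pass discard flow; assumes 'view' is the object that
# defines discardConfirmation, hand_info and discard_confirm as shown above.
def confirm_and_discard(view, clicked_again):
    selected = [w for w in view.hand_info if w.status == 1]  # wrapped cards the user selected
    pending = view.discardConfirmation(False, selected)      # 1st pass: asks for confirmation
    if pending and clicked_again:
        # 2nd pass: performs the discard, provided view.discard_confirm is set.
        view.discardConfirmation(True, selected)
    return pending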
[ "def confirm_with_abort() -> None:\n\n click.confirm(\n \"Are you sure you want to drop the users table?\",\n abort=True\n )\n\n click.echo(\"We have gotten to this point, so the user has confirmed.\")", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def leaving_confirm(self):\n self.ensure_one()\n self.state = 'confirm'", "async def confirm(ctx, *args: discord.Member):\n await _confirm(args)", "def _confirm(self, confirmation, **kwargs):\n pass", "def confirm(self):\n for item in self.item_set.filter(~Q(statuses__status=sts_const.CONFIRMED)):\n item.change_status(sts_const.CONFIRMED)\n\n signals.order_confirm.send(instance=self, now=timezone.now())", "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def confirm_removal(confirm, filename):\n if confirm == 'y' or confirm == 'yes':\n remove_file(filename)\n elif confirm == 'n' or confirm == 'no':\n print(\"File will stay there\")\n else:\n print(\"Please etner a valid answer (y/n, yes/no)\")\n confirm_removal()", "def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()", "async def _confirm_action(self, confirmation, manager):\r\n def check(reaction, user):\r\n \"\"\"Check if the reaction is by the bot and then if it's an OK or a not OK\"\"\"\r\n if user.id == msg.author.id:\r\n pass\r\n else:\r\n return user.id == manager.id and (str(reaction.emoji) == '✅' or str(reaction.emoji) == '🛑')\r\n\r\n msg = await self.bot.say(confirmation)\r\n\r\n await self.bot.add_reaction(msg, '✅')\r\n await self.bot.add_reaction(msg, '🛑')\r\n await self._deletion_queue(msg)\r\n\r\n react = await self.bot.wait_for_reaction(timeout=60.0, message=msg, check=check)\r\n if react:\r\n return str(react.reaction.emoji) == '✅'\r\n return False", "def _confirm_unsaved(self):\n if(hasattr(self, '_saved')):\n if(not self._saved):\n return zio.promptYesNo('This will overwrite unsaved data, are you sure?')\n\n return True", "def confirm_delete(self):\n self.language = LANGUAGE.get(self.lang)\n message = Message(self.language[\"del_user\"], self.language[\"del_info\"])\n delete_message = message.create_question_message(self.language[\"yes\"])\n response = delete_message.exec()\n\n if response == QMessageBox.Yes:\n self.delete_user()\n elif response == QMessageBox.No:\n delete_message.close()", "def refuse_with_confirmation(proc):\n proc.sendline(u'mkdir -p ~/.thefuck')\n proc.sendline(u'echo \"require_confirmation = True\" > ~/.thefuck/settings.py')\n\n proc.sendline(u'ehco test')\n\n proc.sendline(u'fuck')\n assert proc.expect([TIMEOUT, u'echo test'])\n assert proc.expect([TIMEOUT, u'enter'])\n assert proc.expect_exact([TIMEOUT, u'ctrl+c'])\n proc.send('\\003')\n\n assert proc.expect([TIMEOUT, u'Aborted'])", "def get_confirmation():\n should_save = self.__get_boolean_input_from_user('Save crossword?')\n if should_save:\n save_crossword(crossword)\n else:\n confirm_discard = self.__get_boolean_input_from_user('Are you sure you want to discard this crossword?')\n if not confirm_discard:\n save_crossword(crossword)\n else:\n self.__display_information('Crossword discarded.')", "def test_confirm_user(self):\n user = User(email=\"test@email.com\", password=\"testpassword\")\n\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n 
self.assertIsNotNone(user.confirmation_token)\n\n user.confirm()\n\n self.assertTrue(user.confirmed)\n self.assertIsNotNone(user.confirmed_at)\n self.assertIsNone(user.confirmation_token)", "def prompt_discard(self, num_discards: int, state: 'State'):\n # TODO: Refactor to allow for flexible discarding (see Cellar). Meybe a force discard and a prompt discard?\n while self.hand and num_discards > 0:\n sorted_hand = sorted(list(self.hand), key=card_sort)\n card_name = self.get_input(\n f'Discard {num_discards} cards'\n f'Hand: {sorted_hand}',\n sorted_hand,\n state\n )\n # If the prompted card is in hand, discard it\n card = next((card for card in self.hand if card.name == card_name), None)\n if card:\n self.hand[card] -= 1\n self.hand += Counter() # Remove 0 and negative counts\n self.discard_pile.append(card)\n num_discards -= 1\n print(f'Discarded {card.name}')\n else:\n print(f'{card.name} is not in hand')", "def confirmCutdown(self):\n\t\tif self.stillImageOnline:\n\t\t\tprint(\"Still Image System cannot be Online\")\n\t\t\tself.updateRFDBrowser(\"Still Image System cannot be Online\")\n\t\t\treturn\n\n\t\tif not self.RFDAttached:\n\t\t\tprint(\"No RFD Attached\")\n\t\t\tself.updateRFDBrowser(\"No RFD Attached\")\n\t\t\treturn\n\t\t\t\n\t\t# Create the window to ask for confirmation, with text and buttons\n\t\tself.confirmationCheckWindow = QWidget()\n\t\tself.confirmationLabel = QLabel()\n\t\tself.confirmationLabel.setText(\"WARNING! Are you sure you want to cutdown?\")\n\t\tself.confirmationYesButton = QPushButton()\n\t\tself.confirmationNoButton = QPushButton()\n\t\tself.confirmationYesButton.setText(\"Yes\")\n\t\tself.confirmationNoButton.setText(\"No\")\n\t\tself.confirmationHLayout = QHBoxLayout()\n\t\tself.confirmationVLayout = QVBoxLayout()\n\t\tself.confirmationHLayout.addWidget(self.confirmationYesButton)\n\t\tself.confirmationHLayout.addWidget(self.confirmationNoButton)\n\t\tself.confirmationVLayout.addWidget(self.confirmationLabel)\n\t\tself.confirmationVLayout.addLayout(self.confirmationHLayout)\n\t\tself.confirmationCheckWindow.setLayout(self.confirmationVLayout)\n\t\tself.confirmationCheckWindow.show()\n\n\t\t# Connect the buttons to the functions\n\t\tself.confirmationYesButton.clicked.connect(lambda: self.attemptCutdown())\n\t\tself.confirmationNoButton.clicked.connect(lambda: self.deleteWindow(self.confirmationCheckWindow))", "def __onConfirmNo(self):\n self.__confDlg.reject()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test Category model data insertion/types/field attributes
def test_category_model_entry(self):
    # Test loading the information into the models under test
    data = self.data1
    self.assertTrue(isinstance(data, Category))  # run the check
[ "def test_category_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Category))", "def test_category_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Category))\n self.assertEqual(str(data), 'recipe')", "def test_category_model_entry(self):\n data = self.data1\n self.assertTrue(isinstance(data, Category))\n self.assertEqual(str(data), 'django')", "def test_create_category(self):\n pass", "def test_category_has_access_to_model_data():\n category = Category()\n category_data = category.get_category_data()\n\n assert type(category_data) is list\n assert len(category_data) > 1", "def test_category_object(self):\n print('Test Category object run - correctly create object Category model')\n tools = Category.objects.get(name=\"Tools\")\n protection = Category.objects.get(name=\"Protection means\")\n self.assertEqual(tools.id, 1)\n self.assertEqual(tools.description, 'Building tools')\n self.assertEqual(type(tools.created), datetime.datetime)\n self.assertEqual(tools.ranking, 0)\n self.assertEqual(protection.id, 2)\n self.assertEqual(protection.description, 'Means of protection')\n self.assertEqual(type(protection.created), datetime.datetime)\n self.assertEqual(protection.ranking, 15)", "def test_update_category_custom_field(self):\n pass", "def test_update_category(self):\n pass", "def test_category_save(database):\n category = Category(title=\"Test Category\")\n category.save()\n\n assert category.title == \"Test Category\"", "def test_successful_category_update(self):\n # Data that we'll post to the server to get the new category created\n new_category = {\n \"category\": \"Alien Habitats\"\n }\n\n self.client.post(\n reverse('edit-category', kwargs={'id': '1'}),\n new_category)\n\n category = Category.objects.get(id=1)\n for field in new_category:\n self.assertEqual(\n getattr(category, field), new_category[field])", "def test_create_cat_object():\n from .scripts.initializedb import create_cat_object\n cat_object = create_cat_object(\"a\", \"b\", \"c\", \"c\")\n assert isinstance(cat_object, Category)", "def test_search_category_custom_field(self):\n pass", "def test_add_category(self):\n self.add_success(self.test_data['pants'])", "def test_category_creation(self):\n response = self.client.post(\n '/v2/categories',\n data=json.dumps({\n 'name': \"Electronics\"\n }),\n content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 201)", "def test_insert_database(self):\n expected_results = Config.DATABASE_EXPECTED\n category_name = \"Boissons\"\n category_object = Category.objects.create(name=category_name)\n\n fill_database: FillDatabase = FillDatabase(category_name, False)\n fill_database.insert_database(expected_results, category_object)\n assert Product.objects.count() == 1\n\n product_data = Product.objects.first()\n for attr_key, attr_val in expected_results.items():\n assert expected_results[attr_key] == getattr(product_data, attr_key)\n\n assert Category.objects.count() == 1\n assert Category.objects.first().name == category_name\n assert Product.objects.first().category_set.first().name == category_name", "def test_create_recipe_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n rv = self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n self.assertIn(b'Recipe created', rv.data)", "def test_create_category(self):\n res = 
self.client().post('/categories/', data=self.category)\n self.assertEqual(res.status_code, 201)\n self.assertIn('Stews', str(res.data))", "def test_post_category(self):\n response = client.post(reverse('category-list'), self.category_data)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.data['name'], self.category_data['name'])", "def test_update_category_level(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test product model data insertion/types/field attributes
def test_products_model_entry(self):
    data = self.data1
    self.assertTrue(isinstance(data, Product))
    self.assertEqual(str(data), 'django beginners')
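The second assertion only passes if Product defines __str__; the model itself is not shown in this row. A minimal sketch of the kind of model that assertion implies — the title field name and max_length are assumptions, not taken from the source:

from django.db import models

# Sketch only; the real Product model's fields are not shown in this row.
class Product(models.Model):
    title = models.CharField(max_length=255)  # assumed field backing str(product)

    def __str__(self):
        return self.title  # so str(data) can compare equal to 'django beginners'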
[ "def test_product_fields(self):\n\n prd = Product.objects.get(id=1)\n\n # test the type of name field\n prd_type = prd._meta.get_field('name').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label name\n max_length = prd._meta.get_field('name').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label name\n prd_blank = prd._meta.get_field('name').blank\n self.assertTrue(prd_blank)\n # test null field in label name\n prd_null = prd._meta.get_field('name').null\n self.assertTrue(prd_null)\n\n # test the type of description field\n prd_type = prd._meta.get_field('description').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label description\n max_length = prd._meta.get_field('description').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label description\n prd_blank = prd._meta.get_field('description').blank\n self.assertTrue(prd_blank)\n # test null field in label description\n prd_null = prd._meta.get_field('description').null\n self.assertTrue(prd_null)\n\n # test the type of nutrition_grade field\n prd_type = prd._meta.get_field('nutrition_grade').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label nutrition_grade\n max_length = prd._meta.get_field('nutrition_grade').max_length\n self.assertEqual(max_length, 1)\n # test blank field in label nutrition_grade\n prd_blank = prd._meta.get_field('nutrition_grade').blank\n self.assertTrue(prd_blank)\n # test null field in label nutrition_grade\n prd_null = prd._meta.get_field('nutrition_grade').null\n self.assertTrue(prd_null)\n\n # test the type of barcode field\n prd_type = prd._meta.get_field('barcode').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label barcode\n max_length = prd._meta.get_field('barcode').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label barcode\n prd_blank = prd._meta.get_field('barcode').blank\n self.assertFalse(prd_blank)\n # test null field in label barcode\n prd_null = prd._meta.get_field('barcode').null\n self.assertFalse(prd_null)\n\n # test the type of url field\n prd_type = prd._meta.get_field('url').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url\n max_length = prd._meta.get_field('url').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url\n prd_blank = prd._meta.get_field('url').blank\n self.assertTrue(prd_blank)\n # test null field in label url\n prd_null = prd._meta.get_field('url').null\n self.assertTrue(prd_null)\n\n # test the type of url_pic field\n prd_type = prd._meta.get_field('url_pic').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label url_pic\n max_length = prd._meta.get_field('url_pic').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label url_pic\n prd_blank = prd._meta.get_field('url_pic').blank\n self.assertTrue(prd_blank)\n # test null field in label url_pic\n prd_null = prd._meta.get_field('url_pic').null\n self.assertTrue(prd_null)\n\n # test the type of store field\n prd_type = prd._meta.get_field('store').get_internal_type()\n self.assertEqual(prd_type, 'CharField')\n # label store\n max_length = prd._meta.get_field('store').max_length\n self.assertEqual(max_length, 255)\n # test blank field in label store\n prd_blank = prd._meta.get_field('store').blank\n self.assertTrue(prd_blank)\n # test null field in label store\n prd_null = prd._meta.get_field('store').null\n self.assertTrue(prd_null)\n\n # test the type of fat field\n prd_type = 
prd._meta.get_field('fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label fat max digits\n max_digits = prd._meta.get_field('fat').max_digits\n self.assertEqual(max_digits, 5)\n # label fat decimal places\n dec_places = prd._meta.get_field('fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label fat\n prd_blank = prd._meta.get_field('fat').blank\n self.assertTrue(prd_blank)\n # test null field in label fat\n prd_null = prd._meta.get_field('fat').null\n self.assertTrue(prd_null)\n\n # test the type of saturated_fat field\n prd_type = prd._meta.get_field('saturated_fat').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label saturated_fat max digits\n max_digits = prd._meta.get_field('saturated_fat').max_digits\n self.assertEqual(max_digits, 5)\n # label saturated_fat decimal places\n dec_places = prd._meta.get_field('saturated_fat').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label saturated_fat\n prd_blank = prd._meta.get_field('saturated_fat').blank\n self.assertTrue(prd_blank)\n # test null field in label saturated_fat\n prd_null = prd._meta.get_field('saturated_fat').null\n self.assertTrue(prd_null)\n\n # test the type of sugar field\n prd_type = prd._meta.get_field('sugar').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label sugar max digits\n max_digits = prd._meta.get_field('sugar').max_digits\n self.assertEqual(max_digits, 5)\n # label sugar decimal places\n dec_places = prd._meta.get_field('sugar').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label sugar\n prd_blank = prd._meta.get_field('sugar').blank\n self.assertTrue(prd_blank)\n # test null field in label sugar\n prd_null = prd._meta.get_field('sugar').null\n self.assertTrue(prd_null)\n\n # test the type of salt\n prd_type = prd._meta.get_field('salt').get_internal_type()\n self.assertEqual(prd_type, 'DecimalField')\n # label salt max digits\n max_digits = prd._meta.get_field('salt').max_digits\n self.assertEqual(max_digits, 5)\n # label salt decimal places\n dec_places = prd._meta.get_field('salt').decimal_places\n self.assertEqual(dec_places, 2)\n # test blank field in label salt\n prd_blank = prd._meta.get_field('salt').blank\n self.assertTrue(prd_blank)\n # test null field in label salt\n prd_null = prd._meta.get_field('salt').null\n self.assertTrue(prd_null)\n\n # test the type of prd_cat\n prd_type = prd._meta.get_field('prd_cat').get_internal_type()\n self.assertEqual(prd_type, 'ForeignKey')\n # label db_column\n fk = prd._meta.get_field('prd_cat').db_column\n self.assertEqual(fk, 'prd_cat')\n # test blank field in label prd_cat\n prd_blank = prd._meta.get_field('prd_cat').blank\n self.assertFalse(prd_blank)\n # test null field in label prd_cat\n prd_null = prd._meta.get_field('prd_cat').null\n self.assertFalse(prd_null)\n\n # Favourite table ----------------------------------------------------", "def test_Product(self):\n self.assertEquals(self.prod_1.pk, 1)\n self.assertEquals(self.prod_1.ean, '3350033118072')\n self.assertEquals(self.prod_1.name, 'test 1')\n self.assertEquals(self.prod_1.nutriscore, 'u')\n self.assertEquals(self.prod_1.category, 'cat 1')", "def test_product(self):\n self.assertIsInstance(self.obj.product, Product)", "def test_model_save_fields(self):\n self.create_cat_and_product()\n product = self.product\n product.save()\n uploaded_img = settings.MEDIA_ROOT + os.sep + str(product.image)\n\n self.assertEqual(product.name, \"Honey\")\n 
self.assertIsInstance(product.name, str)\n self.assertIsInstance(product.category, Category)\n self.assertEqual(product.description, \"Honey is good\")\n self.assertIsInstance(product.description, str)\n self.assertEqual(product.price, self.product_data[\"price\"])\n self.assertIsInstance(\n product.image,\n models.fields.files.ImageFieldFile\n )\n self.assertTrue(os.path.isfile(uploaded_img))\n product.image.delete()", "def test_create_base_product(self):\n description = \"Motherboards or mainboards\"\n name = \"Motherboards\"\n models.Category.objects.create(name=name)\n category = models.Category.objects.get(name=name)\n models.BaseProduct.objects.create(\n category=category, description=description)\n get_data = models.BaseProduct.objects.get(category=category.id)\n self.assertEqual(description, get_data.description)", "def test_model_fields_with_correct_values(self):\n\n self.assertEqual(self.warehouse.address, \"Test address\")\n self.assertEqual(self.warehouse.phone, \"+36735454656\")", "def test_new_product_weight(self):\r\n prod = Product('Test Product')\r\n self.assertEqual(prod.weight, 15)", "def test_product(self):\n product = self.api.fetch_product(1)\n assert product.id == 1\n assert product.category_id == 1\n assert product.title == 'Apple iPhone 6'\n assert len(product.offers) == 5\n assert product.min_price == 15537.0\n assert product.max_price == 16620.0", "def test_update_attribute_data(self):\n pass", "def test_update_product_required_fields(self):\n data = {\n 'pk': 1,\n 'name': None,\n 'description': '''\n Yogurt also spelled yoghurt, yogourt or yoghourt,\n is a food produced by bacterial fermentation of milk.\n '''\n }\n url = reverse('products:detail', kwargs={'pk': data['pk']})\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.filter(name=None).count(), 0)", "def setUp(self):\n super().setUp()\n list_of_product_types = [\n 'default_product_variant',\n 'multiple_product_variants',\n 'ceo_title'\n ]\n self.new_product = eval(f\"get_new_product_with_\" \\\n f\"{list_of_product_types[randint(0, len(list_of_product_types) - 1)]}()\")\n response = ProcessRequest('products.json').send_request(\n 'POST',\n data=self.new_product,\n expected_return_codes=[201],\n )\n self.product_id = response.response['product']['id']", "def setUp(self):\r\n super(ProductDetailTest, self).setUp()\r\n self.product = self.F.ProductFactory.create()", "def test_product_type():\n era5 = initialize()\n producttype = era5._product_type()\n assert producttype == 'ensemble_members'\n\n era5.ensemble = False\n producttype = era5._product_type()\n assert producttype == 'reanalysis'\n\n era5.period = 'monthly'\n producttype = era5._product_type()\n assert producttype == 'monthly_averaged_reanalysis'\n\n era5.synoptic = True\n producttype = era5._product_type()\n assert producttype == 'monthly_averaged_reanalysis_by_hour_of_day'\n\n era5.ensemble = False\n era5.statistics = True\n producttype = era5._product_type()\n assert producttype == 'monthly_averaged_reanalysis_by_hour_of_day'", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n 
self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test_custom_attribute_post_both(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n cad_json = builder.json.publish(cad.__class__.query.get(cad.id))\n cad_json = builder.json.publish_representation(cad_json)\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_definitions\":[\n cad_json,\n ],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"new value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"custom_attributes\": {\n cad.id: \"old value\",\n },\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertEqual(ca_json[\"attribute_value\"], \"new value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"new value\"\n )", "def test_register_product():\n assert Product.objects.count() == 0\n Product.objects.create(\n product_name = 'Starwars', unit_price = 10, multiple = 1\n )\n assert Product.objects.count() == 1", "def test_edit_no_product(self):\r\n mozlogger.info('test_edit_no_product')\r\n\r\n # create fixture\r\n fixture1 = self.factory\r\n backend_obj = self.backend_object(fixture1.id)\r\n obj_id = str(fixture1.id)\r\n fields = self.new_object_data\r\n product = fields.pop(u'product')\r\n\r\n # do put\r\n res = self.put(\r\n self.get_detail_url(self.resource_name, obj_id),\r\n params=self.credentials,\r\n data=fields\r\n )\r\n\r\n # make sure object has been updated in the database\r\n fields[u'product'] = product\r\n fixture1 = self.refresh(fixture1)\r\n backend_data = self.clean_backend_data(fixture1)\r\n\r\n self.maxDiff = None\r\n self.assertEqual(fields, backend_data)", "def test_admin_product_info(self):\n self.add_product(self.TESTPRODUCT1, 1)\n\n # Missing product\n rv = self.app.get('/admin/product/nothing', follow_redirects=True)\n assert b'Produkten existerar inte!' 
in rv.data\n\n # Existing product\n rv = self.app.get('/admin/product/%s' % self.TESTPRODUCT1['barcode'], follow_redirects=True)\n assert self.TESTPRODUCT1['name'] in rv.data", "def test_data_object_vaporise(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the total, nonblank and net loc for all the python files in a directory
def get_folder_total(path):
    files = os.listdir(path)
    pythonfiles = ['%s/%s' % (path, filename) for filename in files if filename[-3:] == '.py']
    total = { 'net': 0, 'total': 0, 'nonblank': 0, 'num_inputs': 0 }
    for filename in pythonfiles:
        with open(filename, 'r') as thisfile:
            blob = thisfile.read()
            # print filename
            thisloc = loc(blob)
            for k, v in thisloc.items():
                total[k] += v
    return total
[ "def count_loc(project_path: str) -> int:\n _python_files = get_python_files(project_path)\n\n _loc = 0\n\n for _file_path in _python_files:\n if os.path.isfile(_file_path):\n _loc = _loc + _single_file_loc(_file_path)\n\n return _loc", "def file_stats(file_pairs):\n loc = 0\n nfiles = 0\n nsuites = 0\n ntests = 0\n for path, filename in file_pairs:\n loc += int(os.popen('wc -l '+path+'/'+filename).read().split()[0])\n nfiles += 1\n if (filename[:4] == 'Test'):\n nsuites+=1\n ntests+= int(os.popen('egrep -c -i \"void\\s+Test\" '+path+'/'+filename).read().split()[0])\n return (nfiles, loc, nsuites, ntests)", "def grab_used_nos():\n\n files = os.listdir(\"data/input/xml\")\n print(\"found {} files.\".format(len(files)))\n #print(\"files: {}\".format(files))\n\n print(\"\\nloading system numbers from files...\\n\")\n res = []\n i = 1\n max = len(files)+1\n for f in files:\n sys.stdout.write(\"\\r{} of {}\".format(i, max))\n sys.stdout.flush()\n i = i + 1\n # print(f)\n sys_no = get_by_name(f)\n res.append(sys_no)\n\n print(\"\\ngot {} numbers to work with.\".format(len(res)))\n# print(\"Result: \")\n# print(res)\n return res", "def fileCounter(directory):", "def analyze_files(self) -> None:\n for file in os.listdir(self.directory):\n if file.endswith(\".py\"):\n try:\n if not (fp := open(os.path.join(self.directory, file), \"r\")):\n raise FileNotFoundError(\n f\"File {fp} is could not able to found \")\n\n except FileNotFoundError:\n print(f\"File {fp} could not able to found\")\n\n with fp:\n class_count: int = 0\n function_count: int = 0\n line_count: int = 0\n char_count: int = 0\n\n for line in fp:\n if line.strip().startswith(\"def \"):\n function_count = function_count + 1\n\n elif line.strip().startswith(\"class \"):\n class_count = class_count + 1\n\n line_count = line_count + 1\n char_count += len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\n \"class\": class_count,\n \"line\": line_count,\n \"function\": function_count,\n \"char\": char_count\n }", "def check_dir(self):\n if not Path(self.src_dir).exists():\n print('No such directory found:', self.src_dir)\n return\n\n nc_all = self.src_dir + \"/*.nc*\"\n if len(glob.glob(nc_all)) == 0:\n print('No NetCDF files found in:', self.src_dir)\n return\n\n return nc_all", "def analyze_files(self):\n num_file = 0\n results = dict()\n try:\n list_files = os.listdir(self.directory)\n except FileNotFoundError:\n raise FileNotFoundError(\"Can't find any file\")\n else:\n for file in list_files: #looping the files in the directly\n num_file += 1\n if file.endswith(\".py\"): # Looking for files that end with .py\n try:\n fp = open(os.path.join(self.directory, file), \"r\")\n except FileNotFoundError:\n raise FileNotFoundError(f\"Can't open file no {num_file}\")\n else:\n with fp:\n c_total = 0 #Total length of Characters for the entire file\n filename = file # Storing the file name\n t_line = 0 # Getting the total number of line\n t_def = 0 #Getting the total number of functions\n t_class = 0 #Getting the total number of classes\n \n for line in fp:\n t_line += 1 # Counting each line\n t_char = len(line) #Length of characters for each line\n n_line = line.strip() # gets rid of white spaces and new lines\n c_total += t_char # adding each total char in line to the pervious total char in line\n if n_line.startswith(\"def \"): \n t_def += 1 \n elif n_line.startswith(\"class \"):\n t_class += 1\n results[filename] = {'class': t_class, 'function': t_def, 'line': t_line, 'char': c_total }\n return results", "def 
list_local(self):\n print(\"-\"*42)\n print(\"| LOCAL FILES\")\n print(\"-\"*42)\n print(\"| [Size (MB)]-> File_name\")\n print(\"-\"*42)\n for file_ in iglob(path.join(self._source, \"*\"), recursive=True):\n print(\"| [{:0.2f}]-> {}\".format((path.getsize(file_) /\n 1024) / 1024, file_))\n print(\"-\"*42)", "def local_tree_stats(dirs):\n total_nfiles = 0\n total_size = 0\n\n for d in dirs:\n nfiles = 0\n size = 0\n\n for root, dirs, files in os.walk(d):\n nfiles += len(files)\n size += sum(os.path.getsize(os.path.join(root, name))\n for name in files)\n\n total_nfiles += nfiles\n total_size += size\n\n return total_nfiles, total_size", "def getALLPythonFiles(_r): # {{{\n return EExplorer.getPythonFiles(_r) + EExplorer.getPythonExecFiles(_r)", "def execute(root_dir):\n \n \n #Getting all the file recursively that py files\n lenght=[]\n libraries=[]\n nesting_factors=[]\n param_count=[]\n total_var=[]\n duplicate_for_the_repo=[]\n average_nesting_factor=0\n average_param=0\n code_duplication=0\n avg_var=0\n \n k=root_dir.rsplit('-')\n n=k[0]\n m=k[-1]\n \n urls=[ repo for repo in repo_list if n and m in repo ]\n if urls:\n url=urls[0]\n else:\n url=root_dir\n\n for filename in glob.iglob(root_dir + '/**/*.py', recursive=True):\n #filename=filename.replace(\" \", \"\\\\ \")\n filename=str_to_raw(filename)\n try: \n count=pygount.source_analysis(filename, 'pygount') # counting the line of codes for the py files\n l=count.code\n lenght.append(l)\n library =imported_module(filename)\n for lib in library:\n libraries.append(lib)\n deg_list=nesting_factor(for_loop_position(filename)) \n for deg in deg_list:\n nesting_factors.append(deg)\n\n\n\n for param in parameter_count(filename):\n param_count.append(param)\n for var in variable_count(filename):\n total_var.append(var)\n duplicate_for_the_repo.append(duplicated_line(filename))\n except Exception as e:\n print(\"type error: \" + str(e))\n print(filename)\n \n \n if len(nesting_factors) !=0: \n average_nesting_factor= np.mean(nesting_factors)\n if param_count: \n average_param= np.mean(param_count) \n libraries=unique(libraries)\n repo_count=sum(lenght)\n if total_var:\n avg_var=np.mean(total_var)\n if repo_count and duplicate_for_the_repo:\n code_duplication=(sum(duplicate_for_the_repo)/repo_count)*100\n \n return {'repository_url': url, \n 'number of lines': repo_count, \n 'libraries': libraries,\n 'nesting factor': average_nesting_factor,\n 'code duplication': code_duplication,\n 'average parameters':average_param,\n 'average variables':avg_var}", "def get_source_files():\n import vpr_netfile_parser\n\n return [os.path.abspath(os.path.join(os.path.dirname(vpr_netfile_parser\n .__file__),\n 'VprNetParser_ragel.cpp'))]", "def dir_analysis(path):\n global DISK_TOTAL, DISK_USED, DISK_FREE, ADDRESS, FILE_TOTAL_SIZE, DATA_TOTAL_SIZE\n with open('%s/address' % path, 'r') as r_address:\n ADDRESS = r_address.read().rstrip()\n disk_usage = psutil.disk_usage(SOFT_FILE_PATH)\n DISK_TOTAL = disk_usage.total\n DISK_USED = disk_usage.used\n DISK_FREE = disk_usage.free\n file_total_size = \"cd %s && du | tail -n 1 | awk '{print $1}'\" % (path)\n data_total_size = \"cd %s/data && du | tail -n 1 | awk '{print $1}'\" % (path)\n directory_size = \"file_size.txt\"\n # Determine whether the stored data size file exists, execute the du command once every 1 hour\n if os.path.exists(directory_size):\n statinfo = os.stat(directory_size)\n latest_update_time = statinfo.st_mtime\n nowtime = datetime.now()\n filetime = datetime.fromtimestamp(latest_update_time)\n if 
nowtime - filetime > timedelta(hours=1):\n file_input = open(directory_size, \"w\")\n file_total_size_exec = os.popen(file_total_size)\n FILE_TOTAL_SIZE = file_total_size_exec.read().split('\\n')[0]\n file_input.write(FILE_TOTAL_SIZE)\n data_total_size_exec = os.popen(data_total_size)\n DATA_TOTAL_SIZE = data_total_size_exec.read().split('\\n')[0]\n file_input.write('\\n')\n file_input.write(DATA_TOTAL_SIZE)\n else:\n readlines = open(directory_size).readlines()\n FILE_TOTAL_SIZE = readlines[0].strip()\n DATA_TOTAL_SIZE = readlines[1].strip()\n else:\n file_input = open(directory_size, \"w\")\n file_total_size_exec = os.popen(file_total_size)\n FILE_TOTAL_SIZE = file_total_size_exec.read().split('\\n')[0]\n file_input.write(FILE_TOTAL_SIZE)\n data_total_size_exec = os.popen(data_total_size)\n DATA_TOTAL_SIZE = data_total_size_exec.read().split('\\n')[0]\n file_input.write('\\n')\n file_input.write(DATA_TOTAL_SIZE)", "def count_LOC(path):\n re_empty = re.compile(r\"[\\s]*(#|\\n|\\\"\\\"\\\")\")\n re_for = re.compile(r\"for.*in\")\n re_lambda = re.compile(r\"lambda\")\n re_if = re.compile(r\"if.*:\")\n re_def = re.compile(r\"def (?P<fname>\\w+)\\(\")\n\n total_LOC, indent_level = 0, 0\n cur_part = None\n parts = defaultdict(int)\n\n with open(path, 'r') as _file:\n for line in filter(lambda l : not re_empty.match(l), _file):\n\n extra = len( re_for.findall(line) ) - 1 + len( re_lambda.findall(line) ) - 1 + len( re_if.findall(line) ) -1\n\n if extra < 0: extra = 0\n\n total_LOC += 1 + extra\n if cur_part:\n parts[cur_part] += 1 + extra\n\n defs = re_def.search(line)\n if defs:\n cur_part = defs.groupdict()['fname']\n indent_level = first_non_whitespace(line)\n\n cur_indent = first_non_whitespace(line)\n if cur_indent < indent_level:\n cur_part = None\n indent_level = cur_indent\n\n return(total_LOC, parts)", "def get_output_info(self):\n # --------------------------\n\n self.workdir = os.getcwd()\n filelist = os.listdir(self.workdir)\n\n outputlist = []\n for filename in filelist:\n if filename.startswith('output_'):\n outputlist.append(filename)\n\n if len(outputlist) < 1:\n print(\"I didn't find any output_XXXXX directories in current working directory.\")\n print(\"Are you in the correct workdir?\")\n print(\"use mergertree-extract.py -h or --help to print help message.\")\n quit()\n\n outputlist.sort()\n\n self.lastdir = outputlist[-1]\n self.lastdirnr = int(self.lastdir[-5:])\n self.noutput = len(outputlist)\n\n if (self.start_at > 0):\n # check that directory exists\n startnrstr = str(self.start_at).zfill(5)\n if 'output_' + startnrstr not in outputlist:\n print(\"Didn't find specified starting directory output_\" + startnrstr)\n print(\"use mergertree-extract.py -h or --help to print help message.\")\n quit()\n\n # read ncpu from infofile in last output directory\n infofile = self.lastdir + '/' + 'info_' + self.lastdir[-5:] + '.txt'\n f = open(infofile, 'r')\n ncpuline = f.readline()\n line = ncpuline.split()\n\n self.ncpu = int(line[-1])\n\n f.close()\n\n return", "def checkSumHelper(arg, dirname, fnames):\n val = 0\n files = [name for name in fnames if os.path.splitext(name)[1] in EXTENSIONS]\n for file in files:\n absFile = os.path.join(dirname,file)\n try:\n stats = os.stat(absFile)\n except OSError,e:\n # This is to skip over temporary files or files\n # nosy doesn't have permission to access\n # print \"Nosy: skipping file %s with error %s\"%(absFile,e)\n continue\n val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]\n arg.append(val)\n return", "def getPythonExecFiles(_r): # {{{\n 
l = []\n for top, dirs, files in os.walk(_r):\n for f in files:\n if os.path.splitext(f)[1] == '.pyc':\n l.append(str(top + '/' + f).replace(_r, ''))\n return l", "def getPythonFiles(_r): # {{{\n l = []\n for top, dirs, files in os.walk(_r):\n for f in files:\n if os.path.splitext(f)[1] == '.py' and not \\\n os.path.splitext(f)[0] == '__init__':\n pythonfile = str(top + '/' + f).replace(_r, '')\n if re.match(\"/.*\", pythonfile):\n pythonfile = pythonfile[1:]\n l.append(pythonfile)\n return l", "def find(self):\n print(\"Looking for pacnew and pacsave files…\")\n paths = ('/bin', '/etc', '/opt', '/usr')\n for dir_path, _, files in chain.from_iterable(os.walk(path) for path in paths):\n for f in files:\n pacnew = os.path.join(dir_path, f)\n if self.re_pacfiles.search(pacnew):\n self.pacfiles.append(pacnew)\n self.pacfiles.sort()\n print(\"%d file(s) found.\" % len(self.pacfiles))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get vertices dividing a 1d grid.
def get_1d_vertices(grid, cut_edges=False):
    if len(grid.shape) > 1:
        raise ValueError("grid must be 1d array.")
    diff = np.diff(grid)
    vert = np.zeros(grid.size+1)
    # Interior vertices: halfway between points
    vert[1:-1] = grid[0:-1] + diff/2
    # Edge vertices: tight or reflect
    if cut_edges:
        vert[0] = grid[0]
        vert[-1] = grid[-1]
    else:
        vert[0] = grid[0] - diff[0]/2
        vert[-1] = grid[-1] + diff[-1]/2
    return vert
[ "def get_vertices(self, i, j):\n pts = []\n xgrid, ygrid = self.xgrid, self.ygrid\n pts.append([xgrid[i, j], ygrid[i, j]])\n pts.append([xgrid[i + 1, j], ygrid[i + 1, j]])\n pts.append([xgrid[i + 1, j + 1], ygrid[i + 1, j + 1]])\n pts.append([xgrid[i, j + 1], ygrid[i, j + 1]])\n pts.append([xgrid[i, j], ygrid[i, j]])\n if np.isscalar(i):\n return pts\n else:\n vrts = np.array(pts).transpose([2, 0, 1])\n return [v.tolist() for v in vrts]", "def get_vertices(self) -> []:\n res = []\n for v in range(self.v_count) :\n res.append(v)\n return res", "def vertices(self):\n try:\n return self._vertices\n except:\n self._vertices = [list(x) for x in self.vertex_generator()]\n return self._vertices", "def get_grid(self):\n\t\txvec = numpy.linspace(self.x_lower, self.x_upper, self.nx + 1)\n\t\tyvec = numpy.linspace(self.y_lower, self.y_upper, self.ny + 1)\n\t\t\n\t\t(xgrid, ygrid) = numpy.meshgrid(xvec, yvec)\n\t\t\n\t\treturn (xgrid, ygrid, self.data)", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def getvertices(self):\n return self.vertices", "def vertices(self):\n return list(self._graph)", "def obtener_vertices(self):\n return list(self.vertices)", "def vertices(self) -> Iterator[UVec]:\n for location, length in self.cube_definitions:\n x, y, z = location\n yield [\n Vec3(x + xf * length, y + yf * length, z + zf * length)\n for xf, yf, zf in _cube_vertices\n ]", "def _vertices(self, point):\n vertex_0, vertex_1, vertex_2 = tuple(\n gs.take(point, indices=self.faces[:, i], axis=-2) for i in range(3)\n )\n if point.ndim == 3 and vertex_0.ndim == 2:\n vertex_0 = gs.expand_dims(vertex_0, axis=0)\n vertex_1 = gs.expand_dims(vertex_1, axis=0)\n vertex_2 = gs.expand_dims(vertex_2, axis=0)\n return vertex_0, vertex_1, vertex_2", "def vertexes(self):\n theta = self.orientation\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.a\n return self.coords + (shifts[:, None] * [-1, 1]).T", "def vertices(self):\n d = self.space_dimension()\n v = vector(ZZ, d)\n points = []\n for g in self.minimized_generators():\n for i in range(0,d):\n v[i] = g.coefficient(Variable(i))\n v_copy = copy.copy(v)\n v_copy.set_immutable()\n points.append(v_copy)\n return tuple(points)", "def vertices(self):\n for v in self.vert:\n yield v", "def get_vertices(self, rad, col):\n ivc = self.inner_vertex_count\n full_circle = self.full_circle\n ncol = self.ncol\n nver = ncol if full_circle else ncol + 1\n\n if rad == 0: # Case with no center point or single center point\n if self.single_center_cell:\n return [iv for iv in range(nver + ivc)][::-1]\n elif ivc == 1: # Single center point\n if full_circle and col == ncol - 1:\n return [1, col + 1, 0] # [col+2-nver, col+1, 0]\n return [col + 2, col + 1, 0]\n elif full_circle and col == ncol - 1:\n return [col + 1, nver + col, col, col + 1 - nver]\n else: # Normal inner band\n return [nver + col + 1, nver + col, col, col + 1]\n\n n = (rad - 1) * nver + ivc\n\n if full_circle and col == ncol - 1:\n return [n + col + 1, n + nver + col, n + col, n + col + 1 - nver]\n\n return [n + nver + col + 1, n + nver + col, n + col, n + col + 1]", "def get_all_vertices(self):\n return self.vertices_set", "def get_all_vertices(self):\n script = self.scripts.get(\"get_vertices\")\n params = None\n return self.gremlin(script, params)", "def _vertices_calculation(self):\n\n up_vertices = self.center_up + self.L / 2 * np.array(\n [[1, 1, 1], [1, -1, -1], [-1, -1, 1], [-1, 1, -1]]\n )\n\n down_vertices = 
-up_vertices + 2 * up_vertices[0]\n\n return up_vertices, down_vertices", "def evaluate_on_vertices(self):\n import numpy as np\n\n grid = self.space.grid\n local_coordinates = np.array([[0, 1, 0],[0, 0, 1]])\n index_set = grid.leaf_view.index_set()\n\n values = np.zeros((self.component_count, grid.leaf_view.entity_count(2)),\n dtype=self.dtype)\n\n for element in grid.leaf_view.entity_iterator(0):\n local_data = self.evaluate(element, local_coordinates)\n for i in range(3):\n index = index_set.sub_entity_index(element, i, 2)\n values[:, index] = local_data[:, i]\n return values", "def vertices_on_boundary(self):\n boundaries = self.vertices_on_boundaries()\n return boundaries[0] if boundaries else []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute padded image limits for x and y grids.
def pad_limits(xgrid, ygrid, xpad=0., ypad=0., square=None):
    xmin, xmax = xgrid.min(), xgrid.max()
    ymin, ymax = ygrid.min(), ygrid.max()
    dx = xmax - xmin
    dy = ymax - ymin
    x0 = xmin - xpad*dx
    x1 = xmax + xpad*dx
    y0 = ymin - ypad*dy
    y1 = ymax + ypad*dy
    if square:
        axis = square
        ax_position = axis.get_position()
        ax_height = ax_position.height * axis.figure.get_figheight()
        ax_width = ax_position.width * axis.figure.get_figwidth()
        ax_aspect = ax_height / ax_width
        im_height = y1 - y0
        im_width = x1 - x0
        im_aspect = im_height / im_width
        if (im_height/im_width) > (ax_height/ax_width):
            # Image too tall
            extra_w = im_height/ax_aspect - im_width
            x0 -= extra_w / 2
            x1 += extra_w / 2
        else:
            # Image too wide
            extra_h = im_width*ax_aspect - im_height
            y0 -= extra_h / 2
            y1 += extra_h / 2
    return [x0, x1, y0, y1]
[ "def bounds(pixels):\n for j in range(1, len(pixels)):\n if pixels[j][0] - pixels[j-1][0] > 10:\n left, right = pixels[:j], pixels[j:]\n\n # Join split sprites\n if len(left) > len(right):\n right = [(x-160, y) for (x, y) in right]\n else:\n left = [(x+160, y) for (x, y) in left]\n pixels = left + right\n break\n\n xmin, xmax = min(x for (x, y) in pixels), max(x for (x, y) in pixels)\n ymin, ymax = min(y for (x, y) in pixels), max(y for (x, y) in pixels)\n return xmin, xmax, ymin, ymax", "def compute_image_bounds(pixel_meter_size, frame, beam_width_data, additional_pixel_padding_x=0, additional_pixel_padding_y=0):\n\n # Compute the projected locations of all samples so that we can get the extent\n all_bl = []\n all_br = []\n all_fr = []\n all_fl = []\n\n for beam_num in [0, frame.BeamCount / 2, frame.BeamCount - 1]:\n for bin_num in [0, frame.samplesperbeam - 1]:\n bl, br, fr, fl = get_box_for_sample(beam_num, bin_num, frame, beam_width_data)\n\n all_bl.append(bl)\n all_br.append(br)\n all_fr.append(fr)\n all_fl.append(fl)\n\n all_bl = np.array(all_bl)\n all_br = np.array(all_br)\n all_fr = np.array(all_fr)\n all_fl = np.array(all_fl)\n\n # Get the xdim extent\n min_back_left = np.min(all_bl[:,0])\n min_back_right = np.min(all_br[:,0])\n min_front_left = np.min(all_fl[:,0])\n min_front_right = np.min(all_fr[:,0])\n assert min_back_left < min_back_right\n assert min_back_left < min_front_left\n assert min_back_left < min_front_right\n\n max_back_left = np.max(all_bl[:,0])\n max_back_right = np.max(all_br[:,0])\n max_front_left = np.max(all_fl[:,0])\n max_front_right = np.max(all_fr[:,0])\n assert max_back_right > max_back_left\n assert max_back_right > max_front_left\n assert max_back_right > max_front_right\n\n xdim_extent = np.array([min_back_left, max_back_right])\n\n\n # Get the ydim extent\n min_back_left = np.min(all_bl[:,1])\n min_back_right = np.min(all_br[:,1])\n min_front_left = np.min(all_fl[:,1])\n min_front_right = np.min(all_fr[:,1])\n min_front = min(min_front_left, min_front_right)\n assert min_front < min_back_right\n assert min_front < min_back_left\n\n\n max_back_left = np.max(all_bl[:,1])\n max_back_right = np.max(all_br[:,1])\n max_front_left = np.max(all_fl[:,1])\n max_front_right = np.max(all_fr[:,1])\n max_back = max(max_back_left, max_back_right)\n assert max_back > max_front_right\n assert max_back > max_front_left\n\n ydim_extent = np.array([min_front, max_back])\n\n # Determine which meter location corresponds to our \"target center\"\n bl, br, fr, fl = get_box_for_sample(frame.BeamCount / 2, 0, frame, beam_width_data)\n target_center_x = (fl[0] + fr[0]) / 2.\n target_center_y = (bl[1] + fl[1]) / 2.\n\n # Determine the x dimension size and what this corresponds to in meters\n extra_padding_x = pixel_meter_size + pixel_meter_size * additional_pixel_padding_x\n\n # X Min\n xmin_len = target_center_x - xdim_extent[0]\n xp = xmin_len % pixel_meter_size\n xmin_padded = xdim_extent[0] - (extra_padding_x - xp)\n xmin_len = target_center_x - xmin_padded\n x_min_cells = np.abs(xmin_len / pixel_meter_size)\n x_min_meters = target_center_x - xmin_len\n assert x_min_meters <= xdim_extent[0]\n\n\n # X Max\n xmax_len = xdim_extent[1] - target_center_x\n xp = xmax_len % pixel_meter_size\n xmax_padded = xdim_extent[1] + (extra_padding_x - xp)\n xmax_len = xmax_padded - target_center_x\n x_max_cells = np.abs(xmax_len / pixel_meter_size)\n x_max_meters = target_center_x + xmax_len\n assert x_max_meters >= xdim_extent[1]\n\n\n # if we want a specific beam to be the in the middle 
of the image then we should take the max?\n xdim = int(x_min_cells + x_max_cells)\n x_meter_start = x_min_meters\n x_meter_stop = x_max_meters\n\n # Determine the y dimension size and what this corresponds to in meters\n extra_padding_y = pixel_meter_size + pixel_meter_size * additional_pixel_padding_y\n\n # Y Min\n ymin_len = target_center_y - ydim_extent[0]\n yp = ymin_len % pixel_meter_size\n ymin_padded = ydim_extent[0] - ( extra_padding_y - yp)\n ymin_len = target_center_y - ymin_padded\n y_min_cells = np.abs(ymin_len / pixel_meter_size)\n y_min_meters = target_center_y - ymin_len\n assert y_min_meters <= ydim_extent[0]\n\n # Y Max\n ymax_len = ydim_extent[1] - target_center_y\n yp = ymax_len % pixel_meter_size\n ymax_padded = ydim_extent[1] + (extra_padding_y - yp)\n ymax_len = ymax_padded - target_center_y\n y_max_cells = np.abs(ymax_len / pixel_meter_size)\n y_max_meters = target_center_y + ymax_len\n assert y_max_meters >= ydim_extent[1]\n\n ydim = int(y_min_cells + y_max_cells)\n y_meter_start = y_max_meters\n y_meter_stop = y_min_meters\n\n return xdim, ydim, x_meter_start, y_meter_start, x_meter_stop, y_meter_stop", "def padding(self):\n if not self._pixels:\n return Bounds(0, 0, 0, 0)\n row_inked = tuple(self._1 in _row for _row in self._pixels)\n if not any(row_inked):\n return Bounds(self.width, self.height, 0, 0)\n bottom = row_inked[::-1].index(True)\n top = row_inked.index(True)\n col_inked = tuple(self._1 in _col for _col in zip(*self._pixels))\n left = col_inked.index(True)\n right = col_inked[::-1].index(True)\n return Bounds(left, bottom, right, top)", "def get_prune_bounds(self, center):\n x_low = max(0, center[0] - (self.prune_window_rows // 2))\n y_low = max(0, center[1] - (self.prune_window_cols // 2))\n x_high = min(center[0] + (self.prune_window_rows // 2), self.N - 1)\n y_high = min(center[1] + (self.prune_window_cols // 2), self.M - 1)\n return x_low, y_low, x_high, y_high", "def pad_dimensions(self):\n o = self._obj\n return _coordinate(\n _ordinate(o.xmin, o.xmax), _ordinate(o.ymin, o.ymax)\n )", "def space_bounds(self):\n min_col = np.inf\n max_col = -np.inf\n min_row = np.inf\n max_row = -np.inf\n for n in range(0,self.num_observations()):\n len_obs = self.length_vector[n]\n if self[n,0,:len_obs].min() < min_col:\n min_col = self[n,0,:len_obs].min()\n if self[n,0,:len_obs].max() > max_col:\n max_col = self[n,0,:len_obs].max()\n\n if self[n, 1, :len_obs].min() < min_row:\n min_row = self[n, 1, :len_obs].min()\n if self[n, 1, :len_obs].max() > max_row:\n max_row = self[n, 1, :len_obs].max()\n\n return np.asarray([min_col, max_col]), \\\n np.asarray([min_row, max_row])", "def pad_axis_limits(padding=0.05, yaxis=True, ax=None):\n ax = ax or plt.gca()\n if yaxis:\n axis_method = \"ylim\"\n else:\n axis_method = \"xlim\"\n\n dmin, dmax = getattr(ax, f\"get_{axis_method}\")()\n if padding < 1.0:\n padding_size = abs((dmax - dmin) * padding)\n else:\n padding_size = abs(padding)\n\n new_min = dmin - padding_size\n new_max = dmax + padding_size\n getattr(ax, f\"set_{axis_method}\")(new_min, new_max)\n return new_min, new_max", "def get_image_bounds(self):\n\n c1 = np.array([0,0])\n c2 = np.array([self.width,self.height])\n\n c1 = self.pixel_to_robot(c1)\n c2 = self.pixel_to_robot(c2)\n return c1,c2", "def determine_x_y_bounds(deformations, x_array, y_array, offset=5000):\n bounding_box = determine_deformation_bounding_box(deformations)\n x_start, x_end = x_array[0, bounding_box[:2]]\n y_start, y_end = y_array[bounding_box[2:], 0]\n\n return x_start-offset, 
x_end+offset, y_start-offset, y_end+offset", "def grid_bounding_boxes(w=10, xrng=(-70, 70), yrng=(-70, 70)):\n x0 = [no for no in range(xrng[0], xrng[1]+w, w)]\n y0 = [no for no in range(yrng[0], yrng[1]+w, w)]\n\n xcenters, ycenters = list(), list()\n for i in range(len(x0)):\n xcenters.extend(x0)\n ycenters.extend([y0[i]]*len(y0))\n centers = pd.DataFrame({0:xcenters, 1:ycenters})\n bbs = pd.concat([centers-(w/2), centers.rename(columns={0:2, 1:3})+(w/2)], axis=1)\n\n return bbs", "def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)", "def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0:1:1], boxes[:, 2:3:1], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1:2:1], boxes[:, 3:4:1], h, padding, cast=False)\n\n cast = P.Cast()\n broadcast_to = P.BroadcastTo((h, w, n))\n row = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(1, -1, 1)))\n rows = cast(row, x1.dtype)\n col = broadcast_to((P.range(Tensor(0, mindspore.int32),\n Tensor(w, mindspore.int32),\n Tensor(1, mindspore.int32)).view(-1, 1, 1)))\n cols = cast(col, x2.dtype)\n\n\n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_left = P.Cast()(masks_left, mindspore.float16)\n masks_right = P.Cast()(masks_right, mindspore.float16)\n crop_mask = masks_left * masks_right\n masks_up = cols >= y1.view(1, 1, -1)\n masks_up = P.Cast()(masks_up, mindspore.float16)\n crop_mask *= masks_up\n masks_down = cols < y2.view(1, 1, -1)\n masks_down = P.Cast()(masks_down, mindspore.float16)\n crop_mask *= masks_down\n\n return masks * crop_mask", "def get_bounds(root, pg):\n def panel_bounds(root, panel):\n size = panel.get_image_size()\n p0 = col(panel.get_pixel_lab_coord((0,0)))\n p1 = col(panel.get_pixel_lab_coord((size[0]-1,0)))\n p2 = col(panel.get_pixel_lab_coord((size[0]-1,size[1]-1)))\n p3 = col(panel.get_pixel_lab_coord((0,size[1]-1)))\n\n rn = col(root.get_normal())\n rf = col(root.get_fast_axis())\n rs = col(root.get_slow_axis())\n\n return [col((p.dot(rf), + p.dot(rs),0)) for p in [p0, p1, p2, p3]]\n\n if pg.is_group():\n minx = miny = float('inf')\n maxx = maxy = float('-inf')\n for panel in iterate_panels(pg):\n bounds = panel_bounds(root, panel)\n for v in bounds:\n if v[0] < minx:\n minx = v[0]\n if v[0] > maxx:\n maxx = v[0]\n if v[1] < miny:\n miny = v[1]\n if v[1] > maxy:\n maxy = v[1]\n return [col((minx, miny, 0)),\n col((maxx, miny, 0)),\n col((maxx, maxy, 0)),\n col((minx, maxy, 0))]\n\n else:\n return panel_bounds(root, pg)", "def get_sector_bounds_no_pad(self, center):\n x_low = max(0, center[0] - (self.sector_rows // 2))\n y_low = max(0, center[1] - (self.sector_cols // 2))\n x_high = min(center[0] + (self.sector_rows // 2), self.N - 1)\n y_high = min(center[1] + (self.sector_cols // 2), self.M - 1)\n return x_low, y_low, x_high, y_high", "def valid_kernel_mask(orig_mask, cnt_width, cnt_height, cnt_x, cnt_y, valid_kernel_ratio_list):\n \n # extract ratio values \n valid_kernel_ratio_left = valid_kernel_ratio_list[0]\n valid_kernel_ratio_right = valid_kernel_ratio_list[1]\n valid_kernel_ratio_top = valid_kernel_ratio_list[2]\n valid_kernel_ratio_bottom = valid_kernel_ratio_list[3]\n \n # a mask with the 
same size as inout image, pixels with a value of 0 (background) are\n # ignored in the original image while mask pixels with a value of\n # 255 (foreground) are allowed to be kept\n img_height, img_width = orig_mask.shape\n \n #print(img_height, img_width)\n \n # initialize empty image as mask\n v_mask = np.zeros(orig_mask.shape, dtype = \"uint8\")\n \n # compute the coordinates to get the masking area\n #x_l = 0\n #x_r = int(avg_x + img_width)\n #y_t = int(avg_y + max_height*valid_kernel_ratio_top)\n #y_b = int(avg_y + max_height*(1-valid_kernel_ratio_bottom))\n \n # compute the coordinates to get the masking area for two ears in one image\n for i in range(len(cnt_height)):\n \n # compute the coordinates to get the masking area\n x_l = int(cnt_x[i] + (cnt_width[i]-10)*valid_kernel_ratio_left)\n x_r = int(cnt_x[i] + (cnt_width[i]-10)*(1 - valid_kernel_ratio_right))\n y_t = int(cnt_y[i] + (cnt_height[i]-10)*valid_kernel_ratio_top)\n y_b = int(cnt_y[i] + (cnt_height[i]-10)*(1-valid_kernel_ratio_bottom))\n \n # assign area of valid kernel \n v_mask[y_t : y_b, x_l : x_r] = 255\n \n return v_mask", "def __padding(self, image, boxes, height, width):\n temp = boxes[:, :4].astype(np.int)\n y1 = np.where(temp[:, 0] < 0)[0]\n if len(y1) > 0:\n temp[y1, 0] = 0\n x1 = np.where(temp[:, 1] < 0)[0]\n if len(x1) > 0:\n temp[x1, 0] = 0\n y2 = np.where(temp[:, 2] > image.shape[0] - 1)[0]\n if len(y2) > 0:\n temp[y2, 0] = image.shape[0] - 1\n x2 = np.where(temp[:, 3] > image.shape[1] - 1)[0]\n if len(x2) > 0:\n temp[x2, 0] = image.shape[1] - 1\n pad_top = np.abs(temp[:, 0] - boxes[:, 0]).astype(np.int)\n pad_left = np.abs(temp[:, 1] - boxes[:, 1]).astype(np.int)\n pad_bottom = np.abs(temp[:, 2] - boxes[:, 2]).astype(np.int)\n pad_right = np.abs(temp[:, 3] - boxes[:, 3]).astype(np.int)\n input_data = np.empty([boxes.shape[0], 3, height, width], dtype=np.float32)\n for i in range(boxes.shape[0]):\n crop_img = image[temp[i, 0]:temp[i, 2] + 1, temp[i, 1]:temp[i, 3] + 1, :]\n crop_img = cv2.copyMakeBorder(crop_img, pad_top[i], pad_bottom[i], \\\n pad_left[i], pad_right[i], cv2.BORDER_CONSTANT, value=0)\n if crop_img is None:\n continue\n crop_img = cv2.resize(crop_img, (width, height)).astype(np.float32)\n crop_img[:, :, 0] -= self.mean[0]\n crop_img[:, :, 1] -= self.mean[1]\n crop_img[:, :, 2] -= self.mean[2]\n crop_img *= self.scale_factor\n crop_img = np.transpose(crop_img, (2, 0, 1))\n input_data[i] = crop_img.copy()\n return input_data", "def grid_bounding_boxes_lower_left(w=10, xrng=(-70, 70), yrng=(-70, 70)):\n x0 = [no for no in range(xrng[0], xrng[1]+w, w)]\n y0 = [no for no in range(yrng[0], yrng[1]+w, w)]\n\n xcenters, ycenters = list(), list()\n for i in range(len(x0)):\n xcenters.extend(x0)\n ycenters.extend([y0[i]]*len(y0))\n centers = pd.DataFrame({0:xcenters, 1:ycenters})\n\n return centers-(w/2)", "def _get_padded_grid_(ax):\n ax_pad = np.zeros(ax.size + 2)\n ax_pad[1:-1] = ax\n ax_pad[0] = ax[0] - (ax[2] - ax[1])\n ax_pad[-1] = ax[-1] + (ax[2] - ax[1])\n\n return ax_pad", "def crop_mask(mask, crop_offset=0.5):\n maxx, maxy, minx, miny = 0, 0, 0, 0\n for r in range(0, mask.shape[0]):\n if np.min(mask[r]) < 255:\n minx = int(r + mask.shape[0] * (crop_offset / 100))\n break\n\n for r in range(mask.shape[0] - 1, 0, -1):\n if np.min(mask[r]) < 255:\n maxx = int(r - mask.shape[0] * (crop_offset / 100))\n break\n\n for c in range(0, mask.shape[1]):\n if np.min(mask[:, c]) < 255:\n miny = int(c + mask.shape[1] * (crop_offset / 100))\n break\n\n for c in range(mask.shape[1] - 1, 0, -1):\n if 
np.min(mask[:, c]) < 255:\n maxy = int(c - mask.shape[1] * (crop_offset / 100))\n break\n\n return (maxx, maxy, minx, miny)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an object formatter for links.
def link(text, link_func):
    def object_formatter(v, c, m, p):
        """Format object view link."""
        return Markup('<a href="{0}">{1}</a>'.format(
            link_func(m), text))
    return object_formatter
[ "def object_formatter(v, c, m, p):\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))", "def format_link(linkfunc):\n\n def func(item):\n url, text = linkfunc(item)\n return '<a href=\"%s\">%s</a>' % (url, text)\n return func", "def linkify(obj, link_text=''):\n try:\n lst = []\n # if obj is not a list, convert it into a list\n if not getattr(obj, '__iter__', False):\n obj = [obj]\n for item in obj:\n if hasattr(item, 'child'):\n item = item.child\n if link_text == '':\n l_text = unicode(item)\n else:\n try:\n link_text = link_text.encode('ascii')\n l_text = getattr(item, link_text, link_text)\n except UnicodeEncodeError:\n l_text = link_text\n if not (isinstance(item, Content) and\n isinstance(l_text, SafeText)):\n l_text = filter.force_escape(l_text)\n format_args = (item.get_absolute_url(), l_text)\n lst.append(mark_safe('<a href=\\'%s\\'>%s</a>' % format_args))\n\n # nonlists obj's should be returned as nonlists\n return lst[0] if len(lst) == 1 else lst\n except:\n return ''", "def get_object_link(cls, obj, project=None):\n return (\n '<a href=\"{}\" class=\"sodar-tl-object-link\">'\n '<i class=\"iconify\" data-icon=\"mdi:clock-time-eight-outline\"></i>'\n '</a>'.format(cls.get_object_url(obj, project))\n )", "def format_element(bfo, default='', separator='; ', style='', \\\n show_icons='no', prefix='', suffix=''):\n _ = gettext_set_language(bfo.lang)\n if style != \"\":\n style = 'class = \"' + style + '\"'\n\n links = []\n\n # KEKSCAN/CDS links\n identifiers = bfo.fields('035__')\n\n for ident in identifiers:\n if ident.get('9', '') == 'KEKSCAN' and ident.get('a', None) is not None:\n out = ident['a'].replace(\"-\", \"\")\n links.append('<a href=\"http://www-lib.kek.jp/cgi-bin/img_index?' + out + '\"> KEK scanned document </a>')\n\n if ident.get('9', '') == 'CDS' and ident.get('a', None) is not None:\n links.append('<a href=\"http://cds.cern.ch/record/' + ident['a'] + '\"> CERN Document Server </a>')\n\n # ADS links\n identifiers = bfo.fields('037__')\n current_links = bfo.field('8564_y')\n\n for ident in identifiers:\n if ident.get('9', '') == 'arXiv' and not (\"ADSABS\" in current_links) and ident.get('a', None) is not None:\n links.append('<a href=\"http://adsabs.harvard.edu/cgi-bin/basic_connect?qsearch=' + ident.get('a', '') + '\">ADS Abstract Service</a>')\n\n #links moved to new field 035\n urls = bfo.fields('035__')\n allowed_doctypes = [\"INSPIRE-PUBLIC\"]\n for url in urls:\n if \"9\" in url and \"a\" in url:\n if url[\"9\"].lower() == \"msnet\":\n links.append('<a ' + style + ' href=\"http://www.ams.org/mathscinet-getitem?mr=' + url[\"a\"] + '\">AMS MathSciNet</a>')\n if url[\"9\"].lower() == \"zblatt\":\n links.append('<a ' + style + ' href=\"http://www.zentralblatt-math.org/zmath/en/search/?an=' + url[\"a\"] + '\">zbMATH</a>')\n if url[\"9\"].lower() == \"euclid\":\n links.append('<a ' + style + ' href=\"http://projecteuclid.org/euclid.cmp/=' + url[\"a\"] + '\">Project Euclid</a>')\n\n # now look for explicit URLs\n # might want to check that we aren't repeating things from above...\n # Note: excluding self-links\n urls = bfo.fields('8564_')\n allowed_doctypes = [\"INSPIRE-PUBLIC\"]\n for url in urls:\n if url.get(\"y\", \"\").lower() not in (\"msnet\", \"zblatt\", \"euclid\"):\n if '.png' not in url['u'] and not \\\n (url.get('y', '').lower().startswith(\"fermilab\") and bfo.field(\"710__g\").lower() in ('atlas collaboration', 'cms collaboration')):\n if url.get('y', '').upper() != \"DURHAM\":\n if url.get(\"u\") and \\\n url.get('y', 
'Fulltext').upper() != \"DOI\" and not \\\n url.get('u').startswith(CFG_SITE_URL):\n links.append('<a ' + style + \\\n 'href=\"' + url.get(\"u\") + '\">' + \\\n _lookup_url_name(bfo, url.get('y', 'Fulltext')) + '</a>')\n elif url.get(\"u\").startswith(CFG_SITE_URL) and \\\n url.get(\"u\")[-3:].lower() == \"pdf\" and bibdocfile_url_to_bibdoc(url.get('u')).doctype in allowed_doctypes:\n links.append('<a ' + style + 'href=\"' + url.get(\"u\") + '\">' + \\\n _lookup_url_name(bfo, url.get('y', 'Fulltext')) + '</a>')\n\n #put it all together\n if links:\n if show_icons.lower() == 'yes':\n img = '<img style=\"border:none\" \\\n src=\"%s/img/file-icon-text-12x16.gif\" alt=\"%s\"/>' \\\n % (CFG_BASE_URL, _(\"Download fulltext\"))\n links = [img + '<small>' + link + '</small>' for link in links]\n return prefix + separator.join(links) + suffix\n else:\n return default", "def _pdf_formatter(view, context, model, name):\n return Markup('<a href=\"{0}\" target=\"_blank\">{1}</a>'.format(\n model.pdf_link, model.pdf_link))", "def format_link_segment(value):\n format_type = json_api_settings.FORMAT_RELATED_LINKS\n return format_value(value, format_type)", "def format_amazon_objects(amazon_objects):\n if \"ERROR\" in amazon_objects[0]:\n return\n for amazon_object in amazon_objects:\n page_url = Markup(\n '<a href=\"{}\" target=\"_blank\">{}</a>'\n .format(amazon_object.get(\"Detail Page URL\"),\n amazon_object.get(\"Title\"))\n )\n amazon_object[\"Detail Page URL\"] = page_url\n technical_url = Markup(\n '<a href=\"{}\" target=\"_blank\">Technical Details</a>'\n .format(amazon_object.get(\"Technical Details\")))\n amazon_object[\"Technical Details\"] = technical_url\n image = Markup('<img src=\"{}\" style=\"width: 100%;\">'\n .format(amazon_object.get(\"Large Image\")))\n amazon_object[\"Large Image\"] = image", "def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))", "def modulelink(self, object):\n return '<a href=\"%s.html\">%s</a>' % (object.__name__, object.__name__)", "def object_link(obj, view=\"view\", attribute=\"Title\", content=\"\", target=\"\"):\n from Products.CMFPlone.utils import safe_unicode\n\n href = view and \"%s/%s\" % (obj.absolute_url(), view) or obj.absolute_url()\n if not content:\n if not hasattr(obj, attribute):\n attribute = \"Title\"\n content = getattr(obj, attribute)\n if callable(content):\n content = content()\n if target:\n target = ' target=\"{}\"'.format(target)\n return u'<a href=\"%s\"%s>%s</a>' % (href, target, safe_unicode(content))", "def reviewed_model_linked(obj):\n url = reverse(\n 'admin:{app_label}_{model_name}_changelist'.format(\n app_label=obj.content_type.app_label,\n model_name=obj.content_type.model,\n )\n )\n return \"{text} (<a href='{url}'>link</a>)\".format(text=obj.content_type.name.title(),\n url=url)", "def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def linkify(field_name):\n\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj 
is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def reviewed_object_linked(obj):\n url = reverse(\n 'admin:{app_label}_{model_name}_change'.format(\n app_label=obj.content_type.app_label,\n model_name=obj.content_type.model,\n ),\n args=(obj.reviewed_object.id,)\n )\n return \"{text} (<a href='{url}'>link</a>)\".format(text=obj.reviewed_object,\n url=url)", "def deriveLinkfromObject(obj, scale=1, parent_link=True, parent_objects=True,\n reparent_children=True, nameformat='', scaleByBoundingBox=False):\n log('Deriving link from ' + nUtils.getObjectName(obj), level=\"INFO\")\n # create armature/bone\n bUtils.toggleLayer('link', True)\n bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.object.armature_add()\n newlink = bpy.context.active_object\n newlink.name = obj.name + \"_link\"\n newlink.matrix_world = obj.matrix_world\n newlink.phobostype = 'link'\n if scaleByBoundingBox:\n bound_box = (\n max([c[0] for c in obj.bound_box]),\n max([c[1] for c in obj.bound_box]),\n max([c[2] for c in obj.bound_box]),\n )\n newlink.scale = [max(bound_box)*scale] * 3\n else:\n newlink.scale = [scale] * 3\n if obj.parent is not None and parent_link:\n eUtils.parentObjectsTo(newlink, obj.parent)\n if parent_objects:\n eUtils.parentObjectsTo(obj, newlink)\n if reparent_children:\n eUtils.parentObjectsTo(list(obj.children), newlink)\n if bpy.context.scene.phoboswireframesettings.links:\n newlink.display_type = \"WIRE\"\n return newlink", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def getLink(self):", "def format_element(bfo, links=\"no\", category=\"yes\", mirrors=\"yes\"):\n\n arxiv=get_arxiv(bfo, category=\"no\")\n\n if len(arxiv) == 0:\n return\n\n out = ''\n if links == 'yes':\n arxiv_ref = arxiv[0] # Take only first one\n out += '''\n<a href=\"http://arXiv.org/abs/%(ref)s\">Abstract</a> and\n<a href=\"http://arXiv.org/ps/%(ref)s\">Postscript</a>\n and <a href=\"http://arXiv.org/pdf/%(ref)s\">PDF</a> from arXiv.org'''% \\\n {'ref': arxiv_ref}\n\n if mirrors.lower()=='yes':\n out+='''\n (mirrors:\n<a href=\"http://au.arXiv.org/abs/%(ref)s\">au</a>\n\n<a href=\"http://br.arXiv.org/%(ref)s\">br</a>\n<a href=\"http://cn.arXiv.org/abs/%(ref)s\">cn</a>\n<a href=\"http://de.arXiv.org/abs/%(ref)s\">de</a>\n<a href=\"http://es.arXiv.org/abs/%(ref)s\">es</a>\n<a href=\"http://fr.arXiv.org/abs/%(ref)s\">fr</a>\n<a href=\"http://il.arXiv.org/abs/%(ref)s\">il</a>\n<a href=\"http://in.arXiv.org/abs/%(ref)s\">in</a>\n<a href=\"http://it.arXiv.org/abs/%(ref)s\">it</a>\n<a href=\"http://jp.arXiv.org/abs/%(ref)s\">jp</a>\n<a href=\"http://kr.arXiv.org/abs/%(ref)s\">kr</a>\n<a href=\"http://ru.arXiv.org/abs/%(ref)s\">ru</a>\n<a href=\"http://tw.arXiv.org/abs/%(ref)s\">tw</a>\n<a href=\"http://uk.arXiv.org/abs/%(ref)s\">uk</a>\n<a href=\"http://aps.arXiv.org/abs/%(ref)s\">aps</a>\n<a href=\"http://lanl.arXiv.org/abs/%(ref)s\">lanl</a>)''' % \\\n {'ref': arxiv_ref}\n\n\n else: # print only value\n out = ', 
'.join(get_arxiv(bfo,category))\n\n return out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format object view link.
def object_formatter(v, c, m, p):
    return Markup('<a href="{0}">{1}</a>'.format(
        link_func(m), text))
[ "def object_link(obj, view=\"view\", attribute=\"Title\", content=\"\", target=\"\"):\n from Products.CMFPlone.utils import safe_unicode\n\n href = view and \"%s/%s\" % (obj.absolute_url(), view) or obj.absolute_url()\n if not content:\n if not hasattr(obj, attribute):\n attribute = \"Title\"\n content = getattr(obj, attribute)\n if callable(content):\n content = content()\n if target:\n target = ' target=\"{}\"'.format(target)\n return u'<a href=\"%s\"%s>%s</a>' % (href, target, safe_unicode(content))", "def link(text, link_func):\n def object_formatter(v, c, m, p):\n \"\"\"Format object view link.\"\"\"\n return Markup('<a href=\"{0}\">{1}</a>'.format(\n link_func(m), text))\n return object_formatter", "def get_object_link(cls, obj, project=None):\n return (\n '<a href=\"{}\" class=\"sodar-tl-object-link\">'\n '<i class=\"iconify\" data-icon=\"mdi:clock-time-eight-outline\"></i>'\n '</a>'.format(cls.get_object_url(obj, project))\n )", "def object_view_with_links(obj, request):\n _view = _object_view(obj, request)\n obj_link = obj_ui_link = \"\"\n # UI link to the real business object referenced as topic\n if isinstance(obj, Posting):\n obj_ui_link = request.link(obj, app=get_root(request).child(\"activitystream\"))\n else:\n try:\n obj_link = request.link(obj, app=_get_collection_app(request))\n except morepath.error.LinkError:\n pass\n obj_ui_link = get_ui_link(request, obj) or \"\"\n _view.update({\n \"object_id\": obj_link,\n \"object_ui_link\": obj_ui_link})\n return _view", "def reviewed_object_linked(obj):\n url = reverse(\n 'admin:{app_label}_{model_name}_change'.format(\n app_label=obj.content_type.app_label,\n model_name=obj.content_type.model,\n ),\n args=(obj.reviewed_object.id,)\n )\n return \"{text} (<a href='{url}'>link</a>)\".format(text=obj.reviewed_object,\n url=url)", "def modulelink(self, object):\n return '<a href=\"%s.html\">%s</a>' % (object.__name__, object.__name__)", "def link(self, obj):\n return format_html(\n '<a href=\"{url}\">{url}</a>',\n url='https://sms.cam.ac.uk/media/{}'.format(obj.id)\n )", "def reviewed_model_linked(obj):\n url = reverse(\n 'admin:{app_label}_{model_name}_changelist'.format(\n app_label=obj.content_type.app_label,\n model_name=obj.content_type.model,\n )\n )\n return \"{text} (<a href='{url}'>link</a>)\".format(text=obj.content_type.name.title(),\n url=url)", "def get_link(self, entry):\n return _('<a href=\"%s\" target=\"blank\">View</a>') \\\n % entry.get_absolute_url()", "def href(obj):\n if isinstance(obj, Filing):\n return reverse('filing', args=(obj.region, obj.name, obj.period_name))\n else:\n raise ValueError('cannot build a URL for {}.{} objects'.format(\n type(obj).__module__, type(obj).__name__))", "def _pdf_formatter(view, context, model, name):\n return Markup('<a href=\"{0}\" target=\"_blank\">{1}</a>'.format(\n model.pdf_link, model.pdf_link))", "def view_link(self, obj):\n if obj.cwr:\n url = reverse(\n 'admin:music_publisher_ackimport_change', args=(obj.id,))\n url += '?preview=true'\n return mark_safe(\n '<a href=\"{}\" target=\"_blank\">View CWR</a>'.format(url))", "def pybb_link(object, anchor=''):\n\n url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None\n #noinspection PyRedeclaration\n anchor = anchor or smart_text(object)\n return mark_safe('<a href=\"%s\">%s</a>' % (url, escape(anchor)))", "def getPrimaryLink(obj, target=None):\n link = \"\"\n if obj:\n if not obj.checkRemotePerm(\"View\", obj):\n link = obj.id\n else:\n attributes = \"\"\n if target is not None:\n attributes = 
\"target='%s' \" % (target,)\n link = \"<a %shref='%s'>%s</a>\" % (\n attributes,\n obj.getPrimaryUrlPath(),\n obj.id,\n )\n return link", "def pybb_link(object, anchor=u''):\n\n url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None\n #noinspection PyRedeclaration\n anchor = anchor or smart_unicode(object)\n return mark_safe('<a href=\"%s\">%s</a>' % (url, escape(anchor)))", "def viewurilink(uri) :\n\tname = schema.uri_to_name(uri)\n\tif name :\n\t\turl = '/view/name/' + quote(name)\n\telif uri[:7] == \"http://\" :\n\t\turl = '/view/uri/' + uri[7:]\n\telse :\n\t\turl = '/view/uri?id=' + uri\n\t\n\treturn '<a href=\"%s\">%s</a>' % (url, name or n.shorten(uri))", "def detail_link(db_obj, text=None):\n\n def build_link(obj):\n name = str(obj) if text is None else text\n return _make_link(obj.detail_url(), name)\n\n return mark_safe(', '.join(map(build_link, as_list(db_obj))))", "def linkify(field_name):\n def _linkify(obj):\n linked_obj = getattr(obj, field_name)\n if linked_obj is None:\n return '-'\n app_label = linked_obj._meta.app_label\n model_name = linked_obj._meta.model_name\n view_name = f'admin:{app_label}_{model_name}_change'\n link_url = reverse(view_name, args=[linked_obj.pk])\n return format_html('<a href=\"{}\">{}</a>', link_url, linked_obj)\n\n _linkify.short_description = field_name # Sets column name\n return _linkify", "def _get_history_link(cls, ref_obj):\n url_name = 'timeline:list_object_site'\n url_kwargs = {\n 'object_model': ref_obj.object_model,\n 'object_uuid': ref_obj.object_uuid,\n }\n if ref_obj.event.project:\n url_name = 'timeline:list_object'\n url_kwargs['project'] = ref_obj.event.project.sodar_uuid\n history_url = reverse(url_name, kwargs=url_kwargs)\n return (\n '<a href=\"{}\" class=\"sodar-tl-object-link\">'\n '<i class=\"iconify\" '\n 'data-icon=\"mdi:clock-time-eight-outline\"></i></a>'.format(\n history_url\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the currently connected players (on the MC server). First tries to hit the cache to see if this has been checked recently. If there is no cache entry, queries the Minecraft server's zombiepygman API to get the list of currently connected players.
def _get_connected_player_list(self): if not zpgapi.is_zgp_api_enabled(): # API is not configured, skip this. return [] cache_key = 'api_connected_players' cache_val = cache.get(cache_key) if cache_val != None: return cache_val api = zpgapi.get_zpg_api_iface() try: api_response = api.cmd_list_connected() cache_val = api_response['player_list'] except urllib2.URLError: # Error with zombiepygman. # This will get cached, but that's OK. It will prevent request # pileup on the gunicorn workers. cache_val = [] cache.set(cache_key, cache_val, 60) return cache_val
[ "def get_players(self):\n return self.server.status().players.online", "def getplayerlist(self):\n return self.referee.players.values()", "def get_players(self):\n return self.players", "def getplayerlist(self):\n return list(self.playerlist)", "def get_players(self):\r\n return self.players.values()", "def ready_players(self):\n return self.players.filter_by(sitting_out=False).join(players_active).all()", "def _player_list(self):\n game = self.ctrl.game\n return game.players[self.i_to_player_id(0)], game.players[self.i_to_player_id(1)]", "def current_players(self):\n return self.previous_event.current_players", "def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)", "def get_all_players(self):\n\n self._logger.debug(\"Getting player list\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT first_name, last_name, nickname, time FROM player \\\n ORDER BY time DESC\")\n players = cursor.fetchall()\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return players", "def get_active_players():\n\n # Get a list of all nfl players in the database\n all_players = nflgame.players\n\n # Initialize empty list for active players\n available_players = []\n\n # Go through all players\n for p in all_players:\n\n # If the players are active, they are available for our roster\n if all_players[p].status == 'ACT':\n available_players.append(all_players[p])\n\n # Return all players that are available\n return available_players", "def players(self):\n return [p for p in self._manager._lavalink.players.values() if p.node == self]", "def query_players():\n return api.ChainballCentralAPI.central_api_get(\n sub_api=\"api\", path=\"players\"\n )", "def getPlayers(self):\n return iter(self.players)", "def online(ctx):\r\n if ctx.message.channel.is_private:\r\n user_servers = get_user_servers(bot, ctx.message.author.id)\r\n user_worlds = get_user_worlds(bot, ctx.message.author.id)\r\n else:\r\n user_servers = [ctx.message.server]\r\n user_worlds = [tracked_worlds.get(ctx.message.server.id)]\r\n if user_worlds[0] is None:\r\n yield from bot.say(\"This server is not tracking any tibia worlds.\")\r\n return\r\n c = userDatabase.cursor()\r\n now = datetime.utcnow()\r\n uptime = (now-start_time).total_seconds()\r\n count = 0\r\n online_list = {world: \"\" for world in user_worlds}\r\n try:\r\n for char in global_online_list:\r\n char = char.split(\"_\", 1)\r\n world = char[0]\r\n name = char[1]\r\n if world not in user_worlds:\r\n continue\r\n c.execute(\"SELECT name, user_id, vocation, ABS(last_level) as level FROM chars WHERE name LIKE ?\", (name,))\r\n row = c.fetchone()\r\n if row is None:\r\n continue\r\n # Only show members on this server or members visible to author if it's a pm\r\n owner = get_member(bot, row[\"user_id\"], server_list=user_servers)\r\n if owner is None:\r\n continue\r\n row[\"owner\"] = owner.display_name\r\n row['emoji'] = get_voc_emoji(row['vocation'])\r\n row['vocation'] = get_voc_abb(row['vocation'])\r\n online_list[world] += \"\\n\\t{name} (Lvl {level} {vocation}{emoji}, **@{owner}**)\".format(**row)\r\n count += 1\r\n\r\n if count == 0:\r\n if uptime < 60:\r\n yield from bot.say(\"I just started, give 
me some time to check online lists...\"+EMOJI[\":clock2:\"])\r\n else:\r\n yield from bot.say(\"There is no one online from Discord.\")\r\n return\r\n\r\n # Remove worlds with no players online\r\n online_list = {k: v for k, v in online_list.items() if v is not \"\"}\r\n reply = \"The following discord users are online:\"\r\n if len(user_worlds) == 1:\r\n reply += online_list[user_worlds[0]]\r\n else:\r\n for world, content in online_list.items():\r\n reply += \"\\n__**{0}**__{1}\".format(world, content)\r\n\r\n yield from bot.say(reply)\r\n finally:\r\n c.close()", "def get_installed_players(self) -> List[PlayerApp]:\n return self._get_installed(PlayerApp)", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def getPlayerList(self):\n return(self.playerList)", "async def players(self) -> list:\n\n return await self._client.GetLeaderboardPlayers(\n self.title,\n self.platform,\n gameType=self.gameType,\n gameMode=self.gameMode,\n timeFrame=self.timeFrame,\n page=self.page,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the value of 'done' is set to False when creating an item
def test_done_default_value_is_False(self): item = Item(name = "A test item") self.assertEqual(item.name, "A test item") self.assertFalse(item.done)
[ "def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)", "def test_mark_post_process_complete_create(self):\n pass", "def done_item(self) -> None:\n pass", "def test_add_already_present(self):\n food_item = self.create_a_food_item()\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict.popitem()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n food_item_dup = self.create_a_food_item()\n food_item_dup.temp = 'hot'\n rc = process_new_item(self.shelves, food_item_dup)\n self.assertEqual(rc, NewItemStatus.already_shelved)", "def test_done_not_bool(self):\n with pytest.raises(ValueError):\n tasks.add(Task(summary='summary', done='True'))", "def can_create_items(self):\n return # boolean", "def mark_done(self, user):\n if not self.is_done:\n self.is_done = True\n self.done_by = user\n self.save()\n return True", "def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True", "def test_item_none_create(self):\n\n author = CustomUser.objects.get(id=101)\n topic = Topic.objects.get(id=101)\n item = Item.create(name='test', authors=[author], topic=topic, form='str')\n self.assertIsNone(item)", "def test_add_dest_has_room(self):\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict.popitem()\n food_item = self.create_a_food_item()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n found_it = self.shelves['frozen'].food_dict.get(food_item.id, None) is not None\n self.assertEqual(found_it, True)", "def test_item_creation_twice(self):\n # create an item\n self.test_shoppingitem()\n # create the same item twice\n res2 = self.client().post(\"/shoppinglists/1/items\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.shoppingitem)\n self.assertIn(\"Item name already exists\", str(res2.data))", "def test_add_dest_is_empty(self):\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict = dict() # destroy the old dict\n food_item = self.create_a_food_item()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n found_it = self.shelves['frozen'].food_dict.get(food_item.id, None) is not None\n self.assertEqual(found_it, True)\n self.assertEqual(1, len(self.shelves['frozen'].food_dict))", "def am_i_done(self):\n return self.ballots_todo != None and len(self.ballots_todo) == 0", "def test_create_item(self):\n self.assertTrue(OrderItem.objects.exists())", "def mark_done(self):\n self.done = True", "def test_item_success_create(self):\n\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n topic = Topic.objects.get(id=101)\n time = datetime.timedelta(seconds=66000)\n item = Item.create(name='new', authors=users, topic=topic,\n form=1, superiors=superiors, estimation=time)\n self.assertIsInstance(item, Item)\n self.assertEqual(item.name, 'new')\n self.assertEqual(item.topic.id, 101)\n 
self.assertEqual(item.form, 1)\n self.assertListEqual(list(item.authors.all()), users)\n self.assertListEqual(list(item.superiors.all()), superiors)\n self.assertEqual(item.description, '')\n self.assertEqual(item.estimation, time)", "def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def check_done(self):\n return not bool(len(self.tasks))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the value of 'done' is True when it is set to True while creating an item
def test_done_value_can_be_set_to_True(self): item = Item(name = "A test item", done = True) self.assertEqual(item.name, "A test item") self.assertTrue(item.done)
[ "def test_done_default_value_is_False(self):\n item = Item(name = \"A test item\")\n self.assertEqual(item.name, \"A test item\")\n self.assertFalse(item.done)", "def done_item(self) -> None:\n pass", "def test_mark_post_process_complete_create(self):\n pass", "def test_done_not_bool(self):\n with pytest.raises(ValueError):\n tasks.add(Task(summary='summary', done='True'))", "def mark_done(self, user):\n if not self.is_done:\n self.is_done = True\n self.done_by = user\n self.save()\n return True", "def mark_done(self):\n self.done = True", "def action_set_done(self):\n self.ensure_one()\n self.write({\"state\": \"done\"})\n self.credit_control_line_ids.write({\"state\": \"done\"})\n return True", "def test_mark_completed(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=False, title=\"Test TODO1\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is False\n\n self.client.get(reverse('todo_mark_completed', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is True", "def am_i_done(self):\n return self.ballots_todo != None and len(self.ballots_todo) == 0", "def test_add_already_present(self):\n food_item = self.create_a_food_item()\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict.popitem()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n food_item_dup = self.create_a_food_item()\n food_item_dup.temp = 'hot'\n rc = process_new_item(self.shelves, food_item_dup)\n self.assertEqual(rc, NewItemStatus.already_shelved)", "def can_create_items(self):\n return # boolean", "def done(self) -> bool:", "def check_done(self):\n return not bool(len(self.tasks))", "def mark_as_done(self):\n self.status = \"DONE\"", "def action_done(self):\n if not self.date_done:\n self.date_done = fields.Datetime.now()\n if self.state_rapel == '1':\n self.generate_rapel()\n self.state = 'done'", "def test_mark_incompleted(self):\n event = Event.objects.all()[0]\n\n todo = TodoItem.objects.create(\n event=event, completed=True, title=\"Test TODO2\",\n due=datetime.date.today(), additional=\"\",\n )\n\n assert todo.completed is True\n\n self.client.get(reverse('todo_mark_incompleted', args=[todo.pk]))\n todo.refresh_from_db()\n\n assert todo.completed is False", "def test_add_dest_has_room(self):\n # remove an entry from the frozen\n self.shelves['frozen'].food_dict.popitem()\n food_item = self.create_a_food_item()\n rc = process_new_item(self.shelves, food_item)\n self.assertEqual(rc, NewItemStatus.ok)\n found_it = self.shelves['frozen'].food_dict.get(food_item.id, None) is not None\n self.assertEqual(found_it, True)", "def test_item_success_create(self):\n\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n topic = Topic.objects.get(id=101)\n time = datetime.timedelta(seconds=66000)\n item = Item.create(name='new', authors=users, topic=topic,\n form=1, superiors=superiors, estimation=time)\n self.assertIsInstance(item, Item)\n self.assertEqual(item.name, 'new')\n self.assertEqual(item.topic.id, 101)\n self.assertEqual(item.form, 1)\n self.assertListEqual(list(item.authors.all()), users)\n self.assertListEqual(list(item.superiors.all()), superiors)\n self.assertEqual(item.description, '')\n self.assertEqual(item.estimation, time)", "def 
test_item_creation_twice(self):\n # create an item\n self.test_shoppingitem()\n # create the same item twice\n res2 = self.client().post(\"/shoppinglists/1/items\",\n headers=dict(\n Authorization=\"Bearer \" + self.access_token),\n data=self.shoppingitem)\n self.assertIn(\"Item name already exists\", str(res2.data))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure the string value of the object is equal to the item name
def test_object_name_is_equal_to_item_name(self): item = Item(name = "A test item") self.assertEqual(str(item), "A test item")
[ "def _valid_object_with_name(ui_object):\n return ui_object.obj_name", "def check_name(self, check_obj, schema):\n raise NotImplementedError", "def test_name(self):\n node = self.create(ObjectNodeItem, UML.ObjectNode)\n name = node.shape.icon.children[1]\n\n node.subject.name = \"Blah\"\n\n assert \"Blah\" == name.text()", "def set_name(self, item_name):\r\n self.name = item_name", "def verify_name(data):\n assert data['Name'] == \"Carbon credits\", \"Name is Incorrect\"", "def test_object_name_is_title(self):\n expected_object_name = self.lesson.title\n self.assertEqual(expected_object_name, str(self.lesson))", "def test_printing_shoppping_item_returns_name(create_shopping_item):\n item = create_shopping_item\n assert item.__str__() == 'shopping item one'", "def test_value_returned_when_object_no_longer_exists(self, value):\n assert _get_object_name_for_pk(Book, value) == value", "def test_from_name(self, testdata: TestData) -> None:\n for record in testdata['observation_type']:\n assert ObservationType.from_name(record['name']).name == record['name']", "def test_asset_name():\n\n invalid = {}\n inventory_ = copy.deepcopy(self._inventory)\n inventory_[\"assets\"].append(invalid)\n\n for name in (\"mixedCaseOk\",\n \"lowercaseok\",\n \"underscore_ok\"):\n invalid[\"name\"] = name\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n for name in (\"spaces not ok\",\n \"special~characters$not^ok\",\n \"dash-not-ok\"):\n invalid[\"name\"] = name\n\n assert_raises(\n schema.ValidationError,\n inventory.save,\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )", "def test_bucketlist_item_edit_blank_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n result = self.client.get(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token))\n self.assertEqual(resp.status_code, 400)\n self.assertIn(\"Enter a Valid Name\", str(resp.data))", "def __contains__(self, item_name):\n tuples = self._execute(\n \"SELECT name FROM items WHERE name == ?\",\n (item_name,)\n )\n return len(tuples) == 1", "def test_correct_upload_item(upload_items: List[JSONDict]) -> None:\n validated = UploadItem(**upload_items[0])\n assert validated.dict() == upload_items[0]", "def test_property_name(self):\n \n name = self.location.name\n\n self.assertIsInstance(name, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"name\", \"Bogus Location name\")\n )", "def _ScrubItem(dict, item_name):\r\n if item_name in dict:\r\n dict[item_name] = '...scrubbed %s bytes...' 
% len(dict[item_name])", "def check_name(self, name):\n assert self.file_name.get_attribute('value') == name", "def test___str__(self):\n item = Item(self.item_entry)\n print (item)", "def test_should_name_field(self):\n self.assertIn(\"name\", self.fields)", "def test_bucketlist_item_edit_with_existing_name(self):\n resp = self.client.post(\"/auth/register/\", data=self.user_details)\n self.assertEqual(resp.status_code, 201)\n # login user\n result = self.client.post(\"/auth/login/\", data=self.user_details)\n access_token = json.loads(result.data.decode())[\"access_token\"]\n # create bucketlist\n self.client.post('/bucketlists/', headers=dict(\n Authorization=access_token),\n data=self.bucketlist)\n # create bucketlist item\n bucketlist_item = self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data=self.item)\n self.client.post(\n \"/bucketlists/1/items/\", headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n result = json.loads(bucketlist_item.data.decode())\n resp = self.client.put(\n \"/bucketlists/1/items/1/\",\n headers=dict(Authorization=access_token),\n data={\"name\": \"Stan Chart Marathon\"})\n self.assertEqual(resp.status_code, 409)\n self.assertIn(\"Name exists, enter another\", str(resp.data))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a postvalidator function that makes sure the value of this item is a key in the sibling dictionary 'sib_name'. Raises a ValueError if not. This generally assumes siblings[sib_name] is a required CategoryElement.
def is_sib_key(sib_name): def is_sib_key_val(siblings, value): if value not in siblings[sib_name].keys(): raise ValueError( "Must be a key of {}, but got {}" .format(sib_name, value)) return value return is_sib_key_val
[ "def check_items_slugs(cls, slugs, registry):\n for m in registry.items():\n for i in m[1]['items'].items():\n for slug in slugs:\n try:\n item = i[1]['_class'].objects.get(slug=slug)\n raise ItemAttributeChoicesSlugsDuplicateItemInstanceSlug(cls, item)\n except ObjectDoesNotExist:\n pass", "def validate_children(self, source, **kwargs):\n # TODO cache this loaded data keyed on a hashed version of kwargs\n children = self._load_json(\"children\", source, **kwargs)\n self._validate_against_schema(\"children\", children)\n\n strand = getattr(self, \"children\", [])\n\n # Loop the children and accumulate values so we have an O(1) check\n children_keys = {}\n for child in children:\n children_keys[child[\"key\"]] = children_keys.get(child[\"key\"], 0) + 1\n\n # Check there is at least one child for each item described in the strand\n # TODO add max, min num specs to the strand schema and check here\n for item in strand:\n strand_key = item[\"key\"]\n if children_keys.get(strand_key, 0) <= 0:\n raise exceptions.InvalidValuesContents(f\"No children found matching the key {strand_key}\")\n\n # Loop the strand and add unique keys to dict so we have an O(1) check\n strand_keys = {}\n for item in strand:\n strand_keys[item[\"key\"]] = True\n\n # Check that each child has a key which is described in the strand\n for child in children:\n child_key = child[\"key\"]\n if not strand_keys.get(child_key, False):\n raise exceptions.InvalidValuesContents(\n f\"Child with key '{child_key}' found but no such key exists in the 'children' strand of the twine.\"\n )\n\n # TODO Additional validation that the children match what is set as required in the Twine\n return children", "def _registerSceneItemPair(self, kSceneItem, dccSceneItem):\n\n pairing = {\n \"src\": kSceneItem,\n \"tgt\": dccSceneItem\n }\n\n self._buildElements.append(pairing)\n\n return True", "def is_sibling_of(self,sibling_obj_id,obj_id):\n obj = self[obj_id]\n return obj['sibling'] == sibling_obj_id", "def validate_catalogue_name(attrs, source):\n value = attrs[source]\n if QuestionCatalogue.objects.filter(catalogue_name=value).count() > 0:\n raise serializers.ValidationError(\"A catalogue with this name already exists.\")\n return attrs", "def GetSubkeyByName(self, name):", "def SALESMAN_BASKET_ITEM_VALIDATOR(self) -> callable:\n default = \"salesman.basket.utils.validate_basket_item\"\n value = self._setting('SALESMAN_BASKET_ITEM_VALIDATOR', default)\n return self._callable(value)", "def hasSiblings():", "def clean_subcategory(self):\n subcategory = self.cleaned_data['subcategory']\n\n # If subcategory does not have a parent category, then it is a parent category.\n if subcategory.parent_category is None:\n raise forms.ValidationError(INVALID_SUBCATEGORY, code='invalid')\n return subcategory", "def _pfp__handle_non_consecutive_duplicate(self, name, child, insert=True):\n if name in self._pfp__children_map:\n previous_child = self._pfp__children_map[name]\n\n # DO NOT cause __eq__ to be called, we want to test actual objects, not comparison\n # operators\n if previous_child is not child:\n self._pfp__handle_non_consecutive_duplicate(\n name, previous_child, insert=False\n )\n del self._pfp__children_map[name]\n\n next_suffix = self._pfp__name_collisions.setdefault(name, 0)\n new_name = \"{}_{}\".format(name, next_suffix)\n child._pfp__name = new_name\n self._pfp__name_collisions[name] = next_suffix + 1\n self._pfp__children_map[new_name] = child\n child._pfp__parent = self\n\n if insert:\n self._pfp__children.append(child)\n\n return 
child", "def validate_unique_taxon_slugs(cls, values):\n if 'attributes' in values:\n # count occurrence of each taxon slug in attributes\n attributes: List[FdqModelAttribute] = values['attributes']\n taxon_slugs = cls._get_available_attrs_taxon_slugs(attributes)\n\n taxon_slugs_counter = Counter(taxon_slugs)\n\n multiple_taxon_slugs = [\n taxon_slug for taxon_slug, occurrence in taxon_slugs_counter.items() if occurrence > 1\n ]\n if len(multiple_taxon_slugs):\n raise ValueError('Following fields are mapped more than once - ' + ','.join(multiple_taxon_slugs))\n\n return values", "def __contains__(self, k: str) -> bool:\n\n return k in self.named_children()", "def check_slug(self, target, position, slug, node):\n if target is None:\n siblings = TreeItem.objects.root_nodes()\n else:\n if position == 'first-child' or position == 'last-child':\n siblings = target.get_children()\n else:\n siblings = target.get_siblings(include_self=True)\n for sibling in siblings:\n if sibling != node and \\\n sibling.get_slug() == slug:\n return False\n return True", "def _duplicate_child_allowed_check(self):\n\n for rule in self.options[\n 'parent_allows_duplicate_child']:\n if self.lineage_test(rule):\n return True\n return False", "def post_validated(self, struct, item, value):\n return value", "def _error(self, child):\r\n child._set_parent(self)\r\n if not self._children.has_key(child._name):\r\n # skip all internal objects (that start with _)\r\n if not child._name.startswith('?'):\r\n self._order.insert(0,child._name)\r\n self._children[child._name] = child\r\n else:\r\n raise exceptions.AlreadyExists('Child %s already exists' % child._name)\r\n return", "def __contains__(self, key):\n try: self._item(key)\n except KeyValuePair.DoesNotExist: \n if self._parent != None: return self.parent.__contains__(key)\n else: return False\n return True", "def exists(node, value):\n\n # case: context is a Folder\n if not IAlias.providedBy(context):\n name_checker = context.check_name\n\n # case: context is an Alias that is being renamed\n elif value != context.__name__:\n name_checker = context.__parent__.check_name\n\n # case: context is an Alias and the key isn't changing\n else:\n name_checker = lambda v: None\n\n # raise colander.Invalid if the chosen name_checker fails\n try:\n name_checker(value)\n except Exception as e:\n raise colander.Invalid(node, e.args[0], value)", "async def validate_publisher(\n self,\n slot_value: Any,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: DomainDict,\n ) -> Dict[Text, Any]:\n if slot_value.lower() == \"skip\":\n return {\"publisher\": \"skip\"}\n\n if type(slot_value) is str:\n publisher = tracker.get_slot(\"publisher\")\n return {\"publisher\": publisher}\n \n dispatcher.utter_message(response=\"utter_wrong_type\")\n return {\"publisher\": publisher}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get requirements file line.
def get_line(self): # type: () -> str line = "{}=={}".format(self.name, self.version) if self.type != RequirementType.LATEST_VERSION: line += ' # ' + TEMPLATES[self.type] if self.type == RequirementType.NOT_LATEST_VERSION: line = line.replace(r'(\S*)', self.error_version) return line + '\n'
[ "def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None", "def requirements(self):\n requirements_txt = path.join(self.directory, \"requirements.txt\")\n if path.isfile(requirements_txt):\n return requirements_txt\n return None", "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def get_requirements(req_file: str) -> List[str]:\n req_path = Path(req_file)\n requirements = req_path.read_text().split(\"\\n\") if req_path.exists() else []\n return requirements", "def GetLine(line):\r\n pass", "def requires():\n with open('requirements.txt') as infile:\n return infile.read().splitlines()", "def _get_line_at(self, position):\n source = self._get_file_source(position.file_name)\n lines = source.splitlines()\n if len(lines) == 0:\n return ''\n return lines[position.line - 1]", "def requirements():\n with open('requirements.txt') as req:\n return req.read().splitlines()", "def get_requirements_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-13]\n req_path = os.path.join(root, 'requirements.txt')\n\n return req_path", "def get_readme_line(self, test_name, line_match):\n return self.get_output_line(test_name, line_match, \"README\")", "def _get_api_requirement(\n self,\n subpath: Path,\n meta_lines: dict[int, list[str]],\n toplevel: bool,\n ) -> int | None:\n lines = [\n l\n for l in meta_lines.values()\n if len(l) == 4\n and l[0] == 'ba_meta'\n and l[1] == 'require'\n and l[2] == 'api'\n and l[3].isdigit()\n ]\n\n # We're successful if we find exactly one properly formatted\n # line.\n if len(lines) == 1:\n return int(lines[0][3])\n\n # Ok; not successful. 
lets issue warnings for a few error cases.\n if len(lines) > 1:\n logging.warning(\n \"metascan: %s: multiple '# ba_meta require api <NUM>'\"\n ' lines found; ignoring module.',\n subpath,\n )\n self.results.announce_errors_occurred = True\n elif not lines and toplevel and meta_lines:\n # If we're a top-level module containing meta lines but no\n # valid \"require api\" line found, complain.\n logging.warning(\n \"metascan: %s: no valid '# ba_meta require api <NUM>\"\n ' line found; ignoring module.',\n subpath,\n )\n self.results.announce_errors_occurred = True\n return None", "def parse_requirement(req_text):\n req_text = req_text.strip()\n if not req_text:\n return None\n if req_text[0] == \"#\":\n return None\n return pkg_resources.Requirement.parse(req_text)", "def lookup(self, filename, line):\n\n return self.line_coverage.get(filename, {}).get(line)", "def _python_requires_for_reqs(reqs):\n for req_file in reqs:\n try:\n s = open(req_file, \"r\").read()\n except IOError:\n pass\n else:\n m = PYTHON_REQ_P.search(s)\n if m:\n return m.group(1), req_file\n return None", "def first_line(self):\n with open(self.file_path) as file:\n return file.readline()", "def requirements():\n requirements = []\n with open('requirements.txt', 'r+') as f:\n for line in f.readlines():\n requirements.append(line.strip('\\n'))\n return requirements", "def get_line(self, line: int) -> str:\n if self.__content is None:\n return util.get_line(self.original_path, line)\n\n return self.__content.splitlines(keepends=True)[line - 1]", "def get_first_valid_line(f_cov):\n with open(f_cov) as f:\n for line_no, line in enumerate(f):\n if line.startswith('-'*98):\n return line_no\n else:\n return 0", "def get_requirements(path=\"requirements.txt\"):\n content = read(path)\n lines = []\n\n for line in content.splitlines():\n line = line.strip()\n if not line or any(map(line.startswith, SKIP_REQUIREMENTS)):\n continue\n\n if line.startswith(\"-r\"):\n new_path = line[2:].strip()\n lines.extend(get_requirements(path=new_path))\n continue\n\n lines.append(line)\n\n return lines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the cosine distance between two vectors
def findCosineDistance(vector1, vector2): vec1 = vector1.flatten() vec2 = vector2.flatten() a = np.dot(vec1.T, vec2) b = np.dot(vec1.T, vec1) c = np.dot(vec2.T, vec2) return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
[ "def cosine_dist(v1, v2):\n n1 = np.sqrt(np.sum(v1 ** 2) + SMALL)\n n2 = np.sqrt(np.sum(v2 ** 2) + SMALL)\n return 1 - (np.dot(v1, v2) / (n1 * n2))", "def findCosineDistance(vector1, vector2):\n vec1 = vector1.flatten()\n vec2 = vector2.flatten()\n\n a = np.dot(vec1.T, vec2)\n b = np.dot(vec1.T, vec1)\n c = np.dot(vec2.T, vec2)\n return 1 - (a/(np.sqrt(b)*np.sqrt(c)))", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def cosineDistance(a, b):\n n = len(a)\n numerator = 0\n denominatorA = 0\n denominatorB = 0\n for i in range(n):\n numerator += a[i] * b[i]\n denominatorA += a[i]**2\n denominatorB += b[i]**2\n num = numerator / (denominatorA*denominatorB)**(1/2)\n distance = arccos(num)\n return distance", "def cosine_distance(point1, point2):\n cos_dist = 0\n length_point1 = norm(point1)\n length_point2 = norm(point2)\n cos_dist = 1 - (dot_product(point1, point2)/(length_point1 * length_point2))\n return cos_dist", "def cosine(vec1,vec2):\n mag1 = 0\n mag2 = 0\n dotp = 0\n for lang in vec1:\n mag1 += vec1[lang]**2\n for lang in vec2:\n mag2 += vec2[lang]**2\n mag1 = math.sqrt(mag1)\n mag2 = math.sqrt(mag2)\n\n if mag1 * mag2 == 0:\n\treturn 0.0\n \n for lang in vec1:\n if lang in vec2:\n dotp += ( vec1[lang] * vec2[lang] )\n \n return float( dotp / ( mag1 * mag2 ) )", "def cosine(u, v):\n # Use scipy's method:\n return scipy.spatial.distance.cosine(u, v)", "def cosine_score(vector1, vector2):\n return np.dot(vector1, vector2)/np.sqrt(np.dot(np.dot(vector1, vector1), np.dot(vector2, vector2)))", "def tf_cosine_distance(self, a, b):\n normalize_a = tf.nn.l2_normalize(a, -1)\n normalize_b = tf.nn.l2_normalize(b, -1)\n cos_similarity = tf.reduce_sum(\n tf.multiply(normalize_a, normalize_b), axis=-1, keep_dims=True\n )\n return (1.0 - cos_similarity) / 2.0", "def getCosineAngle(vec1, vec2):\n\n dotProduct = np.dot(vec1, vec2)\n norming = np.linalg.norm(vec1) * np.linalg.norm(vec2)\n return dotProduct/norming", "def distance(self, u, v):\r\n return self.cosine_measure(u,v)", "def _compute_cosine_similarity(d1: np.ndarray, d2: np.ndarray) -> float:\n assert d1.shape == d2.shape\n\n # To avoid dividing by zero. 
This edge case occurs when both vectors share\n # no common elements\n if (np.linalg.norm(d1) * np.linalg.norm(d2)) == 0:\n return 0\n\n # Computing cosine similarity between both vectors, refer to report for explicit forumla\n similarity = (np.dot(d1, d2)) / (np.linalg.norm(d1) * np.linalg.norm(d2))\n return similarity", "def pdist_func(x, y):\n a_first = x[:, 0:1, :]\n b_first = y[:, 0:1, :]\n a_vec = x[:, 1:, :] - x[:, :-1, :]\n b_vec = y[:, 1:, :] - y[:, :-1, :]\n a_vec = torch.cat([a_first, a_vec], dim=1)\n b_vec = torch.cat([b_first, b_vec], dim=1)\n cos_simil = torch.nn.CosineSimilarity(dim=2)(a_vec, b_vec)\n cos_simil[torch.isnan(cos_simil)] = -1\n return (1 - cos_simil).sum(dim=1)", "def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:\n return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))", "def cosine_similarity(pos1, pos2):\n return dot(pos1, pos2) / (norm(pos1) * norm(pos2))", "def cosine_distance(A, B):\n\n A = A / T.sqrt(T.sum(A ** 2, axis=1)).reshape((-1, 1))\n B = B / T.sqrt(T.sum(B ** 2, axis=1)).reshape((-1, 1))\n D = T.dot(A, T.transpose(B))\n\n return 1 - D", "def cosine_similarity(a, b):\n a = a.flatten()\n b = b.flatten()\n return jnp.dot(a, b) / (jnp.linalg.norm(a) * jnp.linalg.norm(b))", "def return_cosine_similarity(self, a, b):\n #return np.dot(a,b.T)/(np.linalg.norm(a)*np.linalg.norm(b))\n return np.dot(a,b.T) / (np.sqrt(np.dot(a,a.T)) * np.sqrt(np.dot(b,b.T)))", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add index operation with name to the operations given.
def add_index_operation(self, name, operations): if name not in self._index_operations: self._add_io(name, operations) else: raise AttributeError("An index operation with the name {} was already taken".format(name))
[ "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def _apply_index_op(db, op):\n if 'createIndexes' not in op['o']:\n return\n o = op['o']\n coll_name = o['createIndexes']\n key = list(o['key'].items())\n name = o['name']\n return db[coll_name].create_index(key, name=name)", "def __call__(self, op):\n self._handle_renameCollection(op)\n if self.regex.match(op['ns']):\n ns = self.regex.sub(self.new_ns, op['ns']).rstrip(\".\")\n logging.debug(\"renaming %s to %s\", op['ns'], ns)\n op['ns'] = ns\n if op['ns'].endswith('.system.indexes'):\n # index operation; update ns in the op also.\n self(op['o'])\n self._handle_create(op)", "def add_to_index(operation, path, indexPath, checksumOld=0, checksumNew=0):\n indexPath = indexPath + '.' + str(os.getpid())\n line = operation + ' ' + str(checksumOld) + ' ' + str(checksumNew) + ' ' + path\n print_verbose(1, line)\n with open(indexPath, 'a') as indexFile:\n indexFile.write(unicode(line + '\\n'))", "def add_operations(self, operations):\n for op in operations:\n self.add_operation(op)", "def define_index(self, index_name, columns, kind=\"index\"):\n pass", "def addIndex(indexDef):", "def AddOperation(self, op):\n self._operations.append(op)", "def _add_operator(operators, operator, name=None):\r\n if name is None:\r\n name = operator.__name__\r\n\r\n if name in operators:\r\n warnings.warn(\"The ALNS instance already knows an operator by the\"\r\n \" name `{0}'. This operator will now be replaced with\"\r\n \" the newly passed-in operator. 
If this is not what\"\r\n \" you intended, consider explicitly naming your\"\r\n \" operators via the `name' argument.\".format(name),\r\n OverwriteWarning)\r\n\r\n operators[name] = operator", "def op_name(self, name):\n self._op_name = name", "def _register_operation(self, **operation):\n name = operation[\"name\"]\n if name in self.operations:\n raise ValueError(\"operation name already registered: {}\".format(name))\n self.operations[name] = _Operation({**operation, \"resource\": self})", "def append_op(ops, # pylint: disable=dangerous-default-value\n op_name,\n op_type,\n input_names = None,\n input_kwargs = None,\n op_kwargs = {},\n num_outputs = 1):\n if not input_names:\n input_names = [ops[-1].name]\n default_op_kwargs = DEFAULT_OP_KWARGS.get(op_type, {})\n ops.append(\n new_op(op_name=op_name,\n op_type=op_type,\n input_names=input_names,\n input_kwargs=input_kwargs,\n op_kwargs={**default_op_kwargs, **op_kwargs},\n num_outputs=num_outputs))", "def add_name_index(self, index):\n self.name += \".%d\" % index", "def operation(self, name):\n\n try:\n return self.operations[name]\n except KeyError:\n return self.operation_not_found(name)", "def add_operator(self, operator, name=None):\r\n self._add_operator(self._operators, operator, name)", "def add(self, name, index = None):\n if index is None:\n while self.indexDict.has_key(self.count):\n self.count += 1\n index = self.count\n self.fieldDict[name] = index\n self.indexDict[index] = name", "def add_catalog_index(config, name, factory_name, category, **factory_args):\n def add_index():\n factories = get_index_factories(config.registry)\n indexes = get_candidate_indexes(config.registry)\n\n if not factory_name in factories:\n raise ConfigurationError(\n 'No index factory named %r' % factory_name\n )\n\n catvals = {\n 'factory_name':factory_name, \n 'factory_args':factory_args,\n }\n indexes.setdefault(category, {})[name] = catvals\n\n intr = config.introspectable(\n 'sd catalog indexes', name, name, 'sd catalog index'\n )\n intr['name'] = name\n intr['factory_name'] = factory_name\n intr['factory_args'] = factory_args\n intr['category'] = category\n intr.relate(\n 'sd catalog index factories', \n ('sd-catalog-index-factory', factory_name)\n )\n\n discriminator = ('sd-catalog-index', name, category)\n config.action(discriminator, callable=add_index, introspectables=(intr,))", "def post_create_index(\n self, response: operations_pb2.Operation\n ) -> operations_pb2.Operation:\n return response", "def add_repair_operator(\n self, op: _OperatorType, name: Optional[str] = None\n ):\n logger.debug(f\"Adding repair operator {op.__name__}.\")\n self._r_ops[name if name else op.__name__] = op" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param.
def _offset_for(self, param): if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0
[ "def offset(self):\n\n if 'offset' in self:\n return self['offset']\n else:\n return None", "def Offset(self) -> int:", "def get_position_offset_and_size(self, field):\n\n if field not in self._fields:\n return None\n\n out = 0\n for fld in self._fields:\n val = getattr(self, fld)\n if fld == field:\n return out, val\n else:\n out += val\n return None", "def _get_flat_param_offsets(self) -> List[Tuple[int, int]]:\n cumulative_sum = list(accumulate(self.flat_param._numels))\n starts = [0] + cumulative_sum[:-1]\n ends = [end - 1 for end in cumulative_sum] # inclusive\n param_offsets = list(zip(starts, ends))\n return param_offsets", "def offset(self) -> Tuple[int, int]:\n return (self.ioffset[0].to_pixels(self.parent.width),\n self.ioffset[1].to_pixels(self.parent.height))", "def getParamIndexByIndex(self, i):\n if i >= 0:\n return self.dimpoint * i + self.dimx + self.dimu\n else:\n return (self.nPoint + i) * self.dimpoint + self.dimx + self.dimu", "def offset(self):\n return self.query.offset", "def get_field_relative_offset(self, field_name):\n return self.__field_offsets__[field_name]", "def get_input_offset(self):\n return ELFLING_PADDING + len(self.__data) - 4", "def ivt_offset(self) -> int:\n return self.offset", "def get_relative_offset(self):\n\n\t\treturn self.get_offset_1()", "def min_offset(self):\n return self.offset", "def _getLayoutOffset(self):\n view = self.parent().parent()\n pos = view.pageLayout().offset2pos(self._layoutOffset)\n return pos + view.layoutPosition()", "def _offset(self) -> int:\n ...", "def get_position_offset( self, params, return_parameters=False ):\n\n\t\tif len( params ) < 3: raise ValueError( 'Did not receive enough parameters to calculate offsets!' )\n\n\t\t# first remap x/y back to physical coordinates\n\t\t# location of parameters in params array is determined by return order of self.get_first_guess()\n\t\tnew_x = mapping.remap( params[0], self.lim_x )\n\t\tnew_y = mapping.remap( params[1], self.lim_y )\n\n\t\t# calculate position offset\n\t\tshifts = np.array( [ new_y-self.img_y, new_x-self.img_x ] )\n\n\t\t# return\n\t\tif not return_parameters: return shift\n\n\t\t# return_parameters == True: also return parameters array with first three parameters gone\n\t\treturn ( shifts, self.get_trimmed( params ) )", "def viewport_offset(self):\n pass", "def _raveled_index_for(self, param):\n from ..param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def annotation_offset(self):\n\n return self._seq.offset + self._seq.start" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work
def _raveled_index_for(self, param): from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param)
[ "def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def getArrayIndices(self):\n \n pass", "def find_index_at(self, param):\n axis_slice = list(range(len(self.param.shape)))\n if len(axis_slice) > 2: # Treat incoherence of axes of meshgrid...\n axis_slice[0] = 1\n axis_slice[1] = 0\n linspaces = [np.swapaxes(var, ax, -1)[tuple(\n np.zeros(self.dof - 1, int))]\n for ax, var in zip(axis_slice, self.param)]\n indices = np.array([np.argmin(np.abs(ls - p))\n for ls, p in zip(linspaces, param)])\n if len(axis_slice) > 2:\n indices[0], indices[1] = indices[1], indices[0]\n return indices", "def indices(self):", "def _get_flat_index(self, row: int, col: int) -> int:\n return (row - 1) * self.cols + col - 1", "def getParamIndexByIndex(self, i):\n if i >= 0:\n return self.dimpoint * i + self.dimx + self.dimu\n else:\n return (self.nPoint + i) * self.dimpoint + self.dimx + self.dimu", "def get_param_indexes(self):\n self.debug.start_function('get_param_indexes')\n\n for i, key in enumerate(self.mcmc_version.param_keys):\n self.param_idxs[key] = i\n for i, key in enumerate(self.mcmc_version.interp_keys):\n self.interp_idxs[key] = i\n\n self.debug.end_function()", "def tree_idx(tree,j1,J1,J2):\n j = j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def recompose_index(self, array):\n idx = 0\n for i in range(len(array)):\n idx += array[i] * self.N**i\n return idx", "def _retrieve_index(self, p):\n i = 0\n for d in range(self.depth):\n left = self.tree[d + 1][2 * i]\n if p < left:\n i *= 2\n else:\n i = i * 2 + 1\n p -= left\n return i", "def _get_index_array(self):\n table_index = self._parameter_root['tb_names'].value[0]\n nbr_points = len(self._tables[\n table_index][self._tables[table_index].dtype.names[0]])\n index_array = np.arange(0, nbr_points)\n return index_array", "def get_indexed_param(self):\n switcher_index = self.input_param(\"switch_index\").value \n indexed_param = self.input_param(\"index_%s\" % switcher_index)\n if indexed_param is None:\n raise Exception(\"Switch index value for %s is out of bouned.\" % self)\n return indexed_param", "def pndindex(*args):\r\n return np.ndindex(*args)", "def index_arg(self, spec):\n for i,v in enumerate(self._parameter_types):\n if v[0] == spec[0] and v[1].rstrip('_') == spec[1].rstrip('_'):\n return i\n elif v[0] == spec[0] and spec[1] == None:\n return i\n elif v[1].rstrip('_') == spec[1].rstrip('_') and spec[0] == None:\n return i\n return -1", "def parameter_id_to_index(parameter_id):\n\n assert(parameter_id[0] == 'n') # make sure this is a nonbonded parameter...\n\n return int(parameter_id[1:]) - 1", "def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])", "def GetArrayIncrements(self, vtkDataArray, p_int=..., p_int=..., p_int=...):\n ...", "def getbaraidx(self,idx_,sub_,weights_):\n maxnum_ = self.getbaraidxinfo((idx_))\n i_ = ctypes.c_int32()\n j_ = ctypes.c_int32()\n num_ = ctypes.c_int64()\n _sub_minlength = (maxnum_)\n if (maxnum_) > 0 and sub_ is not None and len(sub_) != (maxnum_):\n raise ValueError(\"Array argument sub is not long enough: Is %d, expected %d\" % (len(sub_),(maxnum_)))\n if isinstance(sub_,numpy.ndarray) and not sub_.flags.writeable:\n raise ValueError(\"Argument sub must be 
writable\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int64) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int64))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n _weights_minlength = (maxnum_)\n if (maxnum_) > 0 and weights_ is not None and len(weights_) != (maxnum_):\n raise ValueError(\"Array argument weights is not long enough: Is %d, expected %d\" % (len(weights_),(maxnum_)))\n if isinstance(weights_,numpy.ndarray) and not weights_.flags.writeable:\n raise ValueError(\"Argument weights must be writable\")\n if weights_ is None:\n raise ValueError(\"Argument weights may not be None\")\n if isinstance(weights_, numpy.ndarray) and weights_.dtype is numpy.dtype(numpy.float64) and weights_.flags.contiguous:\n _weights_copyarray = False\n _weights_tmp = ctypes.cast(weights_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif weights_ is not None:\n _weights_copyarray = True\n _weights_np_tmp = numpy.zeros(len(weights_),numpy.dtype(numpy.float64))\n _weights_np_tmp[:] = weights_\n assert _weights_np_tmp.flags.contiguous\n _weights_tmp = ctypes.cast(_weights_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _weights_copyarray = False\n _weights_tmp = None\n \n res = __library__.MSK_XX_getbaraidx(self.__nativep,idx_,maxnum_,ctypes.byref(i_),ctypes.byref(j_),ctypes.byref(num_),_sub_tmp,_weights_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n i_ = i_.value\n _i_return_value = i_\n j_ = j_.value\n _j_return_value = j_\n num_ = num_.value\n _num_return_value = num_\n if _sub_copyarray:\n sub_[:] = _sub_np_tmp\n if _weights_copyarray:\n weights_[:] = _weights_np_tmp\n return (_i_return_value,_j_return_value,_num_return_value)", "def pndindex(*args):\n return np.ndindex(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper that prevents duplicated code. This adds the given 'what' (transformation, prior, etc.) to the parameter index operations 'which'. 'reconstrained' are the reconstrained indices. Warn when reconstraining parameters if 'warning' is True.
def _add_to_index_operations(self, which, reconstrained, what, warning): if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index
[ "def ensure_default_constraints(self,warn=False):\n positive_strings = ['variance','lengthscale', 'precision']\n for s in positive_strings:\n for i in self.grep_param_names(s):\n if not (i in self.all_constrained_indices()):\n name = self._get_param_names()[i]\n self.constrain_positive(name)\n if warn:\n print \"Warning! constraining %s postive\"%name", "def _restrict(self):\n self.macro_state = self.restriction_operator(self,self.micro_state)", "def apply_constraints(self):\n pass", "def solve_constraint_propagate_reduced_domains(problem) :\n raise NotImplementedError", "def propagate_concretizer(self, primitive, index, *in_vals, **params):\n pass", "def test_feature_map_without_parameters_warns(self):\n var_form = QuantumCircuit(1)\n var_form.ry(Parameter('a'), 0)\n feature_map = QuantumCircuit(1)\n feature_map.rx(0.2, 0)\n optimizer = SPSA()\n with self.assertWarns(UserWarning):\n _ = VQC(optimizer, feature_map, var_form, self.training_data, self.testing_data)", "def test_warning_for_overlapping_wires(self):\n X = qml.PauliX(0)\n Y = qml.PauliY(0)\n op = qml.PauliX(0) @ qml.PauliY(1)\n\n with pytest.warns(UserWarning, match=\"Tensor object acts on overlapping wires\"):\n Tensor(X, Y)\n\n with pytest.warns(UserWarning, match=\"Tensor object acts on overlapping wires\"):\n _ = op @ qml.PauliZ(1)", "def accept_optimize():\n pass", "def _check_inputs(node, storage_map, r_vals, dr_vals, active_nodes,\r\n clobber_dr_vals=True,\r\n perform=None, warn_input_not_reused=True):\r\n destroyed_idx_list = []\r\n destroy_map = getattr(node.op, 'destroy_map', {})\r\n for o_pos, i_pos_list in destroy_map.iteritems():\r\n destroyed_idx_list.extend(i_pos_list)\r\n destroyed_res_list = [node.inputs[i] for i in destroyed_idx_list]\r\n\r\n actually_inplace_outputs = []\r\n dmap = getattr(node.op, 'destroy_map', {})\r\n for oo, ii in dmap.iteritems():\r\n out_var = storage_map[node.outputs[oo]][0]\r\n in_var = storage_map[node.inputs[ii[0]]][0]\r\n if _may_share_memory(out_var, in_var):\r\n actually_inplace_outputs.append(node.outputs[oo])\r\n\r\n if warn_input_not_reused and destroyed_res_list:\r\n if isinstance(node.op, OutputGuard):\r\n # The point of OutputGuard is to be declared as destructive\r\n # while not destroying anything\r\n continue\r\n if out_var is not in_var:\r\n _logger.warning(\"Optimization Warning: input idx %d marked \"\r\n \"as destroyed was not changed for node '%s'\",\r\n ii[0], str(node))\r\n\r\n vmap = getattr(node.op, 'view_map', {})\r\n for oo, ii in vmap.iteritems():\r\n out_var = storage_map[node.outputs[oo]][0]\r\n in_var = storage_map[node.inputs[ii[0]]][0]\r\n if _may_share_memory(out_var, in_var):\r\n actually_inplace_outputs.append(node.outputs[oo])\r\n\r\n if warn_input_not_reused:\r\n # We don't try to optimize simple scalar and empty ndarray,\r\n # as this is not worth our time. 
This happen at least in\r\n # Subtensor when the output is a scalar But this depend on\r\n # the version of numpy!\r\n if getattr(out_var, 'size', 2) <= 1:\r\n continue\r\n if isinstance(node.op, OutputGuard):\r\n # This class is not in the final graph.\r\n continue\r\n if not _may_share_memory(out_var, in_var):\r\n _logger.warning(\"Optimization Warning: input idx %d marked \"\r\n \"as viewed but new memory allocated by node '%s'\",\r\n ii[0], str(node))\r\n\r\n for r_idx, r in enumerate(node.inputs):\r\n if not r.type.values_eq(r_vals[r], storage_map[r][0]):\r\n # some input node 'r' got changed by running the node\r\n # this may or may not be ok...\r\n if r in destroyed_res_list:\r\n # ok, we expected r to be destroyed\r\n if node in active_nodes:\r\n if dr_vals.get(r, (0, node))[1] is not node:\r\n # bad: there should only be one active node that destroys any variable\r\n raise Exception('failure in topological ordering')\r\n if clobber_dr_vals:\r\n dr_vals[r] = (storage_map[r][0], node) #no copy, this is the last use of this variable\r\n storage_map[r][0] = None #make sure that dr_vals[r] doens't get used again\r\n else:\r\n raise BadDestroyMap(node, r_idx, r_vals[r],\r\n storage_map[r][0], perform)\r\n\r\n return actually_inplace_outputs", "def propose_optimize():\n pass", "def update_paramconv_if_necessary(self):", "def addConstraint(constraint, problem):\n problem += constraint", "def constraints(self):", "def constraining_classifiers_constrain_args(\n self, diagnostics=None, context=None):\n raise NotImplementedError(\n 'operation constraining_classifiers_constrain_args(...) not yet implemented')", "def _generateVarsUpdateConstrained(self,traj,ak,gradient,varK):\n varKPlus = {}\n try:\n gain = ak[:]\n except (TypeError,IndexError):\n gain = [ak]*len(self.getOptVars()) #technically incorrect, but missing ones will be *0 anyway just below here\n gain = np.asarray(gain)\n for index,var in enumerate(self.getOptVars()): #get full opt vars so all variables carried through\n varKPlus[var] = varK[var]-gain[index]*gradient.get(var,0.0)*1.0\n satisfied, activeConstraints = self.checkConstraint(self.denormalizeData(varKPlus))\n if satisfied:\n return varKPlus, False\n # else if not satisfied ...\n # check if the active constraints are the boundary ones. In this case, try to project the gradient at an angle\n modded = False\n if len(activeConstraints['internal']) > 0:\n modded = True\n projectedOnBoundary= {}\n for activeConstraint in activeConstraints['internal']:\n projectedOnBoundary[activeConstraint[0]] = activeConstraint[1]\n gradient[activeConstraint[0]] = 0.0 # remove this component\n varKPlus.update(self.normalizeData(projectedOnBoundary))\n newNormWithoutComponents = LA.norm(gradient.values())\n for var in gradient.keys():\n gradient[var] = gradient[var]/newNormWithoutComponents if newNormWithoutComponents != 0.0 else gradient[var]\n\n if len(activeConstraints['external']) == 0:\n return varKPlus, modded\n\n # Try to find varKPlus by shorten the gradient vector\n self.raiseADebug('Trajectory \"{}\" hit constraints ...'.format(traj))\n self.raiseADebug(' Attempting to shorten step length ...')\n foundVarsUpdate, varKPlus = self._bisectionForConstrainedInput(traj,varK, ak, gradient)\n if foundVarsUpdate:\n self.raiseADebug(' ... 
successfully found new point by shortening length.')\n return varKPlus, True\n\n # Try to find varKPlus by rotate the gradient towards its orthogonal, since we consider the gradient as perpendicular\n # with respect to the constraints hyper-surface\n self.raiseADebug(' Attempting instead to rotate trajectory ...')\n innerLoopLimit = self.constraintHandlingPara['innerLoopLimit']\n if innerLoopLimit < 0:\n self.raiseAnError(IOError, 'Limit for internal loop for constraint handling shall be nonnegative')\n loopCounter = 0\n foundPendVector = False\n while not foundPendVector and loopCounter < innerLoopLimit:\n loopCounter += 1\n depVarPos = randomUtils.randomIntegers(0,len(self.getOptVars(traj=traj))-1,self)\n pendVector = {}\n npDot = 0\n for varID, var in enumerate(self.getOptVars(traj=traj)):\n pendVector[var] = self.stochasticEngineForConstraintHandling.rvs() if varID != depVarPos else 0.0\n npDot += pendVector[var]*gradient[var]\n for varID, var in enumerate(self.getOptVars(traj=traj)):\n if varID == depVarPos:\n pendVector[var] = -npDot/gradient[var]\n\n r = LA.norm(np.asarray([gradient[var] for var in self.getOptVars(traj=traj)]))/LA.norm(np.asarray([pendVector[var] for var in self.getOptVars(traj=traj)]))\n for var in self.getOptVars(traj=traj):\n pendVector[var] = copy.deepcopy(pendVector[var])*r\n\n varKPlus = {}\n for index, var in enumerate(self.getOptVars(traj=traj)):\n varKPlus[var] = copy.copy(varK[var]-gain[index]*pendVector[var]*1.0)\n foundPendVector, activeConstraints = self.checkConstraint(self.denormalizeData(varKPlus))\n if not foundPendVector:\n foundPendVector, varKPlus = self._bisectionForConstrainedInput(traj,varK, gain, pendVector)\n gain = gain/2.\n\n if foundPendVector:\n lenPendVector = 0\n for var in self.getOptVars(traj=traj):\n lenPendVector += pendVector[var]**2\n lenPendVector = np.sqrt(lenPendVector)\n\n rotateDegreeUpperLimit = 2\n while self.angleBetween(traj,gradient, pendVector) > rotateDegreeUpperLimit:\n sumVector, lenSumVector = {}, 0\n for var in self.getOptVars(traj=traj):\n sumVector[var] = gradient[var] + pendVector[var]\n lenSumVector += sumVector[var]**2\n\n tempTempVarKPlus = {}\n for index, var in enumerate(self.getOptVars(traj=traj)):\n sumVector[var] = copy.deepcopy(sumVector[var]/np.sqrt(lenSumVector)*lenPendVector)\n tempTempVarKPlus[var] = copy.copy(varK[var]-gain[index]*sumVector[var]*1.0)\n satisfied, activeConstraints = self.checkConstraint(self.denormalizeData(tempTempVarKPlus))\n if satisfied:\n varKPlus = copy.deepcopy(tempTempVarKPlus)\n pendVector = copy.deepcopy(sumVector)\n else:\n gradient = copy.deepcopy(sumVector)\n self.raiseADebug(' ... successfully found new point by rotating trajectory.')\n return varKPlus, True\n varKPlus = varK\n self.raiseADebug(' ... 
did not successfully find new point.')\n return varKPlus, False", "def ensure_default_constraints(self):\r\n positive_strings = ['variance', 'lengthscale', 'precision', 'decay', 'kappa']\r\n # param_names = self._get_param_names()\r\n currently_constrained = self.all_constrained_indices()\r\n to_make_positive = []\r\n for s in positive_strings:\r\n for i in self.grep_param_names(\".*\" + s):\r\n if not (i in currently_constrained):\r\n to_make_positive.append(i)\r\n if len(to_make_positive):\r\n self.constrain_positive(np.asarray(to_make_positive))", "def reformat_gen_constraints(gen_constraints, params, new_snaps): \n snapshots = params['snapshots']\n resampled_snapshots = new_snaps\n # Set index in gen contraints \n for k in gen_constraints:\n if gen_constraints[k] is None: # If no data is passed. An empty df is created\n gen_constraints.update({k: pd.DataFrame(index=snapshots)})\n else: # Otherwise it will set up new indexes\n if not isinstance(gen_constraints[k], pd.DataFrame): \n raise RuntimeError(f'Gen constraints {k} must be a dataframe - > Error!! {type(gen_constraints[k])}')\n gen_constraints[k].index = snapshots\n gen_constraints.update({k: gen_constraints[k]})\n # Finally a resampled df is updated in dict\n gen_constraints.update({k: gen_constraints[k].loc[resampled_snapshots]})\n return gen_constraints", "def __build_can_only_assign_task_to_one_resource_constraints(\n self, solver: pywraplp.Solver, assign_vars: pd.Series\n ):\n print(\n \"\\tBuilding constraints for limiting tasks to only be assigned \"\n \"to one resource.\"\n )\n cons_df = assign_vars.to_frame(\"sum_over_r_on_x\").groupby(\"Task\").sum()\n for it in cons_df.itertuples():\n solver.Add(\n it.sum_over_r_on_x == 1,\n name=namer(\"AssignEachTaskToOneResource\", it.Index),\n )\n print(\n f\"\\t\\tAdded {len(cons_df)} tasks assigned to a single resource constraints.\"\n )", "def applyConstraints(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper that prevents code duplication. Removes the given properties (transform, prior, etc.) from the given parameter index operations.
def _remove_from_index_operations(self, which, transforms): if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
[ "def _remove_operator(self, operator):", "def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n keys_to_remove = total_keys - keys_to_keep\n for key in keys_to_remove:\n obj_dct.pop(key)\n return context_action_dict", "def remove_unused_args(args, thnn_args):\n def clean_name(name):\n name = name[:name.index('[')] if '[' in name else name\n if name.endswith('_'):\n name = name[:-1]\n return name\n uses = set([clean_name(arg['name']) for arg in thnn_args])\n uses.add('output_mask')\n args = [arg for arg in args if arg['name'] in uses]\n for arg in args:\n if 'default' in arg:\n del arg['default']\n return args", "def _drop_component_params(self, k):\n raise NotImplementedError", "def removeInputCopies(self):\n for p in self.assoc.parlist:\n if int(p['group']) == 1:\n _img = p['image'].datafile\n shutil.move(p['orig_filename'],_img)", "def _drop_features(self):", "def _clean_update_ops(self):\n update_ops = set(ge.get_forward_walk_ops(\n list(self._grad_ops), inclusive=False))\n for i in range(0, len(self._topo_sort)):\n ops = self._topo_sort[i]\n # remove ops that are not bw or fw op\n # e.g ops in the update phase\n self._topo_sort[i] = ops - update_ops", "def _clean_bw_ops(self):\n for i in range(0, len(self._topo_sort)):\n dep_ops = self._topo_sort[i]\n fw_dep_ops = dep_ops - self._grad_ops\n if fw_dep_ops:\n self._topo_sort[i] = fw_dep_ops\n else:\n self._topo_sort[i] = dep_ops", "def fast_inplace_check(inputs):\r\n fgraph = inputs[0].fgraph\r\n protected_inputs = [f.protected for f in fgraph._features if isinstance(f,theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs,[])#flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n\r\n inputs = [i for i in inputs if\r\n not isinstance(i,graph.Constant)\r\n and not fgraph.destroyers(i)\r\n and i not in protected_inputs]\r\n return inputs", "def _tf_remove_noise_op(self):\n remove_noise_ops = list()\n for var, noise in zip(self.model_variables, self.noise):\n remove_noise_ops.append(tf1.assign_add(var, -noise))\n ret = tf.group(*tuple(remove_noise_ops))\n with tf1.control_dependencies([ret]):\n return tf.no_op()", "def _clean_params(symbol, parameter_dict):\n parameter_dict = parameter_dict.copy()\n keys_to_delete_arg = set(parameter_dict.keys()) - set(symbol.get_internals().list_outputs())\n for key in keys_to_delete_arg:\n del parameter_dict[key]\n return parameter_dict", "def removeSortCriterion():", "def remove_parameters(self):\n self.parameters = []", "def ignore(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def remove_incompatible_operations(pipelines):\n\n def find_duplicates(pipelines):\n for idx in range(len(pipelines)):\n for idx_ in range(idx + 1, len(pipelines)):\n if pipelines[idx] == pipelines[idx_]:\n return idx\n return -1\n\n\n def _remove_illegal_combination(pipelines, combination):\n illegal_pipes = []\n pipelines_ = []\n for idx, pipeline in enumerate(pipelines):\n combination_ = list(set.intersection(set(pipeline.keys()), set(combination)))\n actives = [pipeline[key] != None for key in pipeline if key in combination_]\n\n if sum(actives) > 1:\n illegal_pipes.append(idx) # Store the index of bad combination\n for param in combination_: # Generate substituting legal combinations\n if pipeline[param] != None: # we need to make new pipe\n 
pipeline_ = pipeline.copy()\n for param_ in combination_: # Set ALL conflicting parameters to None\n pipeline_[param_] = None\n pipeline_[param] = pipeline[param] # Set current parameter back to original value\n pipelines_.append(pipeline_)\n\n new_pipelines = [i for j, i in enumerate(pipelines) if j not in illegal_pipes]\n # new_pipelines.extend(pipelines_)\n return new_pipelines, pipelines_\n\n illegal_combinations = [['BASELINE', 'MSC', 'EMSC', 'RNV', 'SNV', 'LSNV'],\n ['SMOOTH', 'SAVGOL']]\n\n for combination in illegal_combinations:\n pipelines, new_pipes = _remove_illegal_combination(pipelines, combination)\n\n pipelines.extend(new_pipes)\n pipelines_set = {json.dumps(pipeline, sort_keys=True) for pipeline in pipelines}\n pipelines = [json.loads(item) for item in pipelines_set]\n\n\n return pipelines", "def remove_idx(self, k):\n # Remove the bit\n self.var_list.pop(k)\n # Sum\n self.pdf = self.pdf.sum(k)", "def get_other_params(step):\n params = copy.copy(step.get('parameters', {}))\n for to_remove in ['input', 'inputs', 'output', 'outputs', 'src_output', 'tgt_output']:\n if to_remove in params:\n del params[to_remove]\n return params", "def eliminate_empty_shifts(mp):\n c_map = {}\n affected_instrs = []\n\n for i in range(len(mp)):\n if mp[i].source in c_map:\n mp[i].source = c_map[mp[i].source]\n if isinstance(mp[i], AddMetaInstruction) and mp[i].source2 in c_map:\n mp[i].source2 = c_map[mp[i].source2]\n if isinstance(mp[i], MoveMetaIntstruction) and mp[i].shift == (0,0) and mp[i].scale == 0 and not mp[i].neg:\n c_map[mp[i].target] = mp[i].source\n affected_instrs.append(mp[i])\n\n for i in affected_instrs:\n mp.remove(i)\n return mp", "def _prune_parameter_by_idx(self,\n scope,\n params,\n pruned_idx,\n pruned_axis,\n place,\n lazy=False,\n only_graph=False,\n param_shape_backup=None,\n param_backup=None):\n if params[0].name() in self.pruned_list[pruned_axis]:\n return\n for param in params:\n assert isinstance(param, VarWrapper)\n param_t = scope.find_var(param.name()).get_tensor()\n if param_backup is not None and (param.name() not in param_backup):\n param_backup[param.name()] = copy.deepcopy(np.array(param_t))\n pruned_param = self.pruner.prune_tensor(\n np.array(param_t), pruned_idx, pruned_axis, lazy=lazy)\n if not only_graph:\n param_t.set(pruned_param, place)\n ori_shape = param.shape()\n\n if param_shape_backup is not None and (\n param.name() not in param_shape_backup):\n param_shape_backup[param.name()] = copy.deepcopy(param.shape())\n new_shape = list(param.shape())\n new_shape[pruned_axis] = pruned_param.shape[pruned_axis]\n param.set_shape(new_shape)\n _logger.debug(\n '|----------------------------------------+----+------------------------------+------------------------------|'\n )\n _logger.debug('|{:^40}|{:^4}|{:^30}|{:^30}|'.format(\n str(param.name()),\n str(pruned_axis), str(ori_shape), str(param.shape())))\n self.pruned_list[pruned_axis].append(param.name())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
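As a brief aside on the pattern the pair above illustrates: the positive document removes a set of transforms from a parameter's index-operations table and returns the union of the freed indices. Below is a minimal, self-contained sketch of that idea; the `IndexOps` class, its method names, and the sample transforms are hypothetical stand-ins, not the actual GPy API.

```python
import numpy as np


class IndexOps:
    """Hypothetical index-operations table mapping a transform name to parameter indices."""

    def __init__(self):
        self._ops = {}

    def add(self, transform, indices):
        existing = self._ops.get(transform, np.empty(0, dtype=int))
        self._ops[transform] = np.union1d(existing, indices)

    def properties(self):
        # All transforms currently registered.
        return list(self._ops.keys())

    def remove(self, transform, indices):
        # Drop `indices` from `transform` and return the indices that were actually freed.
        existing = self._ops.get(transform, np.empty(0, dtype=int))
        freed = np.intersect1d(existing, indices)
        self._ops[transform] = np.setdiff1d(existing, indices)
        return freed


def remove_from_index_operations(which, transforms, raveled_index):
    # Same shape as the positive document: an empty `transforms` list means "all of them".
    if len(transforms) == 0:
        transforms = which.properties()
    removed = np.empty(0, dtype=int)
    for t in list(transforms):
        freed = which.remove(t, raveled_index)
        removed = np.union1d(removed, freed)
    return removed


ops = IndexOps()
ops.add("positive", np.array([0, 1, 2]))
ops.add("fixed", np.array([2, 3]))
print(remove_from_index_operations(ops, [], np.array([1, 2, 3])))  # [1 2 3]
```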
Emit a JSON representation of a given row
def format(self, row): return json.dumps(row.print_fields)
[ "def row_to_json(row: sqlite3.Row) -> str:\n d = {}\n for key in row.keys():\n d[key] = row[key]\n\n return json.dumps(d)", "def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)", "def _as_json(self,res):\n try:\n import json\n except:\n print >> sys.stderr, \"Don't have json\"\n return json.dumps( self._zip_rows( res ), indent=2, default= self._handler )", "def write_row(self, row):\n pass", "def to_json_line(bq_row):\n row = dict()\n for key in bq_row:\n row[key] = bq_row[key]\n\n # default=str converts non JSON serializable objects to str eg datetime.datetime\n row_json = json.dumps(row, default=str)\n return row_json.encode('utf-8')", "def convert_to_json(self, rows):\n\t\tjson_list = []\n\t\tfor row in rows:\n\t\t\tjson_record = {}\n\t\t\tjson_record[\"movie_id\"] = row[0]\n\t\t\tjson_record[\"title\"] = change_title(row[1])\n\t\t\tjson_record[\"genres\"] = row[2][:5]\n\t\t\tjson_record[\"imdb_id\"] = row[3]\n\t\t\tjson_record[\"tmdb_id\"] = row[4]\n\t\t\tjson_record[\"rating\"] = row[5]\n\t\t\tjson_record[\"number_of_ratings\"] = row[6]\n\t\t\tjson_record[\"weighted_rating\"] = row[7]\n\t\t\tjson_record[\"release_year\"] = row[8]\n\t\t\tjson_record[\"img_path\"] = row[9]\n\t\t\tjson_record[\"description\"] = row[10]\n\t\t\tjson_record[\"director\"] = row[11]\n\t\t\tjson_record[\"length\"] = row[12]\n\t\t\tjson_list.append(json_record)\n\t\treturn json.dumps(json_list, indent = 4)", "def row_list_to_json(rows: List[sqlite3.Row]) -> str:\n l = []\n for row in rows:\n l.append(row_to_json(row))\n\n return json.dumps(l)", "def emitRow(s=None):\n\t# logger.debug(s + settings.ROW_DELIMITER)\n\tsys.stdout.write(s + ROW_DELIMITER)", "def format_row(self, row):\n raise NotImplementedError()", "def getJson(self, row):\n postid = row[0]\n\n # feeds conll output and persists JSON in a local file. Must be streamed to a file, \n # as the JSON returned by semafor is framed parse JSON per line which must be converted\n # into array of JSON objects\n conllfilename = '/tmp/conll/conll_' + repr(postid)\n jsonfile = open('/tmp/conll/' + repr(postid) + '.json', 'w')\n if os.path.exists(conllfilename):\n process = subprocess.Popen('cat ' + conllfilename + ' | nc localhost 5000', shell=True, stdout=jsonfile)\n process.wait()\n process.kill() \n\n # read persisted JSON from local file system and append it to parsed array as a JSON\n # object.\n parsed = []\n with open('/tmp/conll/' + repr(postid) + '.json', 'r') as f:\n for line in f:\n parsed.append(json.loads(line.strip())) \n f.close()\n\n # update curosr to persist JSON returned by semafor server. 
parsed array must be cast\n # as a JSONB during the update\n updcursor = self.conn.cursor()\n updcursor.execute('UPDATE public.post SET parsed_json = CAST(%s as JSONB) WHERE postid=%s', (json.dumps(parsed), postid))\n updated_rows = updcursor.rowcount \n updcursor.close()", "def render(self, row, index=None):\n return self.represent(row)", "def write_row(self, data):\n raise NotImplementedError()", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def gen_json(self, show_headers=True, show_tags=True, use_objects=False):\n is_first = True\n yield \"[\\n\"\n if use_objects:\n for row in self:\n if is_first:\n is_first = False\n yield json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n yield \",\\n\" + json.dumps(row.dictionary, sort_keys=True, indent=2)\n else:\n for raw in self.gen_raw(show_headers, show_tags):\n if is_first:\n is_first = False\n yield json.dumps(raw)\n else:\n yield \",\\n\" + json.dumps(raw)\n yield \"\\n]\\n\"", "def render(self):\n content = []\n for col in self._columns:\n content.append(col.render())\n return json.dumps(content)", "def json(self) -> CellJson:\n\n return {\"id\": self.id, \"content\": self.content, \"data\": self.data}", "def toJSON(self, df) :\n ret = json.dumps(df, indent=4)\n return ret", "def tostring(row):\r\n return row.tostring()", "def format(self, table):\n #return table.data.to_json()\n data = _replace_nans(table.as_array().tolist())\n if hasattr(data, \"strip\") or \\\n (not hasattr(data, \"__getitem__\") and \\\n not hasattr(data, \"__iter__\")):\n # data is not a list/tuple => wrap it\n data = [ data ]\n v = {\n 'offset': table.offset,\n 'data': data,\n 'headers': table.headers,\n 'types': table.types,\n }\n if table.sizes is not None:\n v[\"sizes\"] = table.sizes\n return json.dumps(v, cls=ExtEncoder)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
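A quick illustration of the row-to-JSON pattern in the pair above; the `Row` class and its `print_fields` property are invented here purely to mirror the attribute the positive document reads, and are not assumed to exist in any particular library.

```python
import json


class Row:
    """Hypothetical row object exposing the fields that should be serialized."""

    def __init__(self, fields):
        self._fields = fields

    @property
    def print_fields(self):
        return self._fields


def format_row(row):
    # Mirrors the positive document: the whole row becomes one JSON string.
    return json.dumps(row.print_fields)


print(format_row(Row({"id": 7, "name": "alpha"})))  # {"id": 7, "name": "alpha"}
```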
Creates a dictionary of nodes listed by curie id from answers 1 and 2
def make_node_dict(self): if self.input1 is None or self.input2 is None: raise Exception("Missing input: please run the populate() method first") self.node_dict1 = {} for node in self.input1['knowledge_graph']['nodes']: self.node_dict1[node['id']] = node self.node_dict2 = {} for node in self.input2['knowledge_graph']['nodes']: self.node_dict2[node['id']] = node
[ "def get_nodes_by_id(ntwrk, nodeid):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if k == nodeid}", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. \"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in 
g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict", "def as_dict(self):\n return {n.obj.id: n for n in self.nodes}", "def _get_node_relation(self, nn_id, wf_ver, node_id):\n # TODO : will be deprecated\n warnings.warn(\"_get_node_relation will be deprecated !! \")\n return_obj = {}\n prev_arr = []\n prev_grp = []\n prev_type = []\n next_arr = []\n next_grp = []\n next_type = []\n\n query_set = models.NN_WF_NODE_RELATION.objects.filter(wf_state_id=nn_id + \"_\" + wf_ver)\n\n for data in query_set:\n if(node_id == data.nn_wf_node_id_2) :\n prev_arr.append(data.nn_wf_node_id_1)\n submenu1 = models.NN_WF_NODE_INFO.objects.filter(nn_wf_node_id=data.nn_wf_node_id_1)[0].wf_task_submenu_id_id\n menu1 = models.WF_TASK_SUBMENU_RULE.objects.filter(wf_task_submenu_id=submenu1)[0].wf_task_menu_id_id\n prev_type.append(submenu1)\n prev_grp.append(menu1)\n if (node_id == data.nn_wf_node_id_1):\n next_arr.append(data.nn_wf_node_id_2)\n submenu2 = models.NN_WF_NODE_INFO.objects.filter(nn_wf_node_id=data.nn_wf_node_id_2)[0].wf_task_submenu_id_id\n menu2 = models.WF_TASK_SUBMENU_RULE.objects.filter(wf_task_submenu_id=submenu2)[0].wf_task_menu_id_id\n next_type.append(submenu2)\n next_grp.append(menu2)\n\n return_obj['prev'] = prev_arr\n return_obj['prev_grp'] = prev_grp\n return_obj['prev_type'] = prev_type\n return_obj['next'] = next_arr\n return_obj['next_grp'] = next_grp\n return_obj['next_type'] = next_type\n\n return return_obj", "def makeSelectionMap(self):\n for element in self.root.findall( 'cc:selection-depends', ns):\n # req=element.attrib[\"req\"]\n selIds=element.attrib[\"ids\"]\n slaveId=self.up(element).attrib[\"id\"]\n for selId in selIds.split(','):\n reqs=[]\n if selId in self.selMap:\n reqs =self.selMap[selId]\n reqs.append(slaveId)\n self.selMap[selId]=reqs", "def create_cameFrom(self):\n # TODO: return a data structure that shows which node can most efficiently be reached from another,\n # for each node. 
\n return dict()", "def get_gold_question_dict():\n df = pd.read_csv('/home/ndg/users/sbagga1/unpalatable-questions/crowdsourcing/data/TestQuestions.csv', lineterminator='\\n')\n map_replyID_label = dict([tuple(x) for x in df[['reply_id', 'label']].values])\n return map_replyID_label", "def make_node_index_dic(self):\n\n for key in self.nodes:\n gkey = geometric_key(self.node_xyz(key), '{0}f'.format(self.tol))\n self.node_index[gkey] = key", "def buildNodesDict(self):\n # Get relevant nodes from TANA ca_jc, intersect with BUS_ROUTE_TRAVERSAL_EDGES.\n # Then get the X,Y for the features.\n arcpy.env.workspace = PublicTransit.WORKING_GDB\n arcpy.AddXY_management(PublicTransit.RELEVANT_NODES)\n nodes = arcpy.SearchCursor(PublicTransit.RELEVANT_NODES, \"\", \"\",\n \"ID_hash; POINT_X; POINT_Y\", \"\")\n self.nodesDict = dict()\n numNodes = int(arcpy.GetCount_management(PublicTransit.RELEVANT_NODES).getOutput(0))\n print \"Found %d nodes\" % numNodes\n for node in nodes:\n self.nodesDict[node.ID_hash] = Node(node.ID_hash, node.POINT_X, node.POINT_Y)\n del node\n del nodes", "def get_current_ids(recipes):\n keys = {}\n for x in recipes:\n keys[x['id']] = 1\n return keys", "def rel_concept():\n index = 0\n rel_index ={}\n # rel_out is in shape like [[n, d]]\n rel_out = []\n # If True, return entire node attribute dict as (n, ddict).\n # n is node Varible, d is all the dict attributes, index is nodes index.\n for n, d in self.graph.nodes(True):\n if \"gold\" in d:\n rel_out.append([n,d])\n rel_index[n] = index\n index += 1\n\n # rel_out is in shape like [[var, Node]]\n # rel_index is dict(key = var, value= Int index)\n return rel_out,rel_index", "def coords_to_id():\n m = {}\n for id, line in enumerate(open(\"NewNodes.txt\").readlines()):\n if line == \"\\n\":\n continue\n splits = line.split(\" \")\n coords = (float(splits[1][:-1]), float(splits[2]))\n print(coords)\n m[coords] = id\n\n\n return m", "def _process_question_relationships(self):\n self.question_groups_by_key = {}\n for q in self.questions_by_id.values():\n if q.parent_qid in self.questions_by_id:\n parent = self.questions_by_id[q.parent_qid]\n self._create_question_link(parent, q)\n self._create_question_group(parent)", "def _init_catalog_node(catalog, pid, lid=None, rid=None):\n if pid not in catalog: catalog[pid] = {'_langs': {}}\n if lid is not None:\n if lid not in catalog[pid]['_langs']: catalog[pid]['_langs'][lid] = {'_res': {}, 'language': {}}\n if lid is not None and rid is not None:\n if rid not in catalog[pid]['_langs'][lid]['_res']: catalog[pid]['_langs'][lid]['_res'][rid] = {}", "def _get_childs_dict(self, list_of_childs):\n d_child = {}\n for child in list_of_childs:\n d_child[child.id] = child\n return d_child", "def answer(self, nid, **kw):\n a, = self.answers((nid,), **kw)\n return a", "def _create_graph(ingredients, ing2ingredients):\n graph = {}\n for ing_label in ingredients.keys(): # for each ingredient label...\n ing_id = ingredients[ing_label]['id']\n graph[ing_id] = {}\n graph[ing_id]['edges'] = ing2ingredients[ing_id].keys()\n graph[ing_id]['label'] = ing_label\n graph[ing_id]['n_recipes'] = len(ingredients[ing_label])\n curr_cuisines = ingredients[ing_label]['cuisines']\n main_cuisine_idx = curr_cuisines.values().index(max([curr_cuisines[r] \\\n for r in curr_cuisines]))\n graph[ing_id]['main_cuisine'] = curr_cuisines.keys()[main_cuisine_idx]\n return graph", "def all_in_edges_of_node(self, id1: int) -> dict:\n if id1 in self.vertices:\n in_edges = {}\n # current_node = self.get_node(id1)\n for v1 in 
self.vertices.values():\n if id1 in v1.connectedTo.keys():\n # Return the weight of the edge between self and nbr (two vertices)\n w = v1.get_edge_Weight(id1)\n in_edges[v1.getKey()] = w\n\n return in_edges", "def nodes(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___ModelProto.Node]:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
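To make the positive document above concrete, here is a small runnable sketch that indexes the nodes of two Reasoner-style responses by their CURIE ids; the sample payloads and identifiers are invented for illustration only.

```python
def make_node_dict(response):
    # Index the knowledge-graph nodes of one response by their CURIE id.
    return {node["id"]: node for node in response["knowledge_graph"]["nodes"]}


resp1 = {"knowledge_graph": {"nodes": [{"id": "MONDO:0005148", "name": "type 2 diabetes"}]}}
resp2 = {"knowledge_graph": {"nodes": [{"id": "CHEBI:6801", "name": "metformin"}]}}

node_dict1 = make_node_dict(resp1)
node_dict2 = make_node_dict(resp2)
print(sorted(node_dict1), sorted(node_dict2))  # ['MONDO:0005148'] ['CHEBI:6801']
```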
Runs through all of the nodes in the JSON responses, storing the intersection and set differences into a dictionary organized by tuples of node ids, or the tuple (-1, -1) for all nodes.
def node_diff(self): if self.input1 is None or self.input2 is None: raise Exception("Missing input: please run the populate() method first") if self.node_dict1 is None or self.node_dict2 is None: self.make_node_dict() # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2 g1={} g2={} # Set to keep track of the union of all curie ids curie_set = set() for curie in self.node_dict1.keys(): g1[curie] = {} # intersection is only in the g1 dictionary g1[curie]['intersection'] = set() # node section keeps track of node ids associated with each node i.e. "n0" g1[curie]['node'] = set() curie_set.add(curie) for curie in self.node_dict2.keys(): g2[curie] = {} # node section keeps track of node ids associated with each node i.e. "n0" g2[curie]['node'] = set() curie_set.add(curie) node_names1 = [] node_names2 = [] # extract all node ids (i.e. "n0","n1",ect...) if len(self.input1['question_graph']['nodes'])>0: if 'id' in self.input1['question_graph']['nodes'][0]: node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']] elif 'node_id' in self.input1['question_graph']['nodes'][0]: node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']] if len(self.input2['question_graph']['nodes'])>0: if 'id' in self.input2['question_graph']['nodes'][0]: node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']] elif 'node_id' in self.input2['question_graph']['nodes'][0]: node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']] # initialize the result dictonary diff_dict = {} diff_dict["-1|-1"] = {'intersection':[],'g1-g2':[],'g2-g1':[]} # initialize node id tuple keys for id1 in node_names1: for id2 in node_names2: diff_dict[id1+"|"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]} # iterate through answers for answer1 in self.input1['answers']: for answer2 in self.input2['answers']: for id1 in answer1['node_bindings'].keys(): # This is to handle cases where answer node id has a list or a string if isinstance(answer1['node_bindings'][id1], str): bindings1 = [answer1['node_bindings'][id1]] elif isinstance(answer1['node_bindings'][id1], list): bindings1 = answer1['node_bindings'][id1] for curie1 in bindings1: # store node id g1[curie1]['node'].add(id1) for id2 in answer2['node_bindings'].keys(): # This is to handle cases where answer node id has a list or a string if isinstance(answer2['node_bindings'][id2], str): bindings2 = [answer2['node_bindings'][id2]] elif isinstance(answer2['node_bindings'][id2], list): bindings2 = answer2['node_bindings'][id2] for curie2 in bindings2: # store node id g2[curie2]['node'].add(id2) if curie1 == curie2: # stor intersection tuple g1[curie1]['intersection'].add(id1+"|"+id2) # iterate through all curies for curie in curie_set: # check if curie is from answer 1 if curie in g1.keys(): # check if in intersection if len(g1[curie]['intersection'])>0: diff_dict["-1|-1"]['intersection'] += [self.node_dict1[curie]] for id1 in node_names1: for id2 in node_names2: node_tuple = id1+"|"+id2 if id1 in g1[curie]['node'] and id2 in g2[curie]['node']: diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]] elif id1 in g1[curie]['node']: diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]] elif id2 in g2[curie]['node']: diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]] # If not in intersection store in g1-g2 else: diff_dict["-1|-1"]['g1-g2'] += [self.node_dict1[curie]] for id1 in g1[curie]['node']: # iterate through all answer 2 ids for id2 in node_names2: diff_dict[id1+"|"+id2]['g1-g2'] += 
[self.node_dict1[curie]] # if not in g1 but in g2 then in g2-g1 elif curie in g2.keys(): diff_dict["-1|-1"]['g2-g1'] += [self.node_dict2[curie]] for id2 in g2[curie]['node']: # iterate through all answer 1 ids for id1 in node_names1: diff_dict[id1+"|"+id2]['g2-g1'] += [self.node_dict2[curie]] return diff_dict
[ "def get_nodes_edges(response_json):\n\n nodes = dict()\n paths = dict()\n\n for element in response_json['elements']:\n if element['type'] == \"node\":\n nodes[element['id']] = convert_node(element)\n elif element['type'] == \"way\":\n paths[element['id']] = convert_edge(element)\n\n return nodes, paths", "def make_node_dict(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n self.node_dict1 = {}\n for node in self.input1['knowledge_graph']['nodes']:\n self.node_dict1[node['id']] = node\n self.node_dict2 = {}\n for node in self.input2['knowledge_graph']['nodes']:\n self.node_dict2[node['id']] = node", "def intersect(self, rays): \n result = {}\n \n if bool(self._merged):\n result[\"x\"], result[\"y\"], result[\"z\"], result[\"valid\"], result[\"ray_u\"], \\\n result[\"trig_u\"], result[\"trig_v\"], result[\"gather_ray\"], \\\n result[\"gather_trig\"] = self._intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"z_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n rays[\"z_end\"],\n self._merged[\"xp\"],\n self._merged[\"yp\"],\n self._merged[\"zp\"],\n self._merged[\"x1\"],\n self._merged[\"y1\"],\n self._merged[\"z1\"],\n self._merged[\"x2\"],\n self._merged[\"y2\"],\n self._merged[\"z2\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n \n result[\"norm\"] = tf.gather(\n self._merged[\"norm\"],\n result[\"gather_trig\"]\n )\n \n return result", "def merge_duplicate_nodes(self):\n merges={}\n xys={}\n for n in self.valid_node_iter():\n k=tuple(self.nodes['x'][n])\n if k in xys:\n merges[n]=xys[k]\n self.merge_nodes(xys[k],n)\n else:\n xys[k]=n\n return merges", "def get_list_intersection_counts() -> Dict[str, int]:\n LOGGER.info(\n 'Generating the intersection counts between a set of resolved, unresolved and backlog lists.')\n\n error_lists = _generate_lists()\n resolved, unresolved, backlog = error_lists['resolved'], error_lists['unresolved'], error_lists['backlog']\n\n # find intersections\n def list_intersection(list1, list2):\n set1 = set([x[\"code\"] for x in list1])\n set2 = set([x[\"code\"] for x in list2])\n return len(set1.intersection(set2))\n\n resolved_unresolved = list_intersection(resolved, unresolved)\n resolved_backlog = list_intersection(resolved, backlog)\n unresolved_backlog = list_intersection(unresolved, backlog)\n\n return {\n 'resolved_unresolved': resolved_unresolved,\n 'resolved_backlog': resolved_backlog,\n 'unresolved_backlog': unresolved_backlog\n }", "def node_filter_from_successes(self):\n nf = dict(filter_set_type='intersection', filter_set=[])\n nf['filter_set'].append(\n dict(node_names=self.result.successes, filter_type='union'))\n\n return nf", "def get_objects_in_difft_states(self, json_list):\n detections = []\n other_recs = []\n for json_ele in json_list:\n if json_ele.get(\"event\", {}).get(\"type\", None) == \"detection\":\n detections.append(json_ele)\n elif json_ele.get(\"event\", {}).get(\"type\", None) == \"detection_adj\":\n detections.append(json_ele)\n else:\n other_recs.append(json_ele)\n return {\"detection\": detections,\n \"others\": other_recs}", "def get_gene_id_dict(list_of_results):\n dict1 = {}\n for i, dict2 in enumerate(list_of_results):\n key = dict2[\"GeneID\"]\n if key in dict1.keys():\n # list1 = dict1[key]\n # list1.append(list_of_results[i])\n # dict1[key] = list1\n # list1.append(list_of_results[i])\n dict1[key].append(list_of_results[i])\n else:\n dict1[key] = [list_of_results[i]]\n return dict1", 
"def intersection_report(self, second_metadata):\n\n return {\n 'items': self.intersection(second_metadata=second_metadata),\n 'files': list(set(self.unique_files).intersection(set(second_metadata.unique_files))),\n 'identifiers': list(set(self.unique_identifiers).intersection(set(second_metadata.unique_identifiers))),\n 'scene_labels': list(set(self.unique_scene_labels).intersection(set(second_metadata.unique_scene_labels))),\n 'event_labels': list(set(self.unique_event_labels).intersection(set(second_metadata.unique_event_labels))),\n 'tags': list(set(self.unique_tags).intersection(set(second_metadata.unique_tags)))\n }", "def maintain_matched_ids(self, clustered_json_list):\n if clustered_json_list:\n\n # 1. Find a valid set across all elements\n first_obj_id = None\n same_id_list = None\n json_ele = None\n for json_ele in clustered_json_list:\n first_obj_id = trackerutils.get_obj_id(json_ele)\n same_id_list = self.state.clustered_oid_map.get(\n first_obj_id, None)\n if same_id_list is not None:\n break\n\n if same_id_list is None:\n # Create a new set with this obj id in it\n same_id_list = {\n \"update_ts\": iso8601.parse_date(\n json_ele.get(\"@timestamp\")),\n \"id_set\": set([first_obj_id]),\n \"id\": self.state.curr_cl_obj_id\n }\n self.state.curr_cl_obj_id += 1 #increment mc tracker id\n self.state.clustered_oid_map[first_obj_id] = same_id_list\n\n set_id = same_id_list['id']\n curr_set = same_id_list['id_set']\n merge_sets = []\n for json_ele in clustered_json_list:\n obj_id = trackerutils.get_obj_id(json_ele)\n this_set_list = self.state.clustered_oid_map.get(\n obj_id, None)\n this_ts = iso8601.parse_date(json_ele[\"@timestamp\"])\n\n if this_set_list is None:\n\n # This is a new object. Add it to the list\n curr_set.add(obj_id)\n self.state.clustered_oid_map[obj_id] = same_id_list\n else:\n\n if ((this_set_list['id'] != set_id) or\n (first_obj_id not in this_set_list['id_set'])):\n\n # There is some problem. 
We see that two different\n # ids have been issued for the same cluster elements\n merge_sets.append(obj_id)\n\n if this_ts > same_id_list[\"update_ts\"]:\n same_id_list[\"update_ts\"] = this_ts\n\n if merge_sets:\n self.merge_cluster_id_sets([first_obj_id] + merge_sets)", "def test_dict_of_clusters_related_to_gene_true():\n response = dict_of_clusters_related_to_gene(db, 'PA14', 'PA14_RS00005')\n assert response['index'].iloc[0] == 3456 and len(response) == 1\n assert list(response.columns) == ['index', 'pa14', 'combined_index']", "def _get_duplicates(self, root):\n namespaces = self.NSMAP.values()\n id_nodes = collections.defaultdict(list)\n\n for desc in utils.iterdescendants(root):\n if 'id' not in desc.attrib:\n continue\n\n ns = utils.get_namespace(desc)\n\n if ns not in namespaces:\n continue\n\n id_ = desc.attrib['id']\n id_nodes[id_].append(desc)\n\n filtered = {}\n for id_, nodes in iteritems(id_nodes):\n if len(nodes) > 1:\n filtered[id_] = nodes\n\n return filtered", "def getIntersectionNodes(self):\n ways=self.getWayList()\n nodeList=[]\n intersectionList=[]\n for wayElement in ways:\n nodes=wayElement.getElementsByTagName('nd')\n for nodeElement in nodes:\n nodeList.append(nodeElement.getAttribute('ref'))\n for nodeElement in nodeList:\n if nodeList.count(nodeElement)>1 and intersectionList.count(nodeElement)==0:\n intersectionList.append(nodeElement)\n return intersectionList", "def as_dict(self):\n return {n.obj.id: n for n in self.nodes}", "def calculate_tanimoto_set_distances(dict_of_sets):\n result = defaultdict(dict)\n\n for x, y in itt.combinations(dict_of_sets, 2):\n result[x][y] = result[y][x] = tanimoto_set_similarity(dict_of_sets[x], dict_of_sets[y])\n\n for x in dict_of_sets:\n result[x][x] = 1.0\n\n return dict(result)", "def _find_common_and_uncommon_nodes(self):\n\n self.message('Finding common and uncommon nodes')\n common_nodes = set()\n uncommon_nodes = set()\n uncommon_nodesets = [set() for _ in range(self._num_meshes)]\n appears = defaultdict(lambda: array('b', [0] * self._num_meshes))\n\n # Build a binary array for each node. The bit at each index will indicate\n # whether the node appears in the mesh.\n for m in range(self._num_meshes):\n\n nodal_coordinates, in_range = self._coordinates_in_range(m)\n\n for n in range(nodal_coordinates.shape[0]):\n\n if in_range[n]:\n\n appears[nodal_coordinates[n].tobytes()][m] = 1\n\n # If the number of times a node appears is equal to the number of meshes,\n # then the node is a common node. 
Otherwise, keep track of which meshes\n # the node does not fall into\n for key, in_mesh in appears.items():\n\n if in_mesh.count(1) == self._num_meshes:\n\n common_nodes.add(key)\n\n else:\n\n uncommon_nodes.add(key)\n\n for m in range(self._num_meshes):\n\n if in_mesh[m] == 0:\n\n uncommon_nodesets[m].add(key)\n\n self.message('{} nodes fall into all meshes'.format(len(common_nodes)))\n self.message('{} nodes are missing from at least one mesh'.format(len(uncommon_nodes)))\n for m in range(self._num_meshes):\n self.message('{} nodes missing from mesh {}'.format(len(uncommon_nodesets[m]), m))\n\n return common_nodes, uncommon_nodes, uncommon_nodesets", "def find_intersections():\n regions = [region for region in session[\n \"regions\"] if region.get(\"visible\")]\n if len(regions) > 1:\n intersection = process_intersections(regions)\n else:\n return jsonify(\n {\"success\": False, \"data\": \"Not enough regions selected\"})\n if intersection:\n return jsonify({\"success\": True, \"data\": hseg_to_coords(intersection)})\n else:\n return jsonify(\n {\"success\": False, \"data\": \"No common intersection\"})", "def intersection_info(self, id_):\n state = {}\n\n get_lane_vehicle_count = self.eng.get_lane_vehicle_count()\n get_lane_waiting_vehicle_count = self.eng.get_lane_waiting_vehicle_count()\n # get_lane_vehicles = self.eng.get_lane_vehicles()\n # get_vehicle_speed = self.eng.get_vehicle_speed()\n\n state['start_lane_vehicle_count'] = {lane: get_lane_vehicle_count[lane] for lane in self.start_lane[id_]}\n state['end_lane_vehicle_count'] = {lane: get_lane_vehicle_count[lane] for lane in self.end_lane[id_]}\n\n # state['lane_vehicle_count'] = state['start_lane_vehicle_count'].copy()\n # state['lane_vehicle_count'].update(state['end_lane_vehicle_count'].items())\n state['start_lane_waiting_vehicle_count'] = {lane: get_lane_waiting_vehicle_count[lane] for lane in\n self.start_lane[id_]}\n # state['end_lane_waiting_vehicle_count'] = {lane: get_lane_waiting_vehicle_count[lane] for lane in\n # self.end_lane[id_]}\n #\n # state['start_lane_vehicles'] = {lane: get_lane_vehicles[lane] for lane in self.start_lane[id_]}\n # state['end_lane_vehicles'] = {lane: get_lane_vehicles[lane] for lane in self.end_lane[id_]}\n #\n # state['start_lane_speed'] = {\n # lane: np.sum(list(map(lambda vehicle: get_vehicle_speed[vehicle], get_lane_vehicles[lane]))) / (\n # get_lane_vehicle_count[lane] + 1e-5) for lane in\n # self.start_lane[id_]} # compute start lane mean speed\n # state['end_lane_speed'] = {\n # lane: np.sum(list(map(lambda vehicle: get_vehicle_speed[vehicle], get_lane_vehicles[lane]))) / (\n # get_lane_vehicle_count[lane] + 1e-5) for lane in\n # self.end_lane[id_]} # compute end lane mean speed\n\n state['current_phase'] = self.current_phase[id_]\n # state['current_phase_time'] = self.current_phase_time[id_]\n\n state['adjacency_matrix'] = self.traffic_light_node_dict[id_]['adjacency_row']\n\n return state", "def results(self) -> Dict[str, Any]:\n return self.nodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
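The positive document above is long, but its core is a set intersection/difference over CURIE ids drawn from two answer sets. A stripped-down, hypothetical sketch of that core (ignoring the per-node-id bookkeeping the full method performs):

```python
def curie_diff(nodes1, nodes2):
    # Intersection and set differences of two CURIE-keyed node dictionaries.
    c1, c2 = set(nodes1), set(nodes2)
    return {
        "intersection": [nodes1[c] for c in c1 & c2],
        "g1-g2": [nodes1[c] for c in c1 - c2],
        "g2-g1": [nodes2[c] for c in c2 - c1],
    }


g1 = {"MONDO:0005148": {"id": "MONDO:0005148"}, "CHEBI:6801": {"id": "CHEBI:6801"}}
g2 = {"CHEBI:6801": {"id": "CHEBI:6801"}, "HGNC:6081": {"id": "HGNC:6081"}}
print(curie_diff(g1, g2)["intersection"])  # [{'id': 'CHEBI:6801'}]
```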
Reproducing kernel. Calculate the inverse Funk-Radon transform of the reproducing kernel for the space of spherical harmonics of maximum degree N.
def inv_funk_radon_kernel(mu, N): # Check that -1 <= mu <= 1 mu = np.clip(mu, -1, 1) # Need Legendre polynomials legPolys = legp(mu, N) p_at_zero = legp(0, N) coefs = 2*np.arange(0, N+1, 2) + 1 ker = coefs*legPolys[::2]/p_at_zero[::2] return ker.sum() / (8*np.pi)
[ "def inv_funk_radon_even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (8 * np.pi**2 * Pk(0) * k * (k + 1)) * Pk(mu)\n\n return A", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(N + 1):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def flipkernel(k):\r\n return np.squeeze(np.fliplr(k[None,:])) ###important for temporal causality!!!??\r", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def kernel_notch(M, N, d0, centro = (0, 0), forma = 0, pasa = 0, n = 1.0):\n \n if forma == 0:\n kernel_prov = kernel_ideal_notch(M, N, centro, d0)\n elif forma == 1:\n kernel_prov = kernel_gaussiano_notch(M, N, centro, d0)\n else:\n kernel_prov = kernel_butterworth_notch(M, N, centro, d0, n)\n \n kernel = pasa + (-1)**pasa * kernel_prov\n \n return kernel", "def make_kernel(grid,fwhm,nfwhm=4.):\n\n ngrd = len(grid)\n spacing = (grid[ngrd-1]-grid[0])/(ngrd-1.)\n nkpts = round(nfwhm*fwhm/spacing)\n\n if (nkpts % 2) != 0:\n nkpts += 1\n\n kernel = spacing* (np.arange(nkpts)-(nkpts/2.))\n kernel = np.exp(-np.log(2.0)/(fwhm/2.0)**2*(kernel)**2) ## Gaussian kernel\n kernel_norm = kernel/np.sum(kernel) ## Normalize\n kernel_norm=np.append(kernel_norm,kernel_norm[0]) ## Make sure it's symmetric\n\n return kernel_norm", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , 
-2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), 
axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def _compute_toeplitz_kernel(self):\n trajectory = self.trajectory\n weights = self.weights\n if self.rank is None:\n raise NotImplementedError(\n f\"The rank of {self.name} must be known statically.\")\n\n if weights is None:\n # If no weights were passed, use ones.\n weights = tf.ones(tf.shape(trajectory)[:-1], dtype=self.dtype.real_dtype)\n # Cast weights to complex dtype.\n weights = tf.cast(tf.math.sqrt(weights), self.dtype)\n\n # Compute N-D kernel recursively. Begin with last axis.\n last_axis = self.rank - 1\n kernel = self._compute_kernel_recursive(trajectory, weights, last_axis)\n\n # Make sure that the kernel is symmetric/Hermitian/self-adjoint.\n kernel = self._enforce_kernel_symmetry(kernel)\n\n # Additional normalization by sqrt(2 ** rank). 
This is required because\n # we are using FFTs with twice the length of the original image.\n if self.norm == 'ortho':\n kernel *= tf.cast(tf.math.sqrt(2.0 ** self.rank), kernel.dtype)\n\n # Put the kernel in Fourier space.\n fft_axes = list(range(-self.rank, 0))\n fft_norm = self.norm or \"backward\"\n return fft_ops.fftn(kernel, axes=fft_axes, norm=fft_norm)", "def kernel(h):\n ker = np.zeros((4*h-1, 4*h-1, 4*h-1))\n\n for i in range(4*h-1):\n for j in range(4*h-1):\n for k in range(4*h-1):\n r_grid = np.linalg.norm([i - 2*h + 1, j - 2*h + 1, k - 2*h + 1])\n ker[i, j, k] = W(r_grid / h, h)\n return ker", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def calc_inv_kernel(fn_inv, method=\"dSPM\", nave=1, snr=1.,\n pick_ori=\"normal\", verbose=None):\n\n # -------------------------------------------\n # import necessary modules\n # -------------------------------------------\n import mne.minimum_norm as min_norm\n from mne.minimum_norm.inverse import _assemble_kernel\n import numpy as np\n\n # -------------------------------------------\n # estimate inverse kernel\n # -------------------------------------------\n # load inverse solution\n import mne.minimum_norm as min_norm\n inv_operator = min_norm.read_inverse_operator(fn_inv, verbose=verbose)\n\n\n # set up the inverse according to the parameters\n lambda2 = 1. / snr ** 2. 
# the regularization parameter.\n inv_operator = min_norm.prepare_inverse_operator(inv_operator, nave, lambda2, method)\n\n # estimate inverse kernel and noise normalization coefficient\n kernel, noise_norm, vertno = _assemble_kernel(inv_operator, None, method, pick_ori)\n\n if method == \"MNE\":\n noise_norm = np.ones((kernel.shape[0]/3))\n noise_norm = noise_norm[:, np.newaxis]\n\n\n # -------------------------------------------\n # return results\n # -------------------------------------------\n return kernel, noise_norm, vertno", "def filter_kernel(self, kf, Gtype='spectral', k_kf=None,\n dtype=np.complex128):\n\n if k_kf is None:\n A = self.L/self.L.min() # domain size aspect ratios\n A.resize((3, 1, 1, 1)) # ensure proper array broadcasting\n kmag = np.sqrt(np.sum(np.square(self.K/A), axis=0))\n k_kf = kmag/kf\n\n Ghat = np.empty(k_kf.shape, dtype=dtype)\n\n if Gtype == 'spectral':\n Ghat[:] = (np.abs(k_kf) < 1.0).astype(dtype)\n\n elif Gtype == 'tophat':\n Ghat[:] = np.sin(pi*k_kf)/(pi*k_kf**2)\n\n elif Gtype == 'comp_exp':\n # A Compact Exponential filter that:\n # 1) has compact support in _both_ physical and spectral space\n # 2) is strictly positive in _both_ spaces\n # 3) is smooth (infinitely differentiable) in _both_ spaces\n # 4) has simply-connected support in spectral space with\n # an outer radius kf, and\n # 5) has disconnected (lobed) support in physical space\n # with an outer radius of 2*pi/kf\n with np.errstate(divide='ignore'):\n Ghat[:] = np.exp(-k_kf**2/(0.25-k_kf**2),\n where=k_kf < 0.5,\n out=np.zeros_like(k_kf)\n ).astype(dtype)\n\n G = irfft3(self.comm, Ghat)\n G[:] = np.square(G)\n rfft3(self.comm, G, Ghat)\n Ghat *= 1.0/self.comm.allreduce(Ghat[0, 0, 0], op=MPI.MAX)\n Ghat -= 1j*np.imag(Ghat)\n\n elif Gtype == 'inv_comp_exp':\n # Same as 'comp_exp' but the physical-space and\n # spectral-space kernels are swapped so that the\n # physical-space support is a simply-connected ball\n raise ValueError('inv_comp_exp not yet implemented!')\n\n else:\n raise ValueError('did not understand filter type')\n\n return Ghat", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def prewitt_kernel() -> torch.Tensor:\n\n return torch.outer(\n torch.tensor([1., 1., 1.]) / 3,\n torch.tensor([1., 0., -1.]),\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reproducing kernel

Calculate the inverse Funk-Radon transform and inverse spherical Laplacian of the reproducing kernel for the even-degree subspace of spherical harmonics of maximum degree N, i.e., calculates H(\mu) = \Delta^{-1} G^{-1} K_e(\mu), where \Delta is the spherical Laplacian and G is the Funk-Radon transform. The calculation is done in spectral space.
def inv_funk_radon_even_kernel(mu, N):

    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Need Legendre polynomials
    legPolys = legp(mu, N)
    p_at_zero = legp(0, N)

    coefs_num = 2*np.arange(0, N+1) + 1
    coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)

    ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)

    return ker.sum() / (8.0*np.pi*np.pi)
[ "def inv_funk_radon_even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (8 * np.pi**2 * Pk(0) * k * (k + 1)) * Pk(mu)\n\n return A", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(N + 1):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def filter_kernel(self, kf, Gtype='spectral', k_kf=None,\n dtype=np.complex128):\n\n if k_kf is None:\n A = self.L/self.L.min() # domain size aspect ratios\n A.resize((3, 1, 1, 1)) # ensure proper array broadcasting\n kmag = np.sqrt(np.sum(np.square(self.K/A), axis=0))\n k_kf = kmag/kf\n\n Ghat = np.empty(k_kf.shape, dtype=dtype)\n\n if Gtype == 'spectral':\n Ghat[:] = (np.abs(k_kf) < 1.0).astype(dtype)\n\n elif Gtype == 'tophat':\n Ghat[:] = np.sin(pi*k_kf)/(pi*k_kf**2)\n\n elif Gtype == 'comp_exp':\n # A Compact Exponential filter that:\n # 1) has compact support in _both_ physical and spectral space\n # 2) is strictly positive in _both_ spaces\n # 3) is smooth (infinitely differentiable) in _both_ spaces\n # 4) has simply-connected support in spectral space with\n # an outer radius kf, and\n # 5) has disconnected (lobed) support in physical space\n # with an outer radius of 2*pi/kf\n with np.errstate(divide='ignore'):\n Ghat[:] = np.exp(-k_kf**2/(0.25-k_kf**2),\n where=k_kf < 0.5,\n out=np.zeros_like(k_kf)\n ).astype(dtype)\n\n G = irfft3(self.comm, Ghat)\n G[:] = np.square(G)\n rfft3(self.comm, G, Ghat)\n Ghat *= 1.0/self.comm.allreduce(Ghat[0, 0, 0], op=MPI.MAX)\n Ghat -= 1j*np.imag(Ghat)\n\n elif Gtype == 'inv_comp_exp':\n # Same as 'comp_exp' but the physical-space and\n # spectral-space kernels are swapped so that the\n # physical-space support is a simply-connected ball\n raise ValueError('inv_comp_exp not yet implemented!')\n\n else:\n raise ValueError('did not understand filter type')\n\n return Ghat", "def even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def kernel(h):\n ker = np.zeros((4*h-1, 4*h-1, 4*h-1))\n\n for i in range(4*h-1):\n for j in range(4*h-1):\n for k in range(4*h-1):\n r_grid = np.linalg.norm([i - 2*h + 1, j - 2*h + 1, k - 2*h + 1])\n ker[i, j, k] = W(r_grid / h, h)\n return ker", "def make_kernel(grid,fwhm,nfwhm=4.):\n\n ngrd = len(grid)\n spacing = (grid[ngrd-1]-grid[0])/(ngrd-1.)\n nkpts = round(nfwhm*fwhm/spacing)\n\n if (nkpts % 2) != 0:\n nkpts += 1\n\n kernel = spacing* (np.arange(nkpts)-(nkpts/2.))\n kernel = np.exp(-np.log(2.0)/(fwhm/2.0)**2*(kernel)**2) ## Gaussian kernel\n kernel_norm = kernel/np.sum(kernel) ## Normalize\n kernel_norm=np.append(kernel_norm,kernel_norm[0]) ## 
Make sure it's symmetric\n\n return kernel_norm", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) 
values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - 
(k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = 
np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def kernel_notch(M, N, d0, centro = (0, 0), forma = 0, pasa = 0, n = 1.0):\n \n if forma == 0:\n kernel_prov = kernel_ideal_notch(M, N, centro, d0)\n elif forma == 1:\n kernel_prov = kernel_gaussiano_notch(M, N, centro, d0)\n else:\n kernel_prov = kernel_butterworth_notch(M, N, centro, d0, n)\n \n kernel = pasa + (-1)**pasa * kernel_prov\n \n return kernel", "def flipkernel(k):\r\n return np.squeeze(np.fliplr(k[None,:])) ###important for temporal causality!!!??\r", "def kernel(theta, pad):\r\n nb = len(theta)\r\n basis = basis_function1(pad, nb) #construct basises\r\n k = np.dot(theta, basis.T) #construct kernels with parameter-weighted sum\r\n return flipkernel(k)", "def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):\n\n # Make vectors of the wave numbers\n kc_z = np.linspace(1e-6, kc_z_max, 35)\n kc_x = np.linspace(1e-6, kc_x_max, 35)\n\n # Turn those vectors into matrices\n kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)\n\n # Find some of the numbers that appear later in the calculations\n kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2) # Absolute value of k\n theta_ = np.arctan2(kc_x_mat, kc_z_mat) # The angle between k and B\n wc_i = 1 / m_i # The ion gyro frequency\n wp_i = wp_e / np.sqrt(m_i) # The ion plasma frequency\n wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2) # The total plasma frequency\n\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # For every k_perp and k_par, turn the dispersion relation into a\n # polynomial equation and solve it.\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # The polynomial coefficients are calculated\n pol_koeff_8 = -2 * kc_ ** 2\n pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)\n pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)\n pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)\n pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2\n pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(\n theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))\n pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (\n 1 + np.cos(theta_) ** 2)\n pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2\n\n w_final = np.zeros((10, len(kc_z), len(kc_x)))\n\n # For each k, solve the equation\n 
for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):\n disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,\n pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],\n 0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]\n # theoretically should be real (A. Tjulin)\n w_temp = np.real(np.roots(disp_polynomial))\n # We need to sort the answers to get nice surfaces.\n w_final[:, k_z, k_x] = np.sort(w_temp)\n\n n2_ = kc_ ** 2 / w_final ** 2\n v_ph_c = np.sqrt(1. / n2_)\n va_c = 1 / (wp_e * np.sqrt(m_i))\n v_ph_va = v_ph_c / va_c\n\n diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)\n\n e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)\n e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_\n\n b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,\n w_final, e_x, e_y, e_z)\n\n dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]\n dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]\n dw_x[:, :, 1:] = np.diff(w_final, axis=2)\n dw_z[:, 1:, :] = np.diff(w_final, axis=1)\n v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]\n\n s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)\n\n # Compute ion and electron velocities\n v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,\n e_x, e_y, e_z)\n\n # Ratio of parallel and perpendicular to B speed\n vepar_perp = v_ez * np.conj(v_ez)\n vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))\n vipar_perp = v_iz * np.conj(v_iz)\n vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))\n\n # Total particle speeds\n v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)\n v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)\n\n # Ion and electron energies\n m_e = -1\n en_e = 0.5 * m_e * v_e2\n en_i = 0.5 * m_i * v_i2\n\n # Ratio of particle and field energy densities\n ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)\n\n # Continuity equation\n dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,\n v_ex, v_ez, v_ix, v_iz)\n\n dn_e_n_db_b = dn_e_n / b_tot\n dn_i_n_db_b = dn_i_n / b_tot\n\n dn_e_n_dbpar_b = dn_e_n / b_par\n dn_i_n_dbpar_b = dn_i_n / b_par\n\n dn_e = dn_e_n * wp_e ** 2\n k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat\n k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))\n\n # Build output dict\n extra_param = {\"Degree of electromagnetism\": np.log10(b_tot / e_tot),\n \"Degree of longitudinality\": np.abs(e_par) / e_tot,\n \"Degree of parallelity E\": e_z / e_tot,\n \"Degree of parallelity B\": np.sqrt(\n b_z * np.conj(b_z)) / b_tot,\n \"Ellipticity E\": e_pol, \"Ellipticity B\": b_pol,\n \"E_part/E_field\": np.log10(ratio_part_field),\n \"v_g\": np.sqrt(v_x ** 2 + v_z ** 2),\n \"v_ph/v_a\": np.log10(v_ph_va),\n \"E_e/E_i\": np.log10(en_e / en_i),\n \"v_e/v_i\": np.log10(np.sqrt(v_e2 / v_i2)),\n \"v_epara/v_eperp\": np.log10(vepar_perp),\n \"v_ipara/v_iperp\": np.log10(vipar_perp),\n \"dn_e/dn_i\": np.log10(dne_dni),\n \"(dn_e/n)/ (dB/B)\": np.log10(dn_e_n_db_b),\n \"(dn_i/n)/(dB/B)\": np.log10(dn_i_n_db_b),\n \"(dn_i/n)/(dBpar/B)\": np.log10(dn_i_n_dbpar_b),\n \"(dn_e/n)/(dB/B)\": np.log10(dn_e / k_dot_e),\n \"(dn_e/n)/(dBpar /B)\": np.log10(dn_e_n_dbpar_b),\n \" Spar/Stot\": s_par / s_tot}\n\n for k, v in zip(extra_param.keys(), extra_param.values()):\n extra_param[k] = np.transpose(np.real(v), [0, 2, 1])\n\n kx_ = np.transpose(kc_x_mat)\n kz_ = np.transpose(kc_z_mat)\n wf_ = np.transpose(w_final, [0, 2, 1])\n\n return kx_, kz_, wf_, extra_param", "def GPR_Kernel 
(a,h,sig_data=1,K=Squared_Expo,close_BP=None,width=9,badpix=None,x_grid=None,y_grid=None):\r\n \r\n if badpix is None:\r\n badpix=[width//2,width//2]\r\n if close_BP is None:\r\n close_BP=np.zeros((width,width),dtype=bool)\r\n close_BP[badpix[0],badpix[1]]=True\r\n if close_BP[badpix[0],badpix[1]]==False:\r\n raise ValueError(\"close_BP must be True at the location of badpix.\")\r\n good_pix=~close_BP\r\n if x_grid is None and y_grid is None:\r\n x=np.linspace(0,close_BP.shape[1]-1,close_BP.shape[1])\r\n y=np.linspace(0,close_BP.shape[0]-1,close_BP.shape[0])\r\n x_grid,y_grid=np.meshgrid(x,y)\r\n elif (x_grid is None and y_grid is not None) or (x_grid is not None and y_grid is None) or x_grid.shape !=y_grid.shape:\r\n raise ValueError(\"x_grid and y_grid do not have the same shape, or only one of them is given. \"\\\r\n \"Cannot constuct a grid of coordinates.\")\r\n if close_BP.shape != x_grid.shape:\r\n raise ValueError(\"close_BP should have the same shape as x and y grids.\")\r\n # Change to coordinate where the bad pixel to be fixed is at (0,0)\r\n x_grid=x_grid-badpix[1]\r\n y_grid=y_grid-badpix[0]\r\n X, Y = x_grid[good_pix], y_grid[good_pix]\r\n Cov_data=np.identity(X.size)*sig_data**2 \r\n Kinv=np.linalg.inv(K(X,X,Y,Y,a,h)+Cov_data)\r\n Kernel=np.zeros(close_BP.shape)\r\n Kernel[good_pix]=np.dot(Kinv,K(X,np.zeros(1),Y,np.zeros(1),a,h))[:,0]\r\n # Normalization\r\n Kernel/=np.sum(Kernel)\r\n # Reshape back to 2D\r\n Kernel=np.reshape(Kernel,close_BP.shape)\r\n return Kernel" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
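As an illustrative cross-check of the record above, assuming legp(mu, N) returns the Legendre values P_0(mu)..P_N(mu), the kernel is the finite spectral sum H(mu) = sum over even k >= 2 of (2k+1) P_k(mu) / (8 pi^2 P_k(0) k (k+1)). A minimal self-contained sketch of that sum (the _check name is made up here) using SciPy:

import numpy as np
from scipy.special import eval_legendre

def inv_funk_radon_even_kernel_check(mu, N):
    # Spectral form: sum over even degrees k = 2, 4, ..., N of
    # (2k + 1) * P_k(mu) / (8 * pi^2 * P_k(0) * k * (k + 1)).
    mu = np.clip(mu, -1, 1)
    total = 0.0
    for k in range(2, N + 1, 2):
        total += (2 * k + 1) * eval_legendre(k, mu) / (eval_legendre(k, 0.0) * k * (k + 1))
    return total / (8.0 * np.pi ** 2)

print(inv_funk_radon_even_kernel_check(0.3, 8))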
Reproducing kernel

Calculation of the reproducing kernel for the even subspace of spherical harmonics of maximum degree N.
def even_kernel(mu, N):

    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Need Legendre polynomials
    legPolys = legp(mu, N)

    coefs = 2*np.arange(0, N+1) + 1

    ker = coefs[0::2]*legPolys[0::2]

    return ker.sum() / (4.0*np.pi)
[ "def even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def inv_funk_radon_even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (8 * np.pi**2 * Pk(0) * k * (k + 1)) * Pk(mu)\n\n return A", "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def kernel(h):\n ker = np.zeros((4*h-1, 4*h-1, 4*h-1))\n\n for i in range(4*h-1):\n for j in range(4*h-1):\n for k in range(4*h-1):\n r_grid = np.linalg.norm([i - 2*h + 1, j - 2*h + 1, k - 2*h + 1])\n ker[i, j, k] = W(r_grid / h, h)\n return ker", "def kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(N + 1):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def kernel_factory(s, m1, m2):\r\n m_max = max(m1, m2)\r\n A = np.zeros([s, m_max, m_max], dtype=complex)\r\n symmetry = random.choice([2, 3, 4, 6])\r\n half_sym = np.floor(symmetry / 2).astype('int')\r\n lowest_k = 0.5\r\n highest_k = 3\r\n k = np.zeros([s, symmetry])\r\n for level in range(s):\r\n k[level, :] = np.random.uniform(lowest_k, highest_k, symmetry)\r\n\r\n x, y = np.meshgrid(np.linspace(-1, 1, m_max), np.linspace(-1, 1, m_max))\r\n # dist = np.sqrt(x * x + y * y)\r\n # theta = np.arctan(x / y)\r\n arb_angle = np.random.uniform(0, 2 * np.pi)\r\n for direction in range(symmetry):\r\n ang = direction * 180 / symmetry\r\n ang = arb_angle + ang * np.pi / 180\r\n r = (x * np.cos(ang) + np.sin(ang) * y)\r\n phi = np.random.uniform(0, 2 * np.pi)\r\n for i in range(s):\r\n A[i, :, :] += np.cos(2 * np.pi * k[i, direction % half_sym] * r)\r\n\r\n # Adding normal decay\r\n sigma = np.random.uniform(0.3, 0.6)\r\n decay = gaussian_window(m_max, m_max, sigma)\r\n A = np.multiply(np.abs(A), decay)\r\n # Normalizing:\r\n A = sphere_norm_by_layer(A)\r\n return A", "def scharr_kernel() -> torch.Tensor:\n\n return torch.outer(\n torch.tensor([3., 10., 3.]) / 16,\n torch.tensor([1., 0., -1.]),\n )", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def make_kernel(grid,fwhm,nfwhm=4.):\n\n ngrd = len(grid)\n spacing = (grid[ngrd-1]-grid[0])/(ngrd-1.)\n nkpts = round(nfwhm*fwhm/spacing)\n\n if (nkpts % 2) != 0:\n nkpts += 1\n\n kernel = spacing* (np.arange(nkpts)-(nkpts/2.))\n kernel = 
np.exp(-np.log(2.0)/(fwhm/2.0)**2*(kernel)**2) ## Gaussian kernel\n kernel_norm = kernel/np.sum(kernel) ## Normalize\n kernel_norm=np.append(kernel_norm,kernel_norm[0]) ## Make sure it's symmetric\n\n return kernel_norm", "def kernel_notch(M, N, d0, centro = (0, 0), forma = 0, pasa = 0, n = 1.0):\n \n if forma == 0:\n kernel_prov = kernel_ideal_notch(M, N, centro, d0)\n elif forma == 1:\n kernel_prov = kernel_gaussiano_notch(M, N, centro, d0)\n else:\n kernel_prov = kernel_butterworth_notch(M, N, centro, d0, n)\n \n kernel = pasa + (-1)**pasa * kernel_prov\n \n return kernel", "def LIC2_sparse(u,v,points,grid_object,trace_length,kernel='anisotropic_linear',delta_t=3600.):\n \n \n # temporary variable for setting up the convolution kernel and noise field.\n trace_length = float(trace_length)\n delta_t = float(delta_t)\n steps_per_trace = int(trace_length/delta_t)\n \n\n if kernel == 'anisotropic_linear':\n k = np.ones(steps_per_trace)\n for i in xrange(steps_per_trace):\n k[i] = 1 - float(i)/float(steps_per_trace-1)\n #k[:int(steps_per_trace/2.)] = 0\n #k[int(2*kernel_length/4.):] = 0\n #k = k/np.sum(delta_t*k)\n \n #plt.plot(k)\n \n noise = np.zeros(2*steps_per_trace)\n noise[steps_per_trace] = 1\n \n intensity = np.zeros((steps_per_trace))\n for i in xrange(steps_per_trace):\n intensity[i] = np.sum(k*noise[i:steps_per_trace+i])\n intensity = intensity/np.max(intensity)\n #plt.plot(intensity)\n \n elif kernel == 'box':\n k = np.ones(kernel_length)\n # k[:int(kernel_length/4.)] = 0\n # k[int(3*kernel_length/4.):] = 0\n #k = k/np.sum(delta_t*k)\n\n noise = np.zeros(2*steps_per_trace)\n noise[steps_per_trace] = 1\n \n intensity = np.zeros((steps_per_trace))\n for i in xrange(steps_per_trace):\n intensity[i] = np.sum(k*noise[i:steps_per_trace+i])\n intensity = intensity/np.max(intensity)\n\n else:\n raise ValueError('Valid options for kernel are: anisotropic_linear, box')\n \n \n x_start = (np.random.random_sample(points)*(np.max(grid_object['Xp1'][:]) - np.min(grid_object['Xp1'][:])) +\n np.min(grid_object['Xp1'][:]))\n y_start = (np.random.random_sample(points)*(np.max(grid_object['Yp1'][:]) - np.min(grid_object['Yp1'][:])) +\n np.min(grid_object['Yp1'][:]))\n \n output = np.zeros((3,points,steps_per_trace))\n \n for i in xrange(points):\n x_stream,y_stream,t_stream = mitgcm.streamlines.stream2(u,v,x_start[i],y_start[i],\n grid_object,trace_length,delta_t)\n\n output[0,i,:steps_per_trace] = x_stream[:steps_per_trace]\n output[1,i,:steps_per_trace] = y_stream[:steps_per_trace]\n output[2,i,:steps_per_trace] = intensity[:steps_per_trace]\n\n return output", "def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, 
(len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = 
psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def gadget_kernel(r, h):\n factor = r/h\n factor2 = factor * factor\n prefactor = 4/(3 * h)\n\n if factor <= 0.5:\n poly = 1 - 6 * factor2 + 6 * factor2 * factor\n elif factor <= 1:\n one_minus_factor = 1 - factor\n poly = 2 * one_minus_factor * one_minus_factor * one_minus_factor\n else:\n poly = 0.\n\n return prefactor * poly", "def ghosal_edge_v2(img,Ks,kmin=0,kmax=1000,lmax=0.5,phimin=1,thresholding=True,debug=False,mirror=False):\n\t# gather image properties before its altered\n\tni,nj = np.shape(img)\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! 
Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex) # not needed\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00 # not needed\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\t# mirror the edges to avoid edge effects from convolution\n\tif mirror:\n\t\tthick = int((Ks-1)/2)\n\t\timg = np.concatenate((img[:,(thick-1)::-1],img,img[:,:-(thick+1):-1]),1)\n\t\timg = np.concatenate((img[(thick-1)::-1,:],img,img[:-(thick+1):-1,:]),0)\n\t\tmode = \"valid\"\n\telse:\n\t\tmode = \"same\"\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\t#A00 = scig.convolve2d(img,Vc00,mode='same') # not needed\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode=mode)\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode=mode)\n\n\tphi = np.arctan(np.imag(A11)/zero_to_small(np.real(A11)))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\tl = np.real(A20)/Al11 # A20 has no imaginary component so A20 = A'20\n\tl = np.minimum(l,1-SMALL) # chop off those that go beyond the kernel boundaries\n\tl = np.maximum(l,-1+SMALL)\n\tk = abs(3*Al11/(2*(1-l**2)**(3/2))) \n\t\n\tif thresholding==True:\n\t\t# conditions\n\t\tphi_c = abs(phi)>phimin\n\t\tl_c = abs(l)<lmax\n\t\tk_c = (k<kmax) & (k>kmin)\n\t\tvalid = phi_c & (k_c & l_c)\n\telif thresholding==False:\n\t\tvalid = np.ones_like(k)\n\t# define a grid of pixel positions\n\ti,j = np.meshgrid(np.arange(nj),np.arange(ni))\n\t\n\t# get a list of the valid relevant parameters \n\ti = i[valid]\n\tj = j[valid]\n\t#\tk = k[valid] # not necessary\n\tl = l[valid]\n\tphi = phi[valid]\n\t\n\t# convert to the subpixel position\n\ti_s = i+l*Ks/2*np.cos(phi)\n\tj_s = j+l*Ks/2*np.sin(phi)\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.squeeze((j_s,i_s)).transpose()\n\torg = np.squeeze((j,i)).transpose()\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org", "def sobel_kernel() -> torch.Tensor:\n\n return torch.outer(\n torch.tensor([1., 2., 1.]) / 4,\n torch.tensor([1., 0., -1.]),\n )", "def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
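The even_kernel record above reduces, under the same assumption about legp, to K_e(mu) = sum over even k in 0..N of (2k+1) P_k(mu) / (4 pi). A small self-contained sketch (hypothetical _check name) that also shows the antipodal symmetry of the even-degree kernel:

import numpy as np
from scipy.special import eval_legendre

def even_kernel_check(mu, N):
    # K_e(mu) = sum over even k in 0..N of (2k + 1) * P_k(mu) / (4 * pi)
    mu = np.clip(mu, -1, 1)
    return sum((2 * k + 1) * eval_legendre(k, mu)
               for k in range(0, N + 1, 2)) / (4.0 * np.pi)

# Even-degree Legendre polynomials are even functions, so K_e(mu) == K_e(-mu).
print(even_kernel_check(0.6, 8), even_kernel_check(-0.6, 8))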
Derivative of the reproducing kernel on the even subspace of maximum degree N.
def even_kernel_der(mu, N):

    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    #Derivatives of Legendre polynomials
    DlegPolys = legp_der(mu, N)

    coefs = 2*np.arange(0, N+1) + 1

    ker = coefs[0::2]*DlegPolys[0::2]

    return ker.sum() / (4.0*np.pi)
[ "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def inv_funk_radon_even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (8 * np.pi**2 * Pk(0) * k * (k + 1)) * Pk(mu)\n\n return A", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def delta(N):\n assert assert_odd(N) # Make sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X", "def even_kernel(mu, N):\n A = np.zeros_like(mu)\n\n for k in range(2, N + 1, 2):\n Pk = sp.special.legendre(k)\n A += (2 * k + 1) / (4 * np.pi) * Pk(mu)\n\n return A", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def _derivative_(self, n, x, diff_param):\n if diff_param == 1:\n return -(bessel_K(n - 1, x) + bessel_K(n + 1, x)) / Integer(2)\n else:\n raise NotImplementedError('derivative with respect to order')", "def nth_derivative(f, x, n):\n h = 10e-2\n out_h = 1/(h**n)\n out = 0\n for k in range(0, n+1):\n out += (-1)**(k+n)*choose(n,k)*f(x +k*h)\n return out_h*out", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def sub_kernel(kernel, dim1, dim2):\n\n sub_kernel = kernel[dim1[0]:dim1[1],dim2[0]:dim2[1]]\n return sub_kernel", "def _derivative_(self, n, x, diff_param):\n if diff_param == 1:\n return (bessel_I(n - 1, x) + bessel_I(n + 1, x)) / Integer(2)\n else:\n raise NotImplementedError('derivative with respect to order')", "def get_kernel_derivs(self, input_layer, delta_layer, layer_idx):\n \"\"\" Set local variables \"\"\"\n kernel_shape = self.kernel_layers[layer_idx].shape\n stride_size = self.stride_sizes[layer_idx]\n\n \"\"\" Start getting d kernelsˡ w.r.t δˡ \"\"\"\n num_filters, kernel_rows, kernel_cols, kernel_channels = kernel_shape[0], kernel_shape[1], kernel_shape[2], kernel_shape[3]\n num_inputs, delta_rows, delta_cols = delta_layer.shape[0], delta_layer.shape[1], delta_layer.shape[2]\n stride_rows, stride_cols = stride_size[0], stride_size[1]\n kernel_derivs = np.zeros((num_inputs, num_filters, kernel_rows, kernel_cols, kernel_channels))\n \n for delta_row in range(delta_rows):\n for delta_col in range(delta_cols):\n inputs = copy.deepcopy(input_layer)\n deltas = copy.deepcopy(delta_layer)\n inputs = np.repeat(np.array(inputs[:, stride_rows * delta_row:stride_rows * delta_row + kernel_rows, stride_cols * delta_col:stride_cols * delta_col + kernel_cols,:], ndmin=5), num_filters, 0)\n deltas = np.repeat(np.array(deltas[:, delta_row:delta_row + 1, delta_col:delta_col + 1,:], ndmin=5), kernel_channels, 0)\n inputs = np.swapaxes(inputs, 0, 1)\n deltas = np.swapaxes(np.swapaxes(deltas, 0, 1), 1, 4)\n z = (inputs * deltas)\n 
kernel_derivs += np.rot90(z,2,axes=(2, 3))\n return kernel_derivs", "def get_kernel_derivs(input_layer, delta_layer, layer_idx):\n\n \"\"\" Set local variables \"\"\"\n kernel_shape = self.kernel_layers[layer_idx].shape\n stride_size = self.stride_sizes[layer_idx]\n\n \"\"\" Start getting dδˡ⁺¹ w.r.t kernelsˡ \"\"\"\n num_filters, kernel_rows, kernel_cols, kernel_channels = kernel_shape[0], kernel_shape[1], kernel_shape[2], kernel_shape[3]\n num_inputs, delta_rows, delta_cols = delta_layer.shape[0], delta_layer.shape[1], delta_layer.shape[2]\n stride_rows, stride_cols = stride_size[0], stride_size[1]\n kernel_derivs = np.zeros(kernel_shape) \n for kernel_idx in range(num_filters):\n for kernel_row in range(kernel_rows):\n for kernel_col in range(kernel_cols):\n for kernel_channel in range(kernel_channels):\n kernel_gradient_sum = 0\n for inputs_idx in range(num_inputs):\n for delta_row in range(delta_rows):\n for delta_col in range(delta_cols):\n z = activation_fn(input_layer[inputs_idx, stride_rows*delta_row+kernel_row, stride_cols*delta_col+kernel_col, kernel_channel]) * delta_layer[inputs_idx, delta_row, delta_col, kernel_idx]\n kernel_gradient_sum += z\n kernel_derivs[kernel_idx, -kernel_row-1, -kernel_col-1, kernel_channel] = kernel_gradient_sum\n return kernel_derivs", "def _eunn_loop(state, capacity, diag_vec_list, off_vec_list, diag, fft):\n i = 0\n def layer_tunable(x, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n\n diag = tf.multiply(x, diag_vec)\n off = tf.multiply(x, off_vec)\n\n def even_input(off, size):\n\n def even_s(off, size):\n off = tf.reshape(off, [-1, size//2, 2])\n off = tf.reshape(tf.reverse(off, [2]), [-1, size])\n return off\n\n def odd_s(off, size):\n off, helper = tf.split(off, [size-1, 1], 1)\n size -= 1\n off = even_s(off, size)\n off = tf.concat([off, helper], 1)\n return off\n\n off = tf.cond(tf.equal(tf.mod(size, 2), 0), lambda: even_s(off, size), lambda: odd_s(off, size))\n return off\n\n def odd_input(off, size):\n helper, off = tf.split(off, [1, size-1], 1)\n size -= 1\n off = even_input(off, size)\n off = tf.concat([helper, off], 1)\n return off\n\n size = int(off.get_shape()[1])\n off = tf.cond(tf.equal(tf.mod(i, 2), 0), lambda: even_input(off, size), lambda: odd_input(off, size))\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n def layer_fft(state, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n diag = tf.multiply(state, diag_vec)\n off = tf.multiply(state, off_vec)\n\n hidden_size = int(off.get_shape()[1])\n # size = 2**i\n dist = capacity - i\n normal_size = (hidden_size // (2**dist)) * (2**(dist-1))\n normal_size *= 2\n extra_size = tf.maximum(0, (hidden_size % (2**dist)) - (2**(dist-1)))\n hidden_size -= normal_size\n\n def modify(off_normal, dist, normal_size):\n off_normal = tf.reshape(tf.reverse(tf.reshape(off_normal, [-1, normal_size//(2**dist), 2, (2**(dist-1))]), [2]), [-1, normal_size])\n return off_normal\n\n def do_nothing(off_normal):\n return off_normal\n\n off_normal, off_extra = tf.split(off, [normal_size, hidden_size], 1)\n off_normal = tf.cond(tf.equal(normal_size, 0), lambda: do_nothing(off_normal), lambda: modify(off_normal, dist, normal_size))\n helper1, helper2 = tf.split(off_extra, [hidden_size-extra_size, extra_size], 1)\n off_extra = tf.concat([helper2, helper1], 1)\n off = tf.concat([off_normal, off_extra], 1)\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n if fft:\n layer_function = layer_fft\n else:\n layer_function = 
layer_tunable\n output, _ = tf.while_loop(lambda state, i: tf.less(i, capacity), layer_function, [state, i])\n\n if not diag is None:\n output = tf.multiply(output, diag)\n\n\n return output", "def flipkernel(k):\r\n return np.squeeze(np.fliplr(k[None,:])) ###important for temporal causality!!!??\r", "def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def dndxfiz(x, n, param, h=1 / 100000):\n if (param[1] == 1 or param[1] == 2) and (n==1):\n if param[1]==1:\n if param[0]==0:\n return -1/2\n else:\n return 1/2\n elif param[1]==2:\n if param[0]==0:\n return x - 1/2\n elif param[0]==1:\n return -2*x\n else:\n return x+1/2\n else:\n if n <= 0: # Comprueba si el orden de diferenciacion es 0 para retornar el valor de la funcion\n return FEMSections.fiz(x, param) # Retorna el valor de la funcion\n else: # Recurre cambiando el orden de diferenciacion\n # Calcula diferencias finitas centradas de manera recursiva\n return (FEMSections.dndxfiz(x + h, n - 1, param, h) - FEMSections.dndxfiz(x - h, n - 1, param, h)) / (2 * h)", "def softmax_derivative(x):\n der = derivative(softmax,x,dx=1e-9)\n return der" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
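The even_kernel_der record above relies on an undocumented helper legp_der; assuming it returns the derivatives P_0'(mu)..P_N'(mu), the same even-degree sum can be sanity-checked with numpy.polynomial.legendre alone. A hedged sketch (the names here are illustrative):

import numpy as np
from numpy.polynomial import legendre as L

def dlegendre(k, mu):
    # Derivative of the degree-k Legendre polynomial, evaluated at mu.
    c = np.zeros(k + 1)
    c[k] = 1.0
    return L.legval(mu, L.legder(c))

def even_kernel_der_check(mu, N):
    # Sum of (2k + 1) * P_k'(mu) / (4 * pi) over even k = 0, 2, ..., N.
    mu = np.clip(mu, -1, 1)
    return sum((2 * k + 1) * dlegendre(k, mu)
               for k in range(0, N + 1, 2)) / (4.0 * np.pi)

print(even_kernel_der_check(0.3, 8))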
Returns the truncated iterated logarithm y = log( -log(x) ), where if x < delta, x = delta, and if 1 - delta < x, x = 1 - delta.
def ilog(x,delta):
    if(delta < x and x < 1.0 - delta):
        return np.log( -np.log(x) )
    elif(x < delta):
        return np.log( -np.log(delta) )
    else:
        return np.log( -np.log(1.0 - delta) )
[ "def log(x):\n\treturn log1p(x-1)", "def log(x):\r\n\r\n return math.log(x)", "def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))", "def diff_log(x):\n \n return np.diff(np.log(x)),np.log(x)[0]", "def diff_log(x):\n\n return np.diff(np.log(x)),np.log(x)[0]", "def _logit(x):\n\treturn numpy.log(x / (1. - x)).astype(theano.config.floatX)", "def log_transform(x, epsilon = 1e-4):\n if x.min() < 0: epsilon += np.abs(x.min())\n return (x.fillna(0).astype(float) + epsilon).apply(np.log)", "def _log_normalizer(self, x):\n out_unst_reg = torch.max(torch.le(x, self._lims[0] - 0.5),\n torch.gt(x, self._lims[1] - 0.5))\n cut_nat_params = torch.where(out_unst_reg,\n x,\n (self._lims[0] - 0.5) * torch.ones_like(x))\n log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log(torch.abs(cut_nat_params))\n taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0\n return torch.where(out_unst_reg, log_norm, taylor)", "def logarithm(x, eps=10e-5):\n if abs(x) >= 1:\n return float('Nan')\n\n pre_x = x\n tmp = x ** 2\n sign = -1\n i = 2\n res_x = pre_x + sign * tmp / i\n\n while abs(res_x - pre_x) > eps:\n sign = -sign\n i += 1\n tmp *= x\n pre_x = res_x\n res_x += sign * tmp / i\n\n return res_x", "def log(x, base):\n try:\n return ad.AD(math.log(x.val, base), x.der/(x.val*np.log(base)))\n except AttributeError:\n return math.log(x,base)", "def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def lognormalize(x, temp = 1):\n if type(x) is list: x = np.array(x)\n\n x = x - np.max(x)\n # anneal\n xp = np.power(np.exp(x), temp)\n return xp / xp.sum()", "def ln(x):\n return log(x, const.e)", "def log(x, base=math.e):\n return 0.0", "def log(val):\n if val > 0:\n return math.log(val)\n return float('-inf')", "def log10_inplace(a):", "def log_base(x: float, base):\n return np.log(x) / np.log(base)", "def safe_log(x):\n safe_x = jnp.where(x > 0.0, x, jnp.ones_like(x))\n return jnp.where(x > 0.0, jnp.log(safe_x), jnp.zeros_like(x))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
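A short worked example for the ilog record above: the clamp bounds come straight from the docstring, so x is restricted to [delta, 1 - delta] before log(-log(x)) is applied and the output saturates near the ends of (0, 1). Sketch with an illustrative _check name:

import numpy as np

def ilog_check(x, delta):
    # Clamp x into [delta, 1 - delta], then take the iterated logarithm.
    x = min(max(x, delta), 1.0 - delta)
    return np.log(-np.log(x))

# log(-log(x)) is monotone decreasing on (0, 1); inputs outside
# [delta, 1 - delta] return the value at the nearest bound.
for x in (1e-12, 0.2, 0.8, 1 - 1e-12):
    print(x, ilog_check(x, 1e-6))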
Create a 3D rotation matrix for rotation about the x-axis.

            ( 1    0        0      )
R(theta) =  ( 0    cos(x)   sin(x) )
            ( 0   -sin(x)   cos(x) )
def rotation3Dx(theta):
    rmat = np.zeros((3,3))
    rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0
    rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)
    rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)

    return rmat
[ "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rotation3D_x(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def make_rotation_x(angle):\n return Mat4(\n [\n [1, 0, 0, 0],\n [0, math.cos(angle), -math.sin(angle), 0],\n [0, math.sin(angle), math.cos(angle), 0],\n [0, 0, 0, 1],\n ]\n )", "def _create_rotation_matrix(angle, x, y, z):\n if la.norm((x, y, z)) < 0.0001:\n return np.eye(3, dtype=np.float32)\n x, y, z = np.array((x, y, z))/la.norm((x, y, z))\n matrix = np.zeros((3, 3), dtype=np.float32)\n cos = np.cos(angle)\n sin = np.sin(angle)\n matrix[0, 0] = x*x*(1-cos)+cos\n matrix[1, 0] = x*y*(1-cos)+sin*z\n matrix[0, 1] = x*y*(1-cos)-sin*z\n matrix[2, 0] = x*z*(1-cos)-sin*y\n matrix[0, 2] = x*z*(1-cos)+sin*y\n matrix[1, 1] = y*y*(1-cos)+cos\n matrix[1, 2] = y*z*(1-cos)-sin*x\n matrix[2, 1] = y*z*(1-cos)+sin*x\n matrix[2, 2] = z*z*(1-cos)+cos\n return matrix", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def rot_axis3(theta):\n ct = cos(theta)\n st = sin(theta)\n mat = ((ct,st,0),\n (-st,ct,0),\n (0,0,1))\n return MutableMatrix(mat)", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def rotation3DFromRotation(*args):\n return _almathswig.rotation3DFromRotation(*args)", "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def x_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[1, 0, 0],\n [0, cos_t, -sin_t],\n [0, sin_t, cos_t]])", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def make_rotation_z(angle):\n return Mat4(\n [\n [math.cos(angle), -math.sin(angle), 0, 0],\n [math.sin(angle), math.cos(angle), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n )", "def rotation_matrix( axis, theta ):\n\n # if the axis is only a point, return the identity matrix\n if np.linalg.norm( axis, 2 ) == 0:\n return np.matrix( np.diag([1,1,1] ) )\n # else normalise the axis vector, build and return the 3D rotation matrix\n else:\n axis = axis / np.linalg.norm( axis, 2 )\n a = np.cos( theta / 2 )\n b, c, d = -axis * np.sin( theta / 2 )\n aa, bb, cc, dd = a**2, b**2, c**2, d**2\n bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d\n return np.matrix( [ [aa + bb - cc - dd, 2 * ( bc + ad ), 2 * ( bd - ac ) 
],\n [ 2 * ( bc - ad ), aa + cc - bb - dd, 2 * ( cd + ab ) ],\n [ 2 * ( bd + ac ), 2 * ( cd - ab ), aa + dd - bb- cc ] ] )", "def _rotation(axis, angle):\n\n R = np.zeros((3,3))\n\n ux,uy,uz = axis\n c = np.cos(angle)\n s = np.sin(angle)\n\n R[0][0] = c + ux**2*(1-c)\n R[0][1] = ux*uy*(1-c) - uz*s\n R[0][2] = ux*uz*(1-c) + uy*s\n \n R[1][0] = uy*ux*(1-c) + uz*s\n R[1][1] = c + uy**2*(1-c)\n R[1][2] = uy*uz*(1-c) - ux*s\n\n R[2][0] = uz*ux*(1-c) - uy*s\n R[2][1] = uz*uy*(1-c) + ux*s\n R[2][2] = c + uz**2*(1-c)\n\n return R", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def generate_rotation_matrix(self):\n phi = self.get_phi()\n theta = self.get_theta()\n gamma = self.get_gamma()\n matrices = [\n rotation_about_z(-theta - 90 * DEGREES),\n rotation_matrix(-phi, RIGHT),\n rotation_about_z(gamma),\n ]\n result = np.identity(3)\n for matrix in matrices:\n result = np.dot(matrix, result)\n return result", "def rotation_x(theta):\n return np.array([[1, 0, 0],\n [0, np.cos(theta), -np.sin(theta)],\n [0, np.sin(theta), np.cos(theta)]])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a 3D rotation matrix for rotation about the z-axis.

               (  cos(x)  sin(x)  0 )
    R(theta) = ( -sin(x)  cos(x)  0 )
               (    0       0     1 )
def rotation3Dz(theta):
    rmat = np.zeros((3,3))
    rmat[0,0] = rmat[1,1] = np.cos(theta)
    rmat[0,1] = np.sin(theta)
    rmat[1,0] = -rmat[0,1]
    rmat[2,2] = 1
    return rmat
[ "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def make_rotation_z(angle):\n return Mat4(\n [\n [math.cos(angle), -math.sin(angle), 0, 0],\n [math.sin(angle), math.cos(angle), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n )", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def _create_rotation_matrix(angle, x, y, z):\n if la.norm((x, y, z)) < 0.0001:\n return np.eye(3, dtype=np.float32)\n x, y, z = np.array((x, y, z))/la.norm((x, y, z))\n matrix = np.zeros((3, 3), dtype=np.float32)\n cos = np.cos(angle)\n sin = np.sin(angle)\n matrix[0, 0] = x*x*(1-cos)+cos\n matrix[1, 0] = x*y*(1-cos)+sin*z\n matrix[0, 1] = x*y*(1-cos)-sin*z\n matrix[2, 0] = x*z*(1-cos)-sin*y\n matrix[0, 2] = x*z*(1-cos)+sin*y\n matrix[1, 1] = y*y*(1-cos)+cos\n matrix[1, 2] = y*z*(1-cos)-sin*x\n matrix[2, 1] = y*z*(1-cos)+sin*x\n matrix[2, 2] = z*z*(1-cos)+cos\n return matrix", "def z_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, -sin_t, 0],\n [sin_t, cos_t, 0],\n [0, 0, 1]])", "def rot_z(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), -np.sin(theta_rad), 0],\n [np.sin(theta_rad), np.cos(theta_rad), 0],\n [0, 0, 1]]\n return np.matrix(rotation_matrix)", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rot_z(theta):\n theta = np.radians(theta)\n return np.array([[np.cos(theta), - np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def rotation3DFromRotation(*args):\n return _almathswig.rotation3DFromRotation(*args)", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n 
return np.matrix(R)", "def generate_rotation_matrix(self):\n phi = self.get_phi()\n theta = self.get_theta()\n gamma = self.get_gamma()\n matrices = [\n rotation_about_z(-theta - 90 * DEGREES),\n rotation_matrix(-phi, RIGHT),\n rotation_about_z(gamma),\n ]\n result = np.identity(3)\n for matrix in matrices:\n result = np.dot(matrix, result)\n return result", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def rotateZ(self, angle):\n rad = angle\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n x = self.x * cosa - self.y * sina\r\n y = self.x * sina + self.y * cosa\r\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the geodesic distance on the sphere for two points. The points are assumed to lie on the surface of the same sphere.
def spherical_distances(x, y):
    # Compute the norms of all points, we do NOT check they actually all lie on
    # the same sphere (that's the caller's responsibility).
    xn = np.sqrt((x**2).sum(axis=1))
    yn = np.sqrt((y**2).sum(axis=1))
    ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])
    # Protect against numerical noise giving us cosine values outside the -1,1
    # range, where arccos would return nans.
    ang_cos = np.clip(ang_cos, -1, 1)
    return xn[:, None]*np.arccos(ang_cos)
[ "def spherical_distance(lat1: u.deg, lon1: u.deg, lat2: u.deg, lon2: u.deg):\n\n lat1m, lat2m = np.meshgrid(lat1, lat2)\n lon1m, lon2m = np.meshgrid(lon1, lon2)\n\n lat_dif = lat1m - lat2m\n lon_dif = lon1m - lon2m\n\n slatsq = np.sin(0.5 * lat_dif)**2\n slonsq = np.sin(0.5 * lon_dif)**2\n spsi2 = np.sqrt(slatsq + slonsq * np.cos(lat1m) * np.cos(lat2m))\n\n psi = 2 * np.arcsin(spsi2)\n\n return psi", "def earth_dist_in_meters(point1, point2):\n # Convert latitude and longitude to\n # spherical coordinates in radians.\n\n # phi = 90 - latitude\n phi1 = np.radians(90.0 - point1[1])\n phi2 = np.radians(90.0 - point2[1])\n\n # theta = longitude\n theta1 = np.radians(point1[0])\n theta2 = np.radians(point2[0])\n\n # Compute spherical distance from spherical coordinates.\n\n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n\n cos = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) +\n np.cos(phi1) * np.cos(phi2))\n\n if cos > 1:\n cos = 1\n elif cos < -1:\n cos = -1\n\n # This happens when for precision issue the cos is greater than 1 by a small fraction\n arc = np.arccos(cos)\n\n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n return arc * EARTH_RADIUS_IN_KM * 1000", "def earth_dist_in_meters_for_multiple_points(point1, points):\n # Convert latitude and longitude to\n # spherical coordinates in radians.\n\n # phi = 90 - latitude\n phi1 = np.radians(90.0 - point1[1])\n phi2 = np.radians(90.0 - points[:, 1])\n\n # theta = longitude\n theta1 = np.radians(point1[0])\n theta2 = np.radians(points[:, 0])\n\n # Compute spherical distance from spherical coordinates.\n\n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n\n cos = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) +\n np.cos(phi1) * np.cos(phi2))\n\n if cos > 1:\n cos = 1\n elif cos < -1:\n cos = -1\n\n # This happens when for precision issue the cos is greater than 1 by a small fraction\n arc = np.arccos(cos)\n\n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n return arc * EARTH_RADIUS_IN_KM * 1000", "def distOnUnitSphere(lat1, lon1, lat2, lon2):\n # Convert latitude and longitude to \n # spherical coordinates in radians.\n degrees_to_radians = math.pi/180.0\n # phi = 90 - latitude\n phi1 = (90.0 - lat1)*degrees_to_radians\n phi2 = (90.0 - lat2)*degrees_to_radians\n # theta = longitude\n theta1 = lon1*degrees_to_radians\n theta2 = lon2*degrees_to_radians\n # Compute spherical distance from spherical coordinates.\n # For two locations in spherical coordinates \n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) = \n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) + \n math.cos(phi1)*math.cos(phi2))\n arc = math.acos( cos )\n # Remember to multiply arc by the radius of the earth \n # in your favorite set of units to get length.\n return arc", "def get_distance_between_points(lat1,lon1,lat2,lon2):\n \treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def get_dist(lon1, lat1, lon2, lat2):\n from math import sin, cos, sqrt, atan2, radians\n import gpxpy.geo\n\n R = 
gpxpy.geo.EARTH_RADIUS/1000 # Radius of the Earth in km\n \n # convert to input lon/lat to radians \n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance", "def distance_between_points(latlon1, latlon2):\r\n ##calculation of distance using haversine formula (in km)\r\n\r\n latlon1=np.radians(latlon1)\r\n latlon2=np.radians(latlon2)\r\n\r\n dlat=latlon1[0]-latlon2[0]\r\n dlon=latlon1[1]-latlon2[1]\r\n\r\n a=np.sin(dlat/2) * np.sin(dlat/2) + np.sin(dlon/2) * np.sin(dlon/2) * np.cos(latlon1[0]) * np.cos(latlon2[0])\r\n b=2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\r\n return b*earth_radius", "def geodesic_distance(coord1, coord2):\n # convert coordinates to radians\n s = math.pi * np.squeeze(np.array(coord1)) / 180\n f = math.pi * np.squeeze(np.array(coord2)) / 180\n\n delta = (f - s)/2\n t = math.cos(f[0]) * math.cos(s[0]) * math.sin(delta[1])**2 + math.sin(delta[0])**2\n\n return earth_radius() * 2 * math.atan2(t**(1/2),(1-t)**(1/2))", "def get_geo_distance(c_1: City, c_2: City) -> float:\n return great_circle(c_1.coordinates, c_2.coordinates).km", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) ** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def calculate_distance(point1, point2):\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def distance(self, other):\n if not isinstance(other, GEOSGeometry):\n raise TypeError(\"distance() works only on other GEOS Geometries.\")\n return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))", "def distance_between_points(p1,p2):\n import math;\n distance=math.sqrt((p1.x-p2.x)**2+(p1.y-p2.y)**2);\n return distance;", "def calculate_distance(lat_long_pair_1, lat_long_pair_2):\n lat1, lng1 = lat_long_pair_1[0], lat_long_pair_1[1]\n lat2, lng2 = lat_long_pair_2[0], lat_long_pair_2[1]\n\n r = 6371000 # radius of the Earth in m\n\n x = (lng2 - lng1) * cos(0.5*(lat2+lat1))\n y = (lat2 - lat1)\n return r * sqrt( x * x + y * y )", "def distance_on_earth(a, b):\n return distance(a, b).meters", "def geodesicDistance(A, B = geolocate(\"Colosseo\")):\n # colosseo = (41.890183, 12.492369)\n return geopy.distance.vincenty(A, 
B).meters", "def distance(gps1, gps2):\n return math.sqrt((gps1.lat-gps2.lat)**2 + (gps1.lng-gps2.lng)**2)", "def points_distance(self, point1, point2):\n x = point1.x - point2.x\n y = point1.y - point2.y\n return math.sqrt(x*x + y*y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute a similarity matrix for a set of points. The points are assumed to lie on the surface of the same sphere.
def similarity_matrix(points, sigma):
    distances_squared = spherical_distances(points, points)**2
    return np.exp( -distances_squared / (2.0 * sigma) )
[ "def dists_on_sphere(x):\n k = x.shape[0]\n dist_mat = np.zeros((k, k))\n for i in range(k):\n for j in range(k):\n if i == j:\n dist_mat[i, j] = -1\n else:\n dist_mat[i, j] = np.arccos(np.dot(x[i], x[j]))**2\n return dist_mat", "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "def calculate_similarity_matrix(set_a, set_b):\n\n similarity = np.zeros([len(set_a), len(set_b)], dtype=np.float32)\n for i, _ in enumerate(set_a):\n for j, _ in enumerate(set_b):\n similarity[i, j] = iou(set_a[i], set_b[j])\n return similarity", "def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))", "def build_sphere_points(numpoints=1000):\n\trandom = np.random.normal(size=(numpoints, 3))\n\tnorm = np.sqrt(np.sum(random*random, axis=1))\n\treturn (random.T/norm).T", "def compute_similarity_matrix(X):\n euclidian_distances = euclidean_distances(X)\n similarity_matrix = 1 - euclidian_distances/euclidian_distances.max()\n similarity_matrix = np.exp(-1 * euclidian_distances / euclidian_distances.std())\n # similarity_matrix = cosine_similarity(X)\n return similarity_matrix", "def cosine_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = cosine_similarity(references[i, :], queries[j, :])\n return scores", "def compute_similarity_transform(source_points, target_points):\n assert target_points.shape[0] == source_points.shape[0]\n assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n source_points = source_points.T\n target_points = target_points.T\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n var1 = np.sum(X1 ** 2)\n K = X1.dot(X2.T)\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n R = V.dot(Z.dot(U.T))\n scale = np.trace(R.dot(K)) / var1\n t = mu2 - scale * R.dot(mu1)\n source_points_hat = scale * R.dot(source_points) + t\n source_points_hat = source_points_hat.T\n return source_points_hat", "def create_similarity_matrix_cosine(matrix):\n mc_matrix = matrix - matrix.mean(axis = 0)\n return pd.DataFrame(pw.cosine_similarity(mc_matrix.fillna(0)), index = matrix.index, columns = matrix.index)", "def calcSphereSimilarity(self):\n # Get the theoretical radius a sphere should have for the hotspot volume\n # Remember the sphere volume formula: V = (4/3)*pi*r^3\n expectedRadius = ((3*self.volume)/(4*npy.pi))**(1/3.)\n expectedDiameter = expectedRadius*2 + self.spacing\n\n # Calculate relation extension/expectedRadius\n self.sphereRatios = self.extension/expectedDiameter # times 2 because extension would be a diameter\n self.sphereindex = npy.linalg.norm(self.sphereRatios-1) + 1", "def feature_cosine_similarity(self, points_features):\n\n mean_points_feats = torch.mean(points_features, dim=1, keepdim=True)\n norm_pts_feats = torch.norm(\n points_features, p=2, dim=2).unsqueeze(dim=2).clamp(min=1e-2)\n 
norm_mean_pts_feats = torch.norm(\n mean_points_feats, p=2, dim=2).unsqueeze(dim=2).clamp(min=1e-2)\n\n unity_points_features = points_features / norm_pts_feats\n unity_mean_points_feats = mean_points_feats / norm_mean_pts_feats\n\n cos_similarity = nn.CosineSimilarity(dim=2, eps=1e-6)\n feats_similarity = 1.0 - cos_similarity(unity_points_features,\n unity_mean_points_feats)\n\n max_correlation, _ = torch.max(feats_similarity, dim=1)\n\n return max_correlation", "def CalcDistanceMatrix(points):\n \n retMatrix = cdist(points,points)\n for xx in range(len(points)):\n for yy in range(len(points)):\n retMatrix[xx][yy] = M_Projection.LatLong2DistanceValue(points[xx][0], points[xx][1], points[yy][0], points[yy][1])\n \n return retMatrix", "def similarity(self, set_obj):\n raise NotImplementedError", "def get_sims(centroids):\n\n sims = []\n length = len(centroids)\n \n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n sims.append(similarity(centroids[i], centroids[j]))\n \n return sims", "def similarity_matrix(source, target):\n result = numpy.zeros((len(source), len(target)))\n for i, source_item in enumerate(source):\n for j, target_item in enumerate(target):\n result[i, j] = similarity(source_item, target_item)\n return result", "def get_sim_matrix(centroids):\n\n matrix = {}\n length = len(centroids)\n\n for i in xrange(0, length):\n matrix[i] = {}\n\n for j in xrange(i + 1, length):\n matrix[i][j] = similarity(centroids[i], centroids[j])\n\n return matrix", "def distance_matrix(points):\n points = numpy.array(points)\n num, dim = points.shape\n delta = numpy.zeros((num,num), 'd')\n for d in range(dim):\n data = points[:,d]\n delta += (data - data[:, None])**2\n return numpy.sqrt(delta)", "def compute_homography(self, point_pairs):\n if len(point_pairs) < 4:\n raise ValueError(\"Please specify at least 4 point correspondences for computing homography.\")\n\n # perform Hartley normalization\n source = point_pairs[:, 0, :]\n target = point_pairs[:, 1, :]\n source_mean = source.mean(axis=0)\n target_mean = target.mean(axis=0)\n source_T = np.eye(3)\n source_T[0:2, 2] = -source_mean\n target_T = np.eye(3)\n target_T[0:2, 2] = -target_mean\n source = source - source_mean\n target = target - target_mean\n source_max = source.max()\n target_max = target.max()\n source_S = np.diag([1 / source_max, 1 / source_max, 1])\n target_S = np.diag([1 / target_max, 1 / target_max, 1])\n source = source / source_max\n target = target / target_max\n\n A = np.zeros((2 * len(point_pairs), 9))\n A[::2, 0:2] = source\n A[::2, 2] = 1\n A[1::2, 3:5] = source\n A[1::2, 5] = 1\n A[:, 8] = -target.ravel()\n A[::2, 6:8] = source\n A[1::2, 6:8] = source\n A[:, 6:8] = A[:, 6:8] * A[:, 8].reshape(-1, 1)\n\n evals, evecs = la.eig(A.T @ A)\n hom_matrix = evecs[:, np.argmin(evals)].reshape(3, 3)\n hom_matrix = la.inv(target_T) @ la.inv(target_S) @ hom_matrix @ source_S @ source_T\n\n return hom_matrix", "def similarity_matrix(P, similarity_measure, normalize=True, inverse=True):\n N = len(P) \n S = np.zeros((N, N))\n for i in range(N): \n for j in range(i): \n S[i][j] = similarity_measure(P[i], P[j])\n\n S = square(S)\n if normalize: \n S = S / np.max(S)\n if inverse:\n S = 1 - S # Higher value = more similar\n\n return S" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to help verify that a function was actually executed. Annotates a function with an attribute 'didrun', and only sets it to True if the function is actually called.
def checkrun(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        wrapper.didrun = True
        return f(*args, **kwargs)
    wrapper.didrun = False
    return wrapper
[ "def _can_run(self, func):\n self.can_run = func", "def check_called(self, func):\n self.called[func] = False\n def _check(*args, **kwargs):\n self.called[func] = True\n return func(*args, **kwargs)\n return _check", "def expect_pass(self, func: Callable, *args, **kwargs) -> None:\n _, failures = self.post_process(func, *args, **kwargs)\n self.assertEqual(len(failures), 0)", "def run_once(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = func(*args, **kwargs)\n wrapper.has_run = True\n return result\n wrapper.has_run = False\n return wrapper", "def post_run_func_checked(driver: HammerDriver) -> None:\n if post_run_func is not None:\n post_run_func(driver)", "def assertion_passed(self, func):", "def check_before_executing(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not self._checked:\n assert self.is_correct, (\n 'The MatchList is incorrectly constructed. '\n 'Run check_and_print_if_error() for details.')\n return f(self, *args, **kwargs)\n return wrapper", "def run_once(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = f(*args, **kwargs)\n wrapper.has_run = True\n return result\n wrapper.has_run = False\n return wrapper", "def call_once(func):\n argspec = inspect.getfullargspec(func)\n argspec_args = argspec.args\n argspec_varargs = argspec.varargs\n argspec_keywords = argspec.varkw\n if argspec_args or argspec_varargs or argspec_keywords:\n raise ValueError('Can only decorate functions with no args', func, argspec)\n\n @functools.wraps(func)\n def _wrapper():\n # If we haven't been called yet, actually invoke func and save the result.\n if not _wrapper.has_run():\n _wrapper.mark_as_run()\n _wrapper.return_value = func()\n return _wrapper.return_value\n\n _wrapper._has_run = False # pylint: disable=protected-access\n _wrapper.has_run = lambda: _wrapper._has_run # pylint: disable=protected-access\n _wrapper.mark_as_run = lambda: setattr(_wrapper, '_has_run', True)\n return _wrapper", "def run_once(func):\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n wrapper.has_run = True\n return func(*args, **kwargs)\n wrapper.has_run = False\n return wrapper", "def test_decorator(self):\n\n func = lambda: None\n func_ = hidden(func)\n self.assertTrue(func is func_)\n self.assertTrue(hasattr(func_, '__hidden__'))\n self.assertTrue(func.__hidden__)", "def record(self, func):\n signature= inspect.signature(func)\n def wrapper(*args, **kwargs):\n bound_args= signature.bind(*args, **kwargs)\n ret = func(*args, **kwargs)\n if not self.argstr(bound_args.arguments) in self.arg_hash:\n self.tests.append((bound_args.arguments, ret))\n self.arg_hash.add(self.argstr(bound_args.arguments))\n return ret\n return wrapper", "def assert_code_block_called(code_block: CodeBlock) -> None:\n for ins in code_block.instructions:\n assert ins.result is not NOT_YET_RUN", "def tests_function(fnname):\n def decorator(cls):\n cls.FUNCTION_TESTED = fnname\n\n return cls\n return decorator", "def check_mocked_functions_called(*mocked_functions):\n for mocked_function in mocked_functions:\n assert_that(mocked_function.called, f\"The function was not called - {mocked_function}\")", "def _check_func_complete(self, func):\n complete = False\n if self._completed_funcs is None:\n return complete\n\n if func.addr in self._completed_funcs:\n complete = True\n\n return complete", "def test_function_redefinition_robustness(self):\n\n entered_function = [False]\n\n @caching.cache_results(cache_path=CACHE_PATH)\n def 
_any_cached_function(arg, kwarg=None):\n entered_function[0] = True\n return \"arg: %s, kwarg: %s\" % (arg, kwarg)\n\n self.assertFalse(entered_function[0], \"Test failed - wrong setup.\")\n arg = 1\n first_res = _any_cached_function(arg)\n self.assertTrue(os.path.exists(CACHE_PATH), \"Cache in unknown \"\n \"location.\")\n self.assertTrue(entered_function[0], \"Did not enter the function in \"\n \"first call.\")\n\n @caching.cache_results(cache_path=CACHE_PATH)\n # pylint: disable=function-redefined\n # This behaviour is under test\n def _any_cached_function(arg, kwarg=None):\n entered_function[0] = True\n return \"arg: %s, kwarg: %s\" % (arg, kwarg)\n\n entered_function[0] = False\n second_res = _any_cached_function(arg)\n self.assertFalse(entered_function[0], \"Entered function in second \"\n \"call.\")\n self.assertEqual(first_res, second_res, \"Return value changed.\")\n third_res = _any_cached_function(arg, kwarg=None)\n self.assertEqual(first_res, third_res, \"Return value changed on kwarg \"\n \"supply.\")", "def track_last_called(decorated_function):\n last_invoked = 0\n def wrapper(sum_to):\n \"\"\"\n The wrapper that will actually wrap the decorated_function\n \"\"\"\n nonlocal last_invoked\n if time() - last_invoked < seconds_before_next_call:\n raise AssertionError(\"Called too soon!\")\n last_invoked = time()\n return decorated_function(sum_to)\n return wrapper", "def failure_detector_exec(self, func):\n module = self.modules[Module.FAILURE_DETECTOR_MODULE]\n if func == Function.SUSPECTED:\n return module.suspected()\n else:\n raise ValueError(\"Bad function parameter\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Users can specify environment variables in their config file which will be set in the driver and worker environments. Make sure those variables are set during the workflow, but not after.
def test_workflow_environment():
    config = {
        "workflow-name": "workflow",
        "cluster-type": CLUSTER_TYPE,
        "environment-variables": {
            "FOO": "BAR",
            "FOO2": "BAR2"
        }
    }

    template_dir = tempfile.mkdtemp(suffix="test-workflow-environment-template")
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    @checkrun
    def execute(workflow_inst):
        def _check():
            assert os.environ['FOO'] == "BAR"
            assert os.environ["OMP_NUM_THREADS"] == '1'
            return True

        # driver env
        _check()

        # worker env
        assert all(workflow_inst.run_on_each_worker(_check).values())

    os.environ['FOO'] = 'ORIGINAL_FOO'
    _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)
    assert execute.didrun

    # Environment is restored after execution is finished.
    assert os.environ['FOO'] == 'ORIGINAL_FOO'
    assert 'FOO2' not in os.environ
[ "def setup_env():\n os.environ['AUTH_DOMAIN'] = \"appscale.com\"\n os.environ['USER_EMAIL'] = \"\"\n os.environ['USER_NICKNAME'] = \"\"\n os.environ['APPLICATION_ID'] = \"\"", "def env_config():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_GCP_REGION': 'expected-gcp-region',\n 'ELB_GCP_ZONE': 'expected-gcp-zone',\n 'ELB_BATCH_LEN': '93',\n 'ELB_CLUSTER_NAME': 'expected-cluster-name',\n 'ELB_RESULTS': 'gs://expected-results',\n 'ELB_USE_PREEMPTIBLE': 'true',\n 'ELB_BID_PERCENTAGE': '91'}\n\n for var_name in env:\n os.environ[var_name] = str(env[var_name])\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def test_local_env_pass_explicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_explicit\"\n\n assert env_key not in os.environ\n\n test_dir = fileutils.make_test_dir()\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n env_vars = {env_key: exp_value} # <-- explicitly passing a new env var to task\n settings = RunSettings(\n exe_name, exe_args, run_command=\"srun\", run_args=run_args, env_vars=env_vars\n )\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key in settings.env_vars\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def _set_distributed_environment(training_env):\n # According to https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html\n # hosts are sorted lexicographically.\n os.environ['MASTER_ADDR'] = training_env.master_hostname\n os.environ['MASTER_PORT'] = MASTER_PORT", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) 
|\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = 
''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n 
# whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n 
self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0", "def load_environment_vars():\n bootstrap_servers = os.environ.get('BOOTSTRAP_SERVERS')\n backend = os.environ.get('BACKEND')\n control_topic = os.environ.get('CONTROL_TOPIC')\n\n return (bootstrap_servers, backend, control_topic)", "def _setup_torch_env(torch_config: dict) -> None:\n for env_variable in torch_config.keys():\n os.environ[env_variable] = str(torch_config[env_variable])", "def enable_test_environment_variables():\n\n os.environ.setdefault(\"GOOGLE_CLOUD_PROJECT\", \"example\")\n os.environ.setdefault(\"GAE_APPLICATION\", \"e~example\")\n os.environ.setdefault(\"GAE_ENV\", \"development\")", "def set_up_environment(self):\n self.aftl_host = os.environ.get('AFTL_HOST')\n self.aftl_pubkey = os.environ.get('AFTL_PUBKEY')\n self.aftl_apikey = os.environ.get('AFTL_APIKEY')\n self.vbmeta_image = os.environ.get('AFTL_VBMETA_IMAGE')\n self.manufacturer_key = os.environ.get('AFTL_MANUFACTURER_KEY')\n\n if (not self.aftl_host or not self.aftl_pubkey or not self.vbmeta_image\n or not self.manufacturer_key):\n self.fail('Environment variables not correctly set up. 
See description of'\n ' this test case for details')", "def make_environment(self):\n\n env = os.environ.copy()\n\n # Sets the name\n env[\"COMPOSE_PROJECT_NAME\"] = self.config.observatory.docker_compose_project_name\n\n # Host settings\n env[\"HOST_USER_ID\"] = str(self.host_uid)\n env[\"HOST_GROUP_ID\"] = str(self.host_gid)\n env[\"HOST_OBSERVATORY_HOME\"] = os.path.normpath(self.config.observatory.observatory_home)\n env[\"HOST_REDIS_PORT\"] = str(self.config.observatory.redis_port)\n env[\"HOST_FLOWER_UI_PORT\"] = str(self.config.observatory.flower_ui_port)\n env[\"HOST_AIRFLOW_UI_PORT\"] = str(self.config.observatory.airflow_ui_port)\n env[\"HOST_ELASTIC_PORT\"] = str(self.config.observatory.elastic_port)\n env[\"HOST_KIBANA_PORT\"] = str(self.config.observatory.kibana_port)\n\n # Secrets\n if self.config.google_cloud is not None and self.config.google_cloud.credentials is not None:\n env[\"HOST_GOOGLE_APPLICATION_CREDENTIALS\"] = self.config.google_cloud.credentials\n env[\"AIRFLOW_FERNET_KEY\"] = self.config.observatory.airflow_fernet_key\n env[\"AIRFLOW_SECRET_KEY\"] = self.config.observatory.airflow_secret_key\n env[\"AIRFLOW_UI_USER_EMAIL\"] = self.config.observatory.airflow_ui_user_email\n env[\"AIRFLOW_UI_USER_PASSWORD\"] = self.config.observatory.airflow_ui_user_password\n env[\"POSTGRES_PASSWORD\"] = self.config.observatory.postgres_password\n\n # Create Airflow variables\n airflow_variables = self.config.make_airflow_variables()\n for variable in airflow_variables:\n env[variable.env_var_name] = str(variable.value)\n\n # Airflow connections\n for conn in self.config.airflow_connections:\n env[conn.conn_name] = conn.value\n\n return env", "def setup_environment():\n data_path = os.path.join(os.path.dirname(__file__), 'data')\n assert os.path.exists(os.path.join(data_path, '7_param.dat'))\n key = 'GEOTRANS_DATA'\n os.environ[key] = data_path", "def __init__(self):\n try:\n # For local testing\n from dotenv import load_dotenv\n load_dotenv(os.path.join(os.path.dirname(__file__), ENV_FILE))\n dotenv = os.environ\n os.environ.update(dotenv)\n except:\n # For heroku\n dotenv = os.environ", "def test_env_var_settings_set(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"wowww\"\n\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"goood\"\n\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS(\"123456\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"yesss\"\n\n del os.environ[\"YESSSSMS_LOGIN\"]\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"123456\"\n assert sms._logindata[\"login_passwort\"] == \"password\"\n assert sms._provider == \"yesss\"", "def staging():\n env.user = 'tathros'\n env.environment = 'staging'\n env.hosts = ['136.243.151.13']\n _setup_path()", "def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n 
env.use_ssh_config = True\n\n # Bug: when setting this inside a function. Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)", "def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['ombu@d2.ombuweb.com:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'", "def _setup_local_environment():\n if not env.has_key(\"user\"):\n env.user = os.environ[\"USER\"]\n if not env.has_key(\"java_home\"):\n env.java_home = os.environ.get(\"JAVA_HOME\", \"/usr/lib/jvm/java-6-openjdk\")", "def prepare_environment(self):\n os.environ['NEGOTIATOR_GUEST'] = self.guest_name", "def prepare_env(self, *args, **kwargs):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The config can specify a resource manager server address as "driver", which means the workflow should launch the resource manager on the scheduler machine. Make sure it launches, but is also shut down after the workflow exits.
def test_resource_manager_on_driver():
    config = {
        "workflow-name": "workflow",
        "cluster-type": CLUSTER_TYPE,
        "resource-manager": {
            "server": "driver",
            "port": 4000,
            "config": {
                "read_reqs": 123,
                "read_data": 456,
                "write_reqs": 789,
                "write_data": 321
            }
        }
    }

    template_dir = tempfile.mkdtemp(suffix="test-resource-manager-on-driver-template")
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    @checkrun
    def execute(workflow_inst):
        client = ResourceManagerClient('127.0.0.1', 4000)
        mgr_config = client.read_config()
        assert mgr_config == config["resource-manager"]["config"], \
            "Resource manager config does not match the one in the workflow config"

    _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)
    assert execute.didrun

    # FIXME: For mysterious reasons, the check below does not work on Travis-CI.
    #        Somehow, read_config() succeeds despite the fact that
    #        the resource manager server was already terminated??
    if os.environ.get('TRAVIS', '') == 'true':
        pytest.skip("Skipping resource manager shutdown check on Travis-CI")

    # Server should not be running any more after workflow exits.
    with pytest.raises(TimeoutError):
        client2 = ResourceManagerClient('127.0.0.1', 4000)
        client2.read_config()
[ "def run(application_config):\n\n executor = init_executor(application_config)\n\n framework = mesos_pb2.FrameworkInfo()\n framework.user = \"\" # Have Mesos fill in the current user.\n framework.name = application_config['framework_name']\n logger = logging.getLogger(\"pisaura.scheduler\")\n scheduler = FailureScheduler(\n executor, logger, application_config['task_retry'])\n\n run_driver(scheduler, framework, application_config['master'])\n return scheduler", "def resource_manager(args):\n conf = oci_config.OCIConfig(args.config, regions=args.regions, profile=args.profile)\n\n training_tools.run(conf)", "def test_two_launchers(self):\n launchers = [\n MySQLClusterLauncher(\n self._driver,\n MySQLCluster(\n \"cluster0\",\n \"user0\",\n self._password_box.encrypt(\"pass0\"),\n 1,\n DEFAULT_TASK_CPUS,\n DEFAULT_TASK_MEM,\n DEFAULT_TASK_DISK),\n self._state_provider,\n self._zk_url,\n self._zk_client,\n self._framework_user,\n \"./executor.pex\",\n \"cmd.sh\",\n Amount(5, Time.SECONDS),\n \"/etc/mysos/admin_keyfile.yml\",\n self._scheduler_key),\n MySQLClusterLauncher(\n self._driver,\n MySQLCluster(\n \"cluster1\",\n \"user1\",\n self._password_box.encrypt(\"pass1\"),\n 2,\n DEFAULT_TASK_CPUS,\n DEFAULT_TASK_MEM,\n DEFAULT_TASK_DISK),\n self._state_provider,\n self._zk_url,\n self._zk_client,\n self._framework_user,\n \"./executor.pex\",\n \"cmd.sh\",\n Amount(5, Time.SECONDS),\n \"/etc/mysos/admin_keyfile.yml\",\n self._scheduler_key)]\n self._launchers.extend(launchers)\n\n resources = create_resources(\n cpus=DEFAULT_TASK_CPUS * 3,\n mem=DEFAULT_TASK_MEM * 3,\n disk=DEFAULT_TASK_DISK * 3,\n ports=set([10000, 10001, 10002]))\n self._offer.resources.extend(resources)\n\n # Three nodes in total across two clusters.\n # Simulate the scheduler.\n for i in range(3):\n for launcher in launchers:\n task_id, remaining = launcher.launch(self._offer)\n if task_id:\n # Update the offer so other launchers will use its remaining resources.\n del self._offer.resources[:]\n self._offer.resources.extend(remaining)\n break\n\n tasks = self._driver.method_calls[\"launchTasks\"]\n assert len(tasks) == 3", "def run_xenon_simple(workflow, machine, worker_config):\n scheduler = Scheduler()\n\n return scheduler.run(\n xenon_interactive_worker(machine, worker_config),\n get_workflow(workflow)\n )", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def main():\n cp = config(\"test_configs/red.conf\")\n stream = streamHandler(cp)\n runTest(cp, TestSlurmDynamic, stream, per_site=False)", "def test_launch_deployment(self):\n pass", "def _run_workflow(data, workflow_file, work_dir):\n utils.remove_safe(os.path.join(work_dir, \"workspace\"))\n cmd = [utils.get_program_python(\"configureStrelkaGermlineWorkflow.py\"),\n workflow_file, \"-m\", \"local\", \"-j\", dd.get_num_cores(data), \"--quiet\"]\n do.run(cmd, \"Run Strelka2: %s\" % dd.get_sample_name(data))\n utils.remove_safe(os.path.join(work_dir, \"workspace\"))", "def _manageWorkersConfig(event):\n if event.info.get('key') != PluginSettings.SLICER_CLI_WEB_WORKER_CONFIG_ITEM:\n return\n if _loadWorkerConfig():\n _manageWorkers(None)", "def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n 
ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)", "def launch(config_list):\n p = PyRosLaunch(config_list)\n p.start()\n p.spin()", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def test_get_server_runnable(self):\n global locator, config_paths\n locator.load_config(config_paths[2])\n\n self.assertIsNotNone(locator.get_server_runnable())", "def test_mgr_start_stop(self, runpath):\n driver = self.MyDriver(name=\"MyDriver\", runpath=runpath)\n\n assert not driver.pre_start_called\n assert not driver.post_start_called\n\n with driver:\n assert driver.pre_start_called\n assert driver.post_start_called\n assert not driver.pre_stop_called\n assert not driver.post_stop_called\n\n assert driver.pre_stop_called\n assert driver.post_stop_called", "def main(configurationDirectory): \r\n 
runtime=ExperimentRuntime(configurationDirectory, \"experiment_config.yaml\") \r\n runtime.start(configurationDirectory)", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def run_driver(*args, **kwargs):\n\n driver = mesos.native.MesosSchedulerDriver(*args, **kwargs)\n\n def run_driver_async():\n status = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1\n driver.stop()\n sys.exit(status)\n\n framework_thread = Thread(target=run_driver_async)\n framework_thread.daemon = True\n framework_thread.start()\n\n if current_thread().name == \"MainThread\":\n signal.signal(signal.SIGINT, lambda signal, frame: driver.stop())", "def _load_and_run_scenario(self, args, config):\n crash_message = \"\"\n entry_status = \"Started\"\n\n print(\"\\n\\033[1m========= Preparing {} (repetition {}) =========\".format(config.name, config.repetition_index))\n print(\"> Setting up the agent\\033[0m\")\n\n # Prepare the statistics of the route\n self.statistics_manager.set_route(config.name, config.index)\n\n # Set up the user's agent, and the timer to avoid freezing the simulation\n try:\n self._agent_watchdog.start()\n agent_class_name = getattr(self.module_agent, 'get_entry_point')()\n self.agent_instance = getattr(self.module_agent, agent_class_name)(args.agent_config)\n config.agent = self.agent_instance\n\n # Check and store the sensors\n if not self.sensors:\n self.sensors = self.agent_instance.sensors()\n track = self.agent_instance.track\n\n AgentWrapper.validate_sensor_configuration(self.sensors, track, args.track)\n\n self.sensor_icons = [sensors_to_icons[sensor['type']] for sensor in self.sensors]\n self.statistics_manager.save_sensors(self.sensor_icons, args.checkpoint)\n\n self._agent_watchdog.stop()\n\n except SensorConfigurationInvalid as e:\n # The sensors are invalid -> set the ejecution to rejected and stop\n print(\"\\n\\033[91mThe sensor's configuration used is invalid:\")\n print(\"> {}\\033[0m\\n\".format(e))\n traceback.print_exc()\n\n crash_message = \"Agent's sensors were invalid\"\n entry_status = \"Rejected\"\n\n self._register_statistics(config, args.checkpoint, entry_status, crash_message)\n self._cleanup()\n sys.exit(-1)\n\n except Exception as e:\n # The agent setup has failed -> start the next route\n print(\"\\n\\033[91mCould not set up the required agent:\")\n print(\"> {}\\033[0m\\n\".format(e))\n traceback.print_exc()\n\n crash_message = \"Agent couldn't be set up\"\n\n self._register_statistics(config, args.checkpoint, entry_status, crash_message)\n self._cleanup()\n return\n\n print(\"\\033[1m> Loading the world\\033[0m\")\n\n # Load the world and the scenario\n try:\n self._load_and_wait_for_world(args, config.town, config.ego_vehicles)\n self._prepare_ego_vehicles(config.ego_vehicles, False)\n scenario = RouteScenario(world=self.world, config=config, debug_mode=args.debug)\n self.statistics_manager.set_scenario(scenario.scenario)\n\n # Night mode\n if config.weather.sun_altitude_angle < 0.0:\n for vehicle in scenario.ego_vehicles:\n vehicle.set_light_state(carla.VehicleLightState(self._vehicle_lights))\n\n # Load scenario and run it\n if args.record:\n self.client.start_recorder(\"{}/{}_rep{}.log\".format(args.record, config.name, config.repetition_index))\n self.manager.load_scenario(scenario, self.agent_instance, config.repetition_index)\n\n except Exception as e:\n # The scenario is wrong -> set the ejecution to crashed and stop\n print(\"\\n\\033[91mThe scenario could not be loaded:\")\n print(\"> {}\\033[0m\\n\".format(e))\n 
traceback.print_exc()\n\n crash_message = \"Simulation crashed\"\n entry_status = \"Crashed\"\n\n self._register_statistics(config, args.checkpoint, entry_status, crash_message)\n\n if args.record:\n self.client.stop_recorder()\n\n self._cleanup()\n sys.exit(-1)\n\n print(\"\\033[1m> Running the route\\033[0m\")\n\n # Run the scenario\n try:\n self.manager.run_scenario()\n\n except AgentError as e:\n # The agent has failed -> stop the route\n print(\"\\n\\033[91mStopping the route, the agent has crashed:\")\n print(\"> {}\\033[0m\\n\".format(e))\n traceback.print_exc()\n\n crash_message = \"Agent crashed\"\n\n except Exception as e:\n print(\"\\n\\033[91mError during the simulation:\")\n print(\"> {}\\033[0m\\n\".format(e))\n traceback.print_exc()\n\n crash_message = \"Simulation crashed\"\n entry_status = \"Crashed\"\n\n # Stop the scenario\n try:\n print(\"\\033[1m> Stopping the route\\033[0m\")\n self.manager.stop_scenario()\n self._register_statistics(config, args.checkpoint, entry_status, crash_message)\n\n if args.record:\n self.client.stop_recorder()\n\n # Remove all actors\n scenario.remove_all_actors()\n\n self._cleanup()\n\n except Exception as e:\n print(\"\\n\\033[91mFailed to stop the scenario, the statistics might be empty:\")\n print(\"> {}\\033[0m\\n\".format(e))\n traceback.print_exc()\n\n crash_message = \"Simulation crashed\"\n\n if crash_message == \"Simulation crashed\":\n sys.exit(-1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The config can specify a script to be run on each worker upon cluster initialization. This test verifies that it is launched and active while the workflow runs, and that it is launched on each worker, or just once per machine, depending on the config.
def test_worker_initialization(setup_worker_initialization_template): template_dir, _config, once_per_machine = setup_worker_initialization_template num_workers = 2 if once_per_machine or CLUSTER_TYPE in ("synchronous", "processes"): expected_script_count = 1 else: expected_script_count = num_workers @checkrun def execute(workflow_inst): script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) assert script_count > 0, f"Worker script is not running. Check logs in:\n{script_dir}" assert script_count <= expected_script_count, f"Worker script started too many times. Check logs in:\n{script_dir}" assert script_count == expected_script_count, f"Worker script not started on all workers. Check logs in:\n{script_dir}" _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute) script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) assert script_count == 0, \ ("Worker script(s) remained running after the workflow exited."\ f"Check logs in:\n{script_dir}")
[ "def test_cluster_jobs_script(self):\r\n\r\n qiime_config = load_qiime_config()\r\n submit_script = qiime_config['cluster_jobs_fp']\r\n\r\n if (submit_script):\r\n full_path = which(submit_script)\r\n if full_path:\r\n submit_script = full_path\r\n self.assertTrue(exists(submit_script),\r\n \"cluster_jobs_fp is not set to a valid path in qiime config: %s\" % submit_script)\r\n # check if executable\r\n self.assertTrue(access(submit_script, X_OK),\r\n \"cluster_jobs_fp is not executable: %s\" % submit_script)\r\n else:\r\n # Can't run in parallel, but not a critical error\r\n pass", "def test_worker_dvid_initialization():\n repo_dir = Path(flyemflows.__file__).parent.parent\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-dvid\")\n \n # Copy worker script/config into the template\n shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml',\n f'{template_dir}/dvid.toml')\n \n shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh',\n f'{template_dir}/launch-worker-dvid.sh')\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"launch-worker-dvid.sh\",\n \"only-once-per-machine\": True,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 1.0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n def is_worker_dvid_running():\n return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n assert is_worker_dvid_running(), f\"Worker DVID is not running. Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n assert not is_worker_dvid_running(), \\\n (\"Worker DVID remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def main(configurationDirectory): \r\n runtime=ExperimentRuntime(configurationDirectory, \"experiment_config.yaml\") \r\n runtime.start(configurationDirectory)", "def main(configurationDirectory):\r\n runtime=ExperimentRuntime(configurationDirectory, \"experiment_config.yaml\") \r\n runtime.start(sys.argv)", "def run_experiment(experiment: str):\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n print_color(f\"* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Experiment: {experiment}\", bcolors.OKBLUE)\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n\n experiment_file = experiment + \".yaml\"\n\n # Set namespace to check\n with open(f\"./litmus/{experiment_file}\") as f:\n spec = yaml.load(f, Loader=yaml.FullLoader)\n result_name = spec['metadata']['name']\n namespace = spec['metadata']['namespace']\n\n print_color(f\"Running Litmus ChaosEngine Experiment {experiment_file} in namespace {namespace}\")\n print_color(f\"Deploying {experiment_file}...\")\n run_shell(f\"kubectl delete chaosengine {result_name} -n {namespace}\")\n run_shell(f\"kubectl create -f ./litmus/{experiment_file} -n {namespace}\")\n\n # Check status of experiment execution\n startTime = datetime.now()\n print_color(f\"{startTime.strftime('%Y-%m-%d %H:%M:%S')} Running experiment...\")\n expStatusCmd = 
\"kubectl get chaosengine \" + result_name + \" -o jsonpath='{.status.experiments[0].status}' -n \" + namespace\n run_shell(expStatusCmd)\n logs_cmd = f\"kubectl logs --since=10s -l name={experiment} -n {namespace}\"\n print(f\"\\n{bcolors.OKGREEN}//** Experiment Logs ({logs_cmd}) **//\\n\\n\")\n try:\n while subprocess.check_output(expStatusCmd, shell=True).decode('unicode-escape') != \"Completed\":\n os.system(logs_cmd)\n os.system(\"sleep 10\")\n\n print(f\"\\n\\n//** End of Experiment Logs **//{bcolors.ENDC}\\n\")\n\n # View experiment results\n run_shell(f\"kubectl describe chaosresult {result_name}-{experiment} -n {namespace}\")\n\n except:\n print_color(\"User has cancelled script execution.\", bcolors.FAIL)\n sys.exit(2)\n\n # Store Experiment Result\n status = subprocess.check_output(\"kubectl get chaosresult \" + result_name + \"-\" + experiment + \" -n \" + namespace + \" -o jsonpath='{.status.experimentstatus.verdict}'\", shell=True).decode('unicode-escape')\n return ExperimentResult(experiment, status, startTime)", "def verify_runconfig(master_host, namespace, job_name, replica, num_ps,\n num_workers, num_evaluators):\n is_chief = True\n num_replicas = 1\n if replica == \"ps\":\n is_chief = False\n num_replicas = num_ps\n elif replica == \"worker\":\n is_chief = False\n num_replicas = num_workers\n elif replica == \"evaluator\":\n is_chief = False\n num_replicas = num_evaluators\n\n # Construct the expected cluster spec\n chief_list = [\n \"{name}-chief-0.{ns}.svc:2222\".format(name=job_name, ns=namespace)\n ]\n ps_list = []\n for i in range(num_ps):\n ps_list.append(\"{name}-ps-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n worker_list = []\n for i in range(num_workers):\n worker_list.append(\"{name}-worker-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n evaluator_list = []\n for i in range(num_evaluators):\n evaluator_list.append(\"{name}-evaluator-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n cluster_spec = {\n \"chief\": chief_list,\n \"ps\": ps_list,\n \"worker\": worker_list,\n }\n if num_evaluators > 0:\n cluster_spec[\"evaluator\"] = evaluator_list\n\n for i in range(num_replicas):\n full_target = \"{name}-{replica}-{index}\".format(\n name=job_name, replica=replica.lower(), index=i)\n actual_config = get_runconfig(master_host, namespace, full_target)\n full_svc = \"{ft}.{ns}.svc\".format(ft=full_target, ns=namespace)\n expected_config = {\n \"task_type\": replica,\n \"task_id\": i,\n \"cluster_spec\": cluster_spec,\n \"is_chief\": is_chief,\n \"master\": \"grpc://{fs}:2222\".format(fs=full_svc),\n \"num_worker_replicas\": num_workers + 1, # Chief is also a worker\n \"num_ps_replicas\": num_ps,\n } if not replica == \"evaluator\" else {\n # Evaluator has special config.\n \"task_type\": replica,\n \"task_id\": 0,\n \"cluster_spec\": {},\n \"is_chief\": is_chief,\n \"master\": \"\",\n \"num_worker_replicas\": 0,\n \"num_ps_replicas\": 0,\n }\n\n # Compare expected and actual configs\n if actual_config != expected_config:\n msg = \"Actual runconfig differs from expected. 
Expected: {0} Actual: {1}\".format(\n str(expected_config), str(actual_config))\n logging.error(msg)\n raise RuntimeError(msg)", "def main():\n cp = config(\"test_configs/red.conf\")\n stream = streamHandler(cp)\n runTest(cp, TestSlurmDynamic, stream, per_site=False)", "def test_by_config(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_config(addon))\n\n self.run_mgr.by_default(self.cli_inst)\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Start'))\n self.assertTrue(output[1].startswith('Execute'))\n self.assertTrue(output[2].startswith('Stop'))", "def test_setup_cluster(self):\n self.logger.info(\"Verify setup cluster using server manager \")\n assert self.smgr_fixture.setup_cluster()\n return True", "def cluster():\n\n print colors.cyan('Running cluster: %s' % ', '.join(_load_hosts().keys()))\n env.parallel = True", "def test_two_launchers(self):\n launchers = [\n MySQLClusterLauncher(\n self._driver,\n MySQLCluster(\n \"cluster0\",\n \"user0\",\n self._password_box.encrypt(\"pass0\"),\n 1,\n DEFAULT_TASK_CPUS,\n DEFAULT_TASK_MEM,\n DEFAULT_TASK_DISK),\n self._state_provider,\n self._zk_url,\n self._zk_client,\n self._framework_user,\n \"./executor.pex\",\n \"cmd.sh\",\n Amount(5, Time.SECONDS),\n \"/etc/mysos/admin_keyfile.yml\",\n self._scheduler_key),\n MySQLClusterLauncher(\n self._driver,\n MySQLCluster(\n \"cluster1\",\n \"user1\",\n self._password_box.encrypt(\"pass1\"),\n 2,\n DEFAULT_TASK_CPUS,\n DEFAULT_TASK_MEM,\n DEFAULT_TASK_DISK),\n self._state_provider,\n self._zk_url,\n self._zk_client,\n self._framework_user,\n \"./executor.pex\",\n \"cmd.sh\",\n Amount(5, Time.SECONDS),\n \"/etc/mysos/admin_keyfile.yml\",\n self._scheduler_key)]\n self._launchers.extend(launchers)\n\n resources = create_resources(\n cpus=DEFAULT_TASK_CPUS * 3,\n mem=DEFAULT_TASK_MEM * 3,\n disk=DEFAULT_TASK_DISK * 3,\n ports=set([10000, 10001, 10002]))\n self._offer.resources.extend(resources)\n\n # Three nodes in total across two clusters.\n # Simulate the scheduler.\n for i in range(3):\n for launcher in launchers:\n task_id, remaining = launcher.launch(self._offer)\n if task_id:\n # Update the offer so other launchers will use its remaining resources.\n del self._offer.resources[:]\n self._offer.resources.extend(remaining)\n break\n\n tasks = self._driver.method_calls[\"launchTasks\"]\n assert len(tasks) == 3", "def run(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n print('Key in run')\n print(config.bigmacc.key)\n i = config.bigmacc.key\n print(i)\n # SCENARIO SETUP ---\n config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)\n print(config.general.project)\n cea.datamanagement.data_initializer.main(config)\n # use the scenario code to set the year for the lca and other operations that need the current year\n pathway_code = config.general.parent\n pathway_items = pathway_code.split('_')\n scenario_year = int(pathway_items[1])\n config.emissions.year_to_calculate = scenario_year\n\n bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)\n\n scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')\n experiment_key = 'exp_{}'.format(i)\n print(experiment_key)\n keys = [int(x) for x in str(i)]\n if experiment_key in scen_check['Experiments'].values.tolist():\n print('Experiment was finished previously, moving to next.')\n pass\n 
else:\n print('START: experiment {}.'.format(i))\n\n # INITIALIZE TIMER ---\n t0 = time.perf_counter()\n if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):\n print(' - Folder exists for experiment {}.'.format(i))\n else:\n os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))\n print(' - Folder does not exist for experiment {}, creating now.'.format(i))\n\n # run the archetype mapper to leverage the newly loaded typology file and set parameters\n print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))\n cea.datamanagement.archetypes_mapper.main(config)\n\n # run the rule checker to set the scenario parameters\n print(' - Running rule checker for experiment {}.'.format(i))\n cea.bigmacc.bigmacc_rules.main(config)\n\n # SIMULATIONS ---\n\n print(' - Run radiation is {}.'.format(config.bigmacc.runrad))\n print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))\n # checking on need for radiation simulation\n\n if config.bigmacc.runrad == True:\n # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation\n if config.bigmacc.rerun != True:\n print(' - Running radiation simulation for experiment {}.'.format(i))\n if os.path.exists(locator.get_radiation_building('B000')):\n print(' - Radiation folder exists for experiment {}, copying.'.format(i))\n else:\n print(' - Radiation running for experiment {}.'.format(i))\n cea.resources.radiation_daysim.radiation_main.main(config)\n else:\n # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))\n old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'solar-radiation')\n # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())\n else:\n radfiles = config.bigmacc.copyrad\n # print(' - Copying radiation results from {}.'.format(radfiles))\n # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())\n print(' - Experiment {} does not require new radiation simulation.'.format(i))\n\n # running demand forecasting\n if os.path.exists(locator.get_schedule_model_file('B000')):\n print(' - Schedules exist for experiment {}.'.format(i))\n else:\n print(' - Schedule maker running for experiment {}.'.format(i))\n schedule_maker.main(config)\n\n # check to see if we need to rerun demand or if we can copy\n if config.bigmacc.rerun != True:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n if keys[0] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n elif keys[6] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n cea.demand.demand_main.main(config)\n # print(' - Looking for demand results data from previous run for experiment {}.'.format(i))\n # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n # config.general.scenario_name, 'outputs', 'data', 'demand')\n # if os.path.exists(old_demand_files):\n # # print(' - Copy demand results files from previous run of experiment {}.'.format(i))\n # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())\n # pass\n # else:\n # print(' - No results found.')\n # print(' - Running demand simulation for experiment {}.'.format(i))\n # 
cea.demand.demand_main.main(config)\n\n if config.bigmacc.pv == True:\n print(' - Run PV is {}.'.format(config.bigmacc.pv))\n if config.bigmacc.rerun == True:\n print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))\n old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')\n if os.path.exists(old_pv_files):\n # print(' - Copying PV files from previous run of experiment {}.'.format(i))\n # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())\n pass\n else:\n print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n else:\n # if PV simulation is needed, run it.\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n\n print('Run water-body exchange is {}.'.format(config.bigmacc.water))\n # if water-body simulation is needed, run it.\n if config.bigmacc.water == True:\n print(' - Running water body simulation for experiment {}.'.format(i))\n water.main(config)\n\n # recalculating the supply split between grid and ng in the websrook DH\n if keys[4] == 1:\n print(' - Do not run district heat recalculation.')\n else:\n print(' - Run district heat recalculation.')\n cea.bigmacc.wesbrook_DH.main(config)\n\n if keys[7] == 1:\n print(' - PV use detected. Adding PV generation to demand files.')\n util.write_pv_to_demand(config)\n else:\n print(' - No PV use detected.')\n\n # running the emissions and costing calculations\n print(' - Run cost and emissions scripts.')\n cea.analysis.costs.system_costs.main(config)\n cea.analysis.lca.main.main(config)\n\n # clone out the simulation inputs and outputs directory\n print(' - Transferring results directory for experiment {}.'.format(i))\n\n new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'inputs')\n new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data')\n\n if config.bigmacc.rerun != True:\n distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)\n distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)\n\n time_elapsed = time.perf_counter() - t0\n\n # save log information\n log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),\n index_col='Unnamed: 0')\n log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),\n 'Completed': 'True',\n 'Experiment Time': '%d.2 seconds' % time_elapsed,\n 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)\n log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))\n log_df.to_csv(r\"C:\\Users\\justi\\Desktop\\126logger_backup.csv\", )\n\n # write netcdf of hourly_results\n netcdf_writer.main(config, time='hourly')\n\n if config.bigmacc.rerun != True:\n shutil.rmtree(locator.get_costs_folder())\n shutil.rmtree(locator.get_demand_results_folder())\n shutil.rmtree(locator.get_lca_emissions_results_folder())\n shutil.rmtree(locator.get_solar_radiation_folder())\n shutil.rmtree(locator.get_potentials_folder())\n else:\n print(' - Rerun does not require purging of the files.')\n\n # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here\n if keys[0] == 1:\n cea.datamanagement.data_initializer.main(config)\n 
else:\n pass\n print('END: experiment {}. \\n'.format(i))", "def test_resource_manager_on_driver():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"resource-manager\": {\n \"server\": \"driver\",\n \"port\": 4000,\n \"config\": {\n \"read_reqs\": 123,\n \"read_data\": 456,\n \"write_reqs\": 789,\n \"write_data\": 321\n }\n }\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-resource-manager-on-driver-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n client = ResourceManagerClient('127.0.0.1', 4000)\n mgr_config = client.read_config()\n assert mgr_config == config[\"resource-manager\"][\"config\"], \\\n \"Resource manager config does not match the one in the workflow config\"\n \n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n assert execute.didrun\n \n # FIXME: For mysterious reasons, the check below does not work on Travis-CI.\n # Somehow, read_config() succeeds despite the fact that\n # the resource manager server was already terminated??\n if os.environ.get('TRAVIS', '') == 'true':\n pytest.skip(\"Skipping resource manager shutdown check on Travis-CI\")\n\n # Server should not be running any more after workflow exits.\n with pytest.raises(TimeoutError):\n client2 = ResourceManagerClient('127.0.0.1', 4000)\n client2.read_config()", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def test_run_started(self):", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. 
Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def run_starter(self, expect_to_fail=False):", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
You can provide an initialization script for each worker to call before the workflow starts. The most common use-case for such a script is to launch a local dvid server on each worker (for posting in parallel to the cloud). We provide the necessary script for local dvid workers out-of-the-box, in scripts/worker-dvid. This test verifies that it works.
def test_worker_dvid_initialization(): repo_dir = Path(flyemflows.__file__).parent.parent template_dir = tempfile.mkdtemp(suffix="test-worker-dvid") # Copy worker script/config into the template shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml', f'{template_dir}/dvid.toml') shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh', f'{template_dir}/launch-worker-dvid.sh') config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "worker-initialization": { "script-path": "launch-worker-dvid.sh", "only-once-per-machine": True, "script-args": ["_TEST_SCRIPT_FAKE_ARG_"], # This is just here to make it easy to identify the process "launch-delay": 1.0 } } with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) def is_worker_dvid_running(): return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0 @checkrun def execute(workflow_inst): script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent assert is_worker_dvid_running(), f"Worker DVID is not running. Check logs in:\n{script_dir}" _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute) script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent assert not is_worker_dvid_running(), \ ("Worker DVID remained running after the workflow exited."\ f"Check logs in:\n{script_dir}")
[ "def test_worker_initialization(setup_worker_initialization_template):\n template_dir, _config, once_per_machine = setup_worker_initialization_template\n \n num_workers = 2\n if once_per_machine or CLUSTER_TYPE in (\"synchronous\", \"processes\"):\n expected_script_count = 1\n else:\n expected_script_count = num_workers\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n assert script_count > 0, f\"Worker script is not running. Check logs in:\\n{script_dir}\"\n assert script_count <= expected_script_count, f\"Worker script started too many times. Check logs in:\\n{script_dir}\"\n assert script_count == expected_script_count, f\"Worker script not started on all workers. Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n\n assert script_count == 0, \\\n (\"Worker script(s) remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def init_worker(*shared_args_list):\n global SHARED_ARGS\n SHARED_ARGS = shared_args_list", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def worker_init_fn(worker_id):\n base_seed = torch.IntTensor(1).random_().item()\n # print(worker_id, base_seed)\n np.random.seed(base_seed + worker_id)", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def setUp(self):\n self.weblab_instance_runner = TestWeblabInstanceRunner()\n self.weblab_instance_runner.start()\n self.weblab_instance_runner.wait_until_ready(10)\n self.core_server_url = self.weblab_instance_runner.core_server_url", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = 
{'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def test_setup_sync(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper.setup(), None)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def _dataloader_worker_init(*args, **kwargs):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def setUp(self):\n self.launch = kafka_bin_directory + '{}-server-start.sh '\n self.stop = kafka_bin_directory + '{}-server-stop.sh '\n self.start_network()\n kafka_download()\n self.clients_initialize()\n self.data_directories_cleanup()\n self.components_start()\n # wait for fuse-kafka to be ready\n time.sleep(2)", "def 
setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def compute_worker_init(BROKER_URL, BROKER_USE_SSL=False):\n # Get proper SSL flag value\n BROKER_USE_SSL = bool(BROKER_USE_SSL)\n\n # Make .env file settings for worker\n env_file = 'BROKER_URL={}'.format(BROKER_URL)\n\n if BROKER_USE_SSL:\n env_file += \"\\nBROKER_USE_SSL=True\"\n\n # Custom hostname?\n host_group_name = env['tasks'][0].split(':')[1]\n\n config = yaml.load(open('server_config.yaml').read())\n\n # if the configuration has an entry for custom hostnames, add them to the server\n if 'hostnames' in config[host_group_name]:\n host_name_index = env['hosts'].index(env['host_string'])\n hostname = config[host_group_name]['hostnames'][host_name_index]\n else:\n hostname = env['host_string']\n env_file += \"\\nCODALAB_HOSTNAME={}\".format(hostname)\n\n run('echo \"{}\" > .env'.format(env_file))\n\n # Install docker\n run('curl https://get.docker.com | sudo sh')\n\n # Add user to group and reset user group settings so we can run docker without sudo\n user = str(run(\"echo $USER\")) # we have to get the user name this way...\n sudo('usermod -aG docker {}'.format(user))", "def _start(self, workerid, job_count=None, job_name=None):\n return slurm.submit(\n \"{} -m cluster_tools.remote {}\".format(sys.executable, workerid),\n job_resources=self.job_resources,\n job_name=self.job_name if self.job_name is not None else job_name,\n additional_setup_lines=self.additional_setup_lines,\n job_count=job_count,\n )", "def main():\n config = get_environment_config()\n\n create_test_buckets(config, BUCKET_NAMES)\n create_test_datasets(config, DATASET_NAMES)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the next power of 10
def nextpow10(n): if n == 0: return 0 else: return math.ceil(math.log10(abs(n)))
[ "def nextpower (n, base = 2.0):\n x = base**np.ceil(np.log(n) / np.log(base))\n if type(n) == np.ndarray:\n return np.asarray (x, dtype=int)\n else:\n return int (x)", "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def _next_power_of_two(self, n):\n if n == 0:\n return 1\n return int(2 ** math.ceil(math.log2(n)))", "def next_power2(num):\n return 2 ** int(np.ceil(np.log2(num)))", "def next_power_of(k, n):\n res = np.ceil(np.log(n) / np.log(k))\n return k ** res.astype('int')", "def pseudo_pow(num):\r\n \r\n pseudo = num\r\n divisor = math.pow(10, 10)\r\n for i in range(num - 1):\r\n pseudo = int((pseudo * num) % divisor)\r\n return pseudo", "def next_power_2(x):\n # Function which finds the nearest number that is 2 raised to some power\n return 1 if x == 0 else 2**(x-1).bit_length()", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n", "def non_recursive_power(base, power):\n result = 1\n i = 0\n while i < power:\n result = result * base\n i = i+1\n\n return result", "def lastndigits(n, p):\n return p % 10**n", "def last_ten():\r\n val = 28433 * pow(2, 7830457) + 1\r\n return str(val)[-10:]", "def power(num, n=2):\n new_value = num ** n\n return (new_value)", "def calc_power(val):\n\tpower = int(input(\"Enter the power of 10: \"))\n\treturn val**power", "def _pow_(self, n):\n assert n > 0\n return generic_power(self, n)", "def power_with_base(a, b, base=2):\n assert base >= 2\n result = 1\n while b > 0:\n x = 1\n residual = b % base\n for i in range(residual):\n x *= a\n result *= x\n for i in range(base - residual):\n x *= a\n a = x\n b = b // base\n return result", "def nextPow2(length): \n return int(2**np.ceil(np.log2(length)))", "def power(x, n):\n power = 1\n for i in range(abs(n)):\n power = multiply(power, x) \n return power", "def my_power(base, exponent):\r\n \r\n mult = base\r\n while exponent > 1 and type(base) != str and type(exponent) == int:\r\n mult = mult * base\r\n exponent = exponent - 1\r\n return mult" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a number that looks 'nice', with a maximum error
def magicnr(value, error): magics = [ (10 ** (nextpow10(error))), (10 ** (nextpow10(error))) / 2.0, (10 ** (nextpow10(error))) / 4.0, (10 ** (nextpow10(error))) / 10.0, (10 ** (nextpow10(error))) / 20.0, (10 ** (nextpow10(error))) / 40.0, (10 ** (nextpow10(error))) / 100.0, ] magics.sort() magics.reverse() magic = magics[-1] for n in magics: if n < abs(value): magic = n break return fround(value, magic)
[ "def _get_precision(err):\n return max(0, int(-math.log10(2 * err)) + 1)", "def current_limit_error():\n raise ValueError(\"Current can not be higher than 0.1A\")", "def class_maximum_value(class_number):\n return (10 ** class_number - 1)", "def im_not_sleepy() -> str:\n digit_values = {0: 6, 1: 2, 2: 5, 3: 5, 4: 4, 5: 5, 6: 6, 7: 3, 8: 7, 9: 6} # set values of digits with # of bars\n digits = get_digits(digit_values) # get the digits of the time requiring the most bars\n bars = get_bars(digit_values, digits) # sum of bars is all digits' corresponding value together\n time = get_time(digits) # get the time with by giving get_time the digits found prior\n return f\"The time requiring the most amount of bars is:\\n{time} with {bars} bars\"", "def enlarge(n):\n return n * 100", "def safe_calc(exponent):\n\n if exponent > 700:\n return sys.float_info.max\n else:\n return math.exp(exponent)", "def ghmult_plain(x: int) -> str:\n mult = x / 10000\n if int(mult) == mult:\n mult = int(mult)\n return '{}'.format(mult)", "def max(self) -> int:", "def get_g_unperturbed(n: int) -> int:\n return int(2*(n**2))", "def enlarge(n):\n\n return n* 100", "def MakeHumanReadable(num):\n i = 0\n while i+1 < len(EXP_STRINGS) and num >= (2 ** EXP_STRINGS[i+1][0]):\n i += 1\n rounded_val = round(float(num) / 2 ** EXP_STRINGS[i][0], 2)\n return '%s %s' % (rounded_val, EXP_STRINGS[i][1])", "def _get_max_scale(self) -> int:", "def max_primer_length(tm_max):\n return int((float(tm_max) + 7.5) / 2.5)", "def fail_max(self) -> int:\n return self._fail_max", "def Nice(self, value):\n if sys.platform == \"win32\":\n return\n if (value < 0):\n value = 0\n nice_inc = value - os.nice(0)\n os.nice(nice_inc)", "def _nice(x, round=False):\n if x <= 0:\n import warnings\n warnings.warn(\"Invalid (negative) range passed to tick interval calculation\")\n x = abs(x)\n expv = floor(log10(x))\n f = x / pow(10, expv)\n if round:\n if f < 1.75:\n nf = 1.0\n elif f < 3.75:\n nf = 2.5\n elif f < 7.0:\n nf = 5.0\n else:\n nf = 10.0\n else:\n if f <= 1.0:\n nf = 1.0\n elif f <= 2.5:\n nf = 2.5\n elif f <= 5.0:\n nf = 5.0\n else:\n nf = 10.0\n return nf * pow(10, expv)", "def _rand_value(max_value):\n return randint(1, max_value)", "def bayes_error_upper_bound(mutual_information):\n return 0.5 * (1.0 - mutual_information)", "def max_error(self) -> float:\n return float(np.max(np.abs(self._flattened_errors())))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the path to a CSV by name.
def _get_csv_path(name): return os.path.join(cwd, 'output/app_info', name)
[ "def csv_path(name):\n return \"./data/%s\" % name", "def songs_csv_file_path() -> Path:\n return data_dir_path().joinpath(\"songs.csv\")", "def __csvPath__(self):\n return \"%s/%s_%s_%s%s.csv\" % ( self.analysis_dir ,\n self.input_data.input_data.name ,\n self.input_data.name ,\n self.granularity ,\n self.name )", "def open_csv(self, csv_name):\n dirpath = normpath(join(dirname(__file__), \"../../csvs\"))\n csv_name = csv_name + \".csv\"\n filepath = join(dirpath, csv_name)\n try:\n file = pd.read_csv(filepath)\n return file\n except FileNotFoundError:\n print(\"FileNotFoundError: file %s not found in %s\" % (csv_name, dirpath))\n exit(1)", "def csv_filename(self):\n return self.prefix + '_' + self.imagename + '.csv'", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def get_cached_csv(self, category: str) -> str:\n csv_path = f\"{self.csv_dir}/{category.lower()}.csv\"\n if path.exists(csv_path):\n return csv_path\n raise FileNotFoundError(f\"There is no {category.lower()} CSV written yet.\")", "def get_directory_fn():\n return self.csv_directory.get_absolute_path()", "def get_csv_filename(name, lowered):\n return (\n \"individual-results/\"\n + name.replace(\" \", \"_\")\n + (\"-Lowered\" if lowered else \"\")\n + \".csv\"\n )", "def __csvPath__(self):\n return \"%s/%s_%s_analysis.csv\" % (self.strategy.analysis_dir, self.strategy.input_data.name, self.strategy.name)", "def csvPathname(self, scenario, baseline=None, outputDir='.', type=RESULT_TYPE_SCENARIO):\n # Output files are stored in the output dir with same name as query file but with 'csv' extension.\n basename = os.path.basename(self.queryFile)\n mainPart, extension = os.path.splitext(basename)\n middle = scenario if type == RESULT_TYPE_SCENARIO else (\"%s-%s\" % (scenario, baseline))\n csvFile = \"%s-%s.csv\" % (mainPart, middle)\n csvPath = os.path.abspath(os.path.join(outputDir, csvFile))\n return csvPath", "def csv_path(self) -> str:\n return os.path.join(self.download_folder, \"oapen_metadata.csv\")", "def get_loc_year_csv(csv_name):\n fname = (csv_name.split('.'))[0].split('-')\n return fname[0], fname[1]", "def set_csv_file_name_and_download(csv_filename):\n if not os.path.exists(LOCAL_DATA_DIR):\n os.mkdir(LOCAL_DATA_DIR)\n csv_path = os.path.join(LOCAL_DATA_DIR, csv_filename)\n\n if os.path.exists(csv_path):\n # If the CSV already exists on disk, just use it.\n pass\n else:\n # The CSV has to come from Civis Platform.\n file_id = civis.find_one(CSV_FILES, file_name=csv_filename).id\n if file_id is None:\n raise ValueError(f\"CSV file not retrievable without a Civis file ID\")\n civis.io.civis_to_file(file_id, csv_path)\n logging.info(\"CSV downloaded to %s\", csv_path)\n return csv_path", "def create_by_path_csv_convert(path: Path) -> FileCsvConvert:\n for file_csv_convert in FileCsvConvert:\n if path.name == file_csv_convert.value.name:\n return file_csv_convert\n msg = \"can't detect account type by csv file name. 
Please confirm csv file name.\"\n raise ValueError(msg)", "def get_curve_path(curves_dir, star_id):\n curve_file = \"%s.csv\" % star_id\n curve_path = path.join(curves_dir, curve_file)\n\n return curve_path", "def openCsv():\n csvFile = 'BDO_app/modules/crafting/alchemyRecipes.csv'\n return csvFile", "def _get_csv_details(csv_in_filename, csv_out_filename):\n var_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'var'))\n csv_in_path = os.path.join(var_dir, csv_in_filename)\n csv_out_path = os.path.join(var_dir, csv_out_filename)\n\n assert os.access(csv_in_path, os.R_OK), \\\n \"Unable to read CSV path: {}\".format(csv_in_path)\n\n csv_out_dir = os.path.dirname(csv_out_path)\n assert os.access(csv_out_dir, os.W_OK), \\\n \"Unable to write to CSV out dir: {}\".format(csv_out_dir)\n\n return csv_in_path, csv_out_path", "def _filename_for(self, ticker):\n return self.data_location + ticker.upper() + \".csv\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the app's name.
def _get_app_name(app): return app[APP_NAME_KEY]
[ "def get_name():\n return config.APP_NAME", "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def app_name(self):\n if getattr(self, \"_app_name\", None):\n return self._app_name\n return self.__class__.__name__.lower().replace(\"_\", \"-\")", "def get_application_name(): # real signature unknown; restored from __doc__\n return \"\"", "def GetAppName(*args, **kwargs):\n return _core_.PyApp_GetAppName(*args, **kwargs)", "def get_app_final_name(self):\n return self.props.get('app_final_name', self.project)", "def GetAppDisplayName(*args, **kwargs):\n return _core_.PyApp_GetAppDisplayName(*args, **kwargs)", "def app_display_name(self):\n return self.properties.get(\"appDisplayName\", None)", "def module_name(self) -> str | None:\n try:\n return self._app_name.replace(\"-\", \"_\")\n except AttributeError:\n # If the app was created from an interactive prompt,\n # there won't be a module name.\n return None", "def roma_app_name(self):\n return self._roma_app_name", "def app_name(self):\n return self._celery_app_name", "def getAppName(self,appID):\n #if type(appID) in [str,unicode]: appID = int(appID)\n if type(appID) in [str]: appID = int(appID)\n if self['entities'] is None: return str(appID)\n for appType in self['entities']:\n if type(self['entities'][appType]) is dict:\n if self['entities'][appType]['id'] == appID:\n return self['entities'][appType]['name']\n elif type(self['entities'][appType]) is list:\n for application in self['entities'][appType]:\n if application['id'] == appID:\n return application['name']\n if 'DEBUG' in locals(): sys.stderr.write(\"Application \"+str(appID)+\" is not loaded.\\n\")", "def module_name(self):\n return self.app_name.replace('-', '_')", "def get_app_name(i):\n return app_id + '-' + str(i)", "def select_app_name(self):\n name = generate_app_name()\n while App.objects.filter(id=name).exists():\n name = generate_app_name()\n\n return name", "def _get_app_name(self, app_old):\n return app_old if app_old not in ak_rename else ak_rename[app_old]", "def function_app_name(self) -> Optional[str]:\n return pulumi.get(self, \"function_app_name\")", "def fallback_application_name() -> str:\n # Import here instead of at the top to avoid an ImportError caused by an\n # import cycle. This can be removed once the import graph of id3c.cli is\n # less tangled.\n from ..cli.utils import running_command_name\n\n # \"The application_name can be any string of less than NAMEDATALEN\n # characters (64 characters in a standard build).\"¹\n #\n # psycopg2 / libpq will truncate for us, but they will issue a NOTICE log\n # message if they do. Avoid the cluttery notice by truncating ourselves.\n #\n # ¹ https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME\n max_len = 64\n appname = running_command_name()\n\n return shorten(appname, max_len, \"...\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the contact's first name.
def _get_contact_first_name(app): name = app.get(CONTACT_NAME_KEY) if name: return ' {}'.format(name.split(' ')[0])
[ "def first_name(self):\n return self._first_name", "def first_name(self, instance):\r\n return instance.user.first_name", "def user_first_name(self, instance):\n return instance.user.first_name", "def first_name_title(self):\n\t\tlocator = self._locator.FIRST_NAME_TITLE\n\t\treturn self.get_text_property(locator)", "def get_user_firstname():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n first_name = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('first_name', None), False)\n\n return first_name[0] if first_name else not_found('first_name')\n return None", "def contact_full_name(self):\n return self._contact_full_name", "def get_first_name(self):\n element = self.driver.find_element(*self.firstname_textbox_selector)\n return element.get_attribute(\"value\")", "def GetForename(self):\n return self.first_name", "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def pref_first_name(self):\n return self.known_as if self.known_as else self.first_name", "def get_first_name(tweet):\n if 'user' in tweet and 'name' in tweet['user']:\n parts = tweet['user']['name'].split()\n if len(parts) > 0:\n return parts[0].lower()", "def get_contact_name(contact: dict) -> str:\n contact_name = \"\"\n if contact[\"first_name\"]:\n contact_name = f\"{contact['first_name']} \"\n if contact[\"last_name\"]:\n contact_name += f\"{contact['last_name']} \"\n\n if contact_name:\n return f\"{contact_name}({contact['email']})\"\n return contact[\"email\"]", "def get_users_first_name(user_id):\n l = get_users()\n user = [u for u in l if u['id'] == user_id]\n if user:\n return user[0]['profile']['first_name']\n return ''", "def first_name(self):\n\t\tlocator = self._locator.FIRST_NAME_INPUT\n\t\telement = self.create_web_element(locator)\n\t\tvalue = element.element_value\n\t\treturn value", "def contact_first_name(self, contact_first_name):\n\n self._contact_first_name = contact_first_name", "def get_full_name(self):\n return self.email", "def first_name(self, name):\n self._first_name = name", "def get_contact_name(prefix: str) -> Optional[str]:\n entry = get_resource(prefix)\n if entry is None:\n return None\n return entry.get_contact_name()", "def lastname_first(self):\n return \", \".join([n for n in [self.last_name, self.first_name] if n])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the email template name for the first contact email.
def _get_first_contact_email_template_name(app): return app[FIRST_CONTACT_EMAIL_TEMPLATE_NAME_KEY]
[ "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def _get_contact_first_name(app):\n name = app.get(CONTACT_NAME_KEY)\n if name:\n return ' {}'.format(name.split(' ')[0])", "def get_template_name(self):\n\t\treturn self.template_name", "def get_contact_name(contact: dict) -> str:\n contact_name = \"\"\n if contact[\"first_name\"]:\n contact_name = f\"{contact['first_name']} \"\n if contact[\"last_name\"]:\n contact_name += f\"{contact['last_name']} \"\n\n if contact_name:\n return f\"{contact_name}({contact['email']})\"\n return contact[\"email\"]", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def template_name(self):\n return self._template_name", "def template_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"template_name\")", "def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")", "def get_name_from_email(email):\r\n individual_name = email.split('@')[0]\r\n parts = individual_name.split('.')\r\n name = \" \".join(parts).title()\r\n return name", "def get_email_template_id(self):\n return self.email_template_id", "def get_name_from_email(email: str) -> str:\n before_at_symbol = email.split(\"@\")[0]\n name_parts = before_at_symbol.split(\".\")\n name = \" \".join(name_parts).title()\n return name", "def get_full_name(self):\n return self.email", "def templateitemname(self):\n return self[\"templateitemname\"]", "def get_name_from_email(email):\n prefix = email.split('@')[0]\n name_parts = prefix.split('.')\n name = \" \".join(name_parts).title()\n return name", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def get_template():\r\n try:\r\n return CourseEmailTemplate.objects.get()\r\n except CourseEmailTemplate.DoesNotExist:\r\n log.exception(\"Attempting to fetch a non-existent course email template\")\r\n raise", "def find_template_name(self, regex, template_env=None):\n # Select template_env\n if not template_env:\n template_env = self._template_env\n\n # Find templates matching the regex\n template_list = template_env.list_templates(\n filter_func=lambda template_name: re.match(regex, template_name))\n\n # Select the first match\n if template_list:\n return template_list[0]\n else:\n return ''", "def launch_template_name(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the tote store url for this app.
def _get_app_tote_store_url(app):
    return app[APP_TOTE_STORE_URL]
[ "def getNoteStoreUrl(self, authenticationToken):\r\n pass", "def getNoteStoreUrl(self, authenticationToken):\r\n self.send_getNoteStoreUrl(authenticationToken)\r\n return self.recv_getNoteStoreUrl()", "def get_store_path(cls):\n user_data_dir = cls.user_data_dir()\n store_path = os.path.join(user_data_dir, 'store.json')\n return store_path", "def tradeoffer_url(self):\n return self._tradeoffer_url", "def token_store_path(self) -> str:\n return pulumi.get(self, \"token_store_path\")", "def get_store(self):\n return self.store", "def get_storename(self):\n return self.storename", "def store_path(self):\n return path.join(env.store_home, self._store_path)", "def url(self):\n return self.app.ticket_url(self)", "def get_save_url(self):\n return self.save_url % self._construct_object_dictionary(self.object)", "def logstore(self) -> str:\n return pulumi.get(self, \"logstore\")", "def get_secretsurl(self):\n return str(self.__secretsurl)", "def helper_get_alt_task_store_name(self):\n return self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"alt_task_store_name\")", "def _siteStore(self):\n return self.webapp.store.parent", "def get_store(self, store_name: str) -> Any:\n pass", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def _get_store(self):\n return self._store", "def get_url(self):\n return self.url", "def token_alternate_url(self) -> ConfigNodePropertyString:\n return self._token_alternate_url" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if we already sent the first contact email.
def _did_send_first_contact_email(app):
    first_contact = app[FIRST_CONTACT_EMAIL_SENT_KEY]
    if first_contact and first_contact.lower() == 'y':
        return True
    return False
[ "def is_first_message(self):\n return self._is_first_message", "def is_from_ourselves(self, email = None):\n\n\t\tif not email:\n\t\t\temail = self.get_sender()[1].lower()\n\n\t\tfor lvar in self.config.rc_mymails:\n\t\t\tif email in lvar.lower():\n\t\t\t\treturn 1\n\n\t\treturn 0", "def is_send_email(self):\n return self._is_send_email", "def fetch_mail(self):\n from interlink import DEFAULT_MAIL_CHECKER\n checker = DEFAULT_MAIL_CHECKER(self)\n try:\n checker.fetch_mail()\n return True\n except:\n traceback.print_exc()\n return False", "def is_replied_to(thread):\r\n messages = thread['messages']\r\n if len(messages) < 2:\r\n return False\r\n user_email = get_sender_email(messages[0])\r\n for i in range(1, len(messages)):\r\n sender_email = get_sender_email(messages[i])\r\n if user_email != sender_email:\r\n return True\r\n return False", "def is_first_message(self, is_first_message):\n self._is_first_message = is_first_message", "def is_email_sent(subject):\n return any([msg.subject == subject for msg in mail.outbox])", "def isOneWay(self):\n return len(self.messages) == 1", "def alreadyRequested( self , email = False ):\n \n if not email:\n return True\n upc = UserProfileClient( REG_PROFILE_NAME , getRPCClient )\n result = upc.retrieveVar( email )\n gLogger.info( result )\n if result[ \"OK\" ]:\n return True\n return False", "def valid_smtp_sender(self):\n\n\t\tret = self.mail.smtp_validate(email = self.get_sender()[1], \n\t\t\t\t\t\t\t\t\t envelope_from = self.get_matching_recipient())\n\n\t\tself.log.write(5, \" valid_smtp_sender: SMTP authentication returned %d\" % ret)\n\n\t\t## Returns possibly valid in case of errors.\n\n\t\tif ret != 0:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0", "def test_email_good(self):\n ad_rep_leads = list(\n AD_REP_LEAD_FACTORY.create_ad_rep_leads(create_count=2))\n for ad_rep_lead in ad_rep_leads:\n ad_rep_lead.create_datetime -= datetime.timedelta(1)\n ad_rep_lead.save()\n AD_REP_INVITE_TASK.run()\n self.assertEqual(len(mail.outbox), 2)\n ad_rep_lead_1_found = False\n ad_rep_lead_2_found = False\n for email in mail.outbox:\n if ad_rep_leads[0].first_name in email.alternatives[0][0]:\n ad_rep_lead_1_found = True\n self.assertTrue(ad_rep_leads[0].first_name in email.body)\n elif ad_rep_leads[1].first_name in email.alternatives[0][0]:\n ad_rep_lead_2_found = True\n self.assertTrue(ad_rep_leads[1].first_name in email.body)\n self.assertTrue(ad_rep_lead_1_found and ad_rep_lead_2_found)", "def passed_enough_time_since_last_email(last_email_sent_time):\n return time.time() - last_email_sent_time > TIME_BETWEEN_EACH_EMAIL_MSG", "def check_pending_customer_first_record(self):\n try:\n assert self.get_element(\n SupplierPageLocators.first_record_customer_name).text == contactInfo['company_name']\n assert self.get_element(\n SupplierPageLocators.first_record_state).text == companyInfo['state']\n total_locations = 0 if not bool(\n locationInfo) else 2 if 'name1' in locationInfo and 'name2' in locationInfo else 1\n assert self.get_element(\n SupplierPageLocators.first_record_no_of_location).text == str(total_locations)\n assert self.get_element(\n SupplierPageLocators.first_record_main_contact).text == contactInfo['contact_name']\n assert self.get_element(SupplierPageLocators.first_record_phone_number).text == contactInfo[\n 'phone_no'] if contactInfo['phone_no'][0] == '+' else '+' + contactInfo['phone_no']\n assert self.get_element('first_record_account_status').text == 'Pending'\n\n print (\"Success -> Pending Customer partial first record\")\n\n except:\n\n 
print (\"AssertionError --------> Customer partial details not found\")", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def sending(self):\n return not self._send_queue.empty()", "def check_fill_client_emails(self):\n from apps.client.models import Client\n clients = db.Query(Client) # generator\n for client in clients:\n if not client.email and client.merchant:\n client.email = merchant.get_attr('email')\n self.response.out.write('putting %r\\n' % client)\n client.put()", "def _check_email(self, cr, uid, ids, context=None):\n for email in self.browse(cr, uid, ids, context=context):\n if email.state == 'validated':\n cnt = self.search_count(cr, uid, [('state', '=', 'validated'),\n ('trigger', '=', email.trigger),\n ('target', '=', email.target),\n ('stylegroup_id', '=', email.stylegroup_id.id),\n ('id', '!=', email.id)],\n context=context)\n if cnt != 0:\n return False\n return True", "def eh_impacta(self):\n if 'faculdadeimpacta.com.br' in self._email:\n return True\n else:\n return False", "def waitforemail(self, sid=None, orig=None, session=None, nreq=None, nmail=None, last=None,\n globalcheck=None, deleteafteruse=True):\n othersearch = [\"HEADER\", \"Subject\", self.EXPECTED_SUBJECT, \"NOT DELETED\"\n ]\n for grp in self.SUBJECT_GROUPS:\n if type(locals()[grp]) is str:\n othersearch.append('HEADER')\n othersearch.append('Subject')\n othersearch.append(self.SEARCH_SUBJECT_PATTERN.format(group=grp, value=locals()[grp]))\n \n found = None\n LOGGER.debug(\"Searching for emails that contains %s\", othersearch)\n while not found:\n # searching e-mails\n with self.connlock:\n typ, uids = self.conn.uid('search', *othersearch)\n if not uids[0]:\n LOGGER.info(\"Nothing found.\")\n time.sleep(self.SEARCH_INTERVAL)\n continue\n # fetching them all\n LOGGER.info(\"Emails corresponding to %s found: %s\", othersearch, uids[0].decode())\n with self.connlock:\n typ, responses = self.conn.uid('fetch', \",\".join(uids[0].decode().split()), \"(UID RFC822.HEADER)\")\n LOGGER.debug(\"Fetched headers %s: %r\", typ, responses)\n for response in responses:\n if type(response) is not tuple:\n #LOGGER.debug(\"Discarded response %s: %s\", type(response), response)\n continue\n # read response header\n msgnum, _, uid, _, hsize = response[0].decode().split()\n # hsize = int(hsize.replace('{','').replace('}'))\n sheader = response[1]\n header = self.parser.parsebytes(sheader)\n subject = header['Subject']\n LOGGER.debug(\"Checking email uid=%s msgnum=%s: %s\", uid, msgnum, subject) \n # parse the header\n m = self.SUBJECT_RX.match(subject)\n if not m:\n LOGGER.warn(\"Unable to parse subject: %s\", subject)\n continue\n # tests the header\n corresponds = True\n for k, v in m.groupdict().items():\n if locals()[k] is not None:\n if callable(locals()[k]) and not locals()[k](v):\n corresponds = False\n LOGGER.info(\"Email uid %s does not meet awaited condition on %s\", uid, k)\n break\n if corresponds and globalcheck is not None:\n corresponds = globalcheck(header, m)\n # stop if ok\n if corresponds:\n found = uid\n break\n if found: break\n if found: break\n time.sleep(self.SEARCH_INTERVAL)\n # fetch full e-mail\n parsedemail = self.email2mime(found)\n if deleteafteruse:\n self.deleteemail(found)\n return parsedemail" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends out emails to the apps in the provided csv.
def send(app_csv='apps.csv', verbose=True, dry_run=True):
    results = []
    app_info = _csv_to_dict(app_csv)
    for app in app_info:
        # Get all the app info needed for this request.
        app_name = _get_app_name(app)
        contact_first_name = _get_contact_first_name(app)
        email_address = _get_contact_email(app)
        app_tote_store_url = _get_app_tote_store_url(app)
        subject = _get_email_subject(app_name)

        # If we already sent the first contact email, continue.
        if _did_send_first_contact_email(app):
            result = dict(
                app_name=app_name,
                contact_first_name=contact_first_name,
                email_address=email_address,
                app_tote_store_url=app_tote_store_url,
                subject=subject,
                status='skipped',
                error=None,
            )
            logger.info(result)
            results.append(result)
            continue

        try:
            # Get the appropriate template to send.
            email_template = _get_first_contact_email_template_name(app)
            template = env.get_template(email_template)

            # Render the template with app info.
            content = template.render(
                app_name=app_name,
                contact_first_name=contact_first_name,
                app_tote_store_url=app_tote_store_url,
            )

            send_email(to=email_address, subject=subject, html=content, dry_run=dry_run)

            result = dict(
                app_name=app_name,
                contact_first_name=contact_first_name,
                email_address=email_address,
                app_tote_store_url=app_tote_store_url,
                subject=subject,
                status='success',
                error=None,
            )
        except Exception as e:
            result = dict(
                app_name=app_name,
                contact_first_name=contact_first_name,
                email_address=email_address,
                app_tote_store_url=app_tote_store_url,
                subject=subject,
                status='failure',
                error=str(e),
            )

        logger.info(result)
        results.append(result)

        # Sleep momentarily to avoid dos'ing the server.
        if not dry_run:
            time.sleep(0.1)

    if verbose:
        _print_summary(results)
[ "def send_email(self):\n server = self.config_smtp_server()\n contacts = open('contacts.csv', 'rb')\n reader = csv.DictReader(contacts)\n for person_details in reader:\n to_email = person_details['email']\n message = self.compose_message(person_details).as_string()\n server.sendmail(self.settings['from_email'], [to_email], message)\n server.quit()", "def send_emails_gmail():\n port = 587\n server = smtplib.SMTP('smtp.gmail.com',587)\n server.ehlo()\n server.starttls()\n sender_email = os.environ.get('COURSEEXPLOREREMAIL')\n password = os.environ.get('COURSEEXPLORERPASSWORD')\n\n server.login(sender_email, password)\n\n with open('emails.csv') as f:\n r = csv.reader(f)\n next(r)\n\n for email, coursecode, availability, capacity in r:\n\n if int(availability) > 0:\n message = \"From: \" + sender_email + \"\\nTo: \" + email + \"\\nSubject: Course Availability\\n\" + \"Good news, a spot has opened up! Here's the current availability for \" + coursecode + \"\\n\" + availability + \"/\" + capacity\n receiver_email = email\n server.sendmail(sender_email, receiver_email, message)\n time.sleep(0.5)\n\n remove_email_from_csv(email, coursecode)\n\n server.quit()", "def send_csv(self):\n\n csv_file = csv.reader(open(self.options['csv_file']))\n\n # Print CSV Header\n print \"Header:\" , csv_file.next() , \"Send:\" , self.options['send']\n print \"\"\n\n sent , missing , skipped = 0 , 0 , 0\n for row in csv_file:\n _ , phone_number , _ , action , text = row\n phone_number = \"+\" + phone_number\n\n if action.strip() == \"\":\n try:\n contact = cont.Contact.objects.get_from_phone_number(phone_number)\n except cont.Contact.DoesNotExist as e:\n print \"Missing:\" , phone_number , \" -> \" , text\n if self.options['send']:\n transports.send( phone_number , text )\n missing += 1\n else:\n if self.options['send']:\n contact.send_message( text )\n sent += 1\n else:\n skipped += 1\n\n print \"Sent:\" , sent , \"Missing:\" , missing , \"Skipped:\" , skipped", "def send_email(db, server, rows):\r\n\r\n col_names = dict()\r\n for loc, key in enumerate(rows[0]):\r\n col_names[key] = loc\r\n\r\n for row in rows[1:]:\r\n \r\n confirmation = row[col_names[\"confirmation\"]]\r\n if int(confirmation): continue\r\n \r\n email = row[col_names[\"email\"]]\r\n prod_id = row[col_names[\"product_id\"]]\r\n row_id = row[col_names[\"id\"]]\r\n if not re.match(\"\\w+@\\w+\\.\\w+\", email, re.ASCII):\r\n continue\r\n try:\r\n server.sendmail(EMAIL_ADDR, email,\r\n \"thank you for your order of %s\" % (prod_id))\r\n changed = change_value(db, row_id, \"confirmation\", 1)\r\n \r\n except ValueError as e:\r\n print(\"Email error \", e)\r\n continue\r\n\r\n time.sleep(1)\r\n\r\n return", "def send_email_to_all():\n # for a single name, create an email file for each of their donations\n x = [send_email(str(s), False) for s in donor_db]", "def main(arguments, emailer):\n emailer.read_config()\n print(\"Config read.\")\n emailer.setup_config(pages=arguments.pages,\n email_list=arguments.email_list,\n items_range=arguments.range,\n config=arguments.config,\n database=arguments.database,\n file=arguments.file,\n email_address=arguments.email_address,\n email_password=arguments.email_password,\n send_time=arguments.time,\n frequency=arguments.frequency)\n emailer.write_config()\n \n emailer.setup_database()\n if emailer.pull_items_search() != 'bot':\n print(\"Items retrieved\")\n else:\n return\n \n emailer.items_to_xls()\n print(\"xls file created.\")\n emailer.items_to_csv()\n print(\"csv file created\")\n\n print(\"Sending 
emails.\")\n emailer.send_email()", "def send_emails():\n\tprint('sending email...')\n\t\n\tcontacts = get_contacts()\n\tfsa_internship = get_fsa_internship()\n\n\tif not fsa_internship:\n\t\tsend_ack_email()\n\t\treturn\n\t\n\tfor name, email in contacts.items():\n\t\temail_text = MIMEMultipart()\n\n\t\tmessage = read_template().substitute(who=name, internship=fsa_internship, url=url)\n\n\t\ttoday = datetime.date.today()\n\t\tdate = today.strftime('%d/%m')\n\n\t\temail_text['From'] = ADDRESS\n\t\temail_text['To'] = email\n\t\temail_text['Subject'] = 'Chamada MPBA - {}'.format(date)\n\n\t\temail_text.attach(MIMEText(message, 'plain'))\n\n\t\temail_server.send_message(email_text)\n\n\t\tdel email_text", "def emailJobs(\n df, \n retainedCompany, \n senderName, \n defaultSenderEmail, \n emailPassword, \n senderTitle, \n senderCompany, \n senderCompanyHomePage, \n senderPhone, \n noContactCompanyListPickleFileName, \n port=465, \n returnHTML=True\n ):\n try:\n with open(noContactCompanyListPickleFileName, 'rb') as inputFile:\n noContactCompanyList = pickle.load(inputFile) \n except:\n noContactCompanyList = []\n\n for i in range(len(df)):\n companyName = df['Organization Name'][i]\n if companyName.lower() in noContactCompanyList:\n pass\n try:\n domainName = df['Domain'][i]\n jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']]\n # email all the jobs pages for that copmany\n sendEmails( \n 'guys', # addressing general company, so use 'guys' instead of individual name\n retainedCompany,\n companyName,\n jobsEmails,\n senderName,\n defaultSenderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=port,\n returnHTML = returnHTML \n ) \n except:\n pass", "def email_alert(smtp_server, sender, recipient, results):\n msg = email.mime.multipart.MIMEMultipart()\n msg['Subject'] = 'Execution result of command output to csv script'\n msg['From'] = sender\n msg['To'] = recipient\n mailobj = smtplib.SMTP(smtp_server)\n for result in results:\n try:\n part = MIMEApplication(open(result, 'rb').read())\n except TypeError:\n return\n part.add_header('Content-Disposition', 'attachment', filename=result)\n msg.attach(part)\n try:\n mailobj.sendmail(sender, recipient, msg.as_string())\n print('Email is sent for the result to %s' % recipient)\n mailobj.quit()\n except Exception as exc:\n print(str(exc))", "def write_emails_to_file(result_emails, category):\r\n\tf = open('emails.csv', 'wb')\r\n\tcsvWriter = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\tfor email in result_emails:\r\n\t\tcsvWriter.writerow([email, category])\t\r\n\tf.close()", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': 
line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def enviarMail(asunto,msg,lista):\n for l in lista:\n send_mail(asunto,\n msg, \n 'noreply.sgpa@gmail.com', \n [l.email],\n fail_silently=False)", "def _generate(self, address=None):\n if address is None:\n csv_file = open(self.csv_file_path, 'rt')\n users = csv.DictReader(csv_file)\n else:\n users = [dict(mail=address),]\n\n add_stats = self.config.get('default', 'stats', False)\n if not add_stats:\n mail_template = unicode(self.generate_mail())\n else:\n newsletter = self.config.get('default', 'newsletter-name')\n\n mfrom = '##From:%s\\n'%self.config.get('default', 'mfrom')\n\n for user in users:\n email = user['mail']\n if add_stats:\n mail_template = unicode(self.generate_mail_with_stats(newsletter, email))\n\n mail = '##To:%s\\n'%email\n mail += mfrom\n mail += mail_template.replace('$newsletter_to_addr', email)\n self.send_to_spool(mail)", "def write_to_csv(list_of_emails):\n import csv\n # use newline='' to prevent double-spaced rows\n with open('emails.csv', 'w', newline='') as outFile:\n outWriter = csv.writer(outFile)\n charNum = outWriter.writerow(['email'])\n for i in list_of_emails:\n charNum = outWriter.writerow([i])\n outFile.close()", "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def bulk_email(self, email_data):\n email_list = []\n for edata in email_data:\n if not isinstance(edata, EmailParameters):\n print(\"Invalid emails parameters\")\n continue\n try:\n email = edata.to\n cc_email = edata.cc\n bcc_email = edata.bcc\n subject = edata.subject\n data = edata.body\n template = edata.body[\"template\"]\n except Exception as e:\n print(\"Cannot send mail to {}\".format(e))\n continue\n if email is None:\n print(\"Email is empty!\")\n continue\n for em in email:\n if self.EMAIL_REGX.match(em) is None:\n print(\"Invalid email address!\")\n continue\n message = self._mail_render(data, template)\n email_list.append((email, subject, message, cc_email, bcc_email))\n if email_list:\n self.send_bulk(email_list)\n else:\n print(\"Cannot send mail to Email is empty!\")", "def send_validation_emails(self, **_):\n email_config = config.getSettingJson(config.GENOMIC_DAILY_VALIDATION_EMAILS, {})\n\n if not email_config.get('send_emails'):\n return\n\n validation_incidents = self.incident_dao.get_new_ingestion_incidents()\n\n if not validation_incidents:\n logging.info('No records found for validation email notifications')\n return\n\n recipients, cc_recipients = email_config.get('recipients'), email_config.get('cc_recipients')\n\n for gc, recipient_list in recipients.items():\n gc_validation_emails_to_send = list(filter(lambda x: x.submitted_gc_site_id == gc, validation_incidents))\n\n if gc_validation_emails_to_send:\n for gc_validation_email in gc_validation_emails_to_send:\n validation_message = gc_validation_email.message.split(':', 1)[1]\n message = f\"{validation_message.strip()}\\n\\n\"\n message += f\"Full 
file path: gs://{gc_validation_email.filePath}\\n\\n\"\n message += \"Please correct this file and re-upload to designated bucket.\"\n\n email_message = Email(\n recipients=recipient_list,\n cc_recipients=cc_recipients,\n subject=\"All of Us GC/DRC Manifest Ingestion Failure\",\n plain_text_content=message\n )\n\n EmailService.send_email(email_message)\n\n self.incident_dao.batch_update_incident_fields([obj.id for obj in gc_validation_emails_to_send])", "def send_bulk_course_email(entry_id, _xmodule_instance_args):\r\n # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.\r\n action_name = ugettext_noop('emailed')\r\n visit_fcn = perform_delegate_email_batches\r\n return run_main_task(entry_id, visit_fcn, action_name)", "def send_mails(self):\n for recepient in self.recipients:\n self.send_mail(recepient)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
writes data from instream into additional allocated clusters of given file. Metadata of this file will be stored in Metadata object
def write(self, instream: typ.BinaryIO, filepath: str, filename: str = None) -> None:
    if filename is not None:
        filename = path.basename(filename)
    if self.fs_type == 'FAT':
        allocator_metadata = self.fs.write(instream, filepath)
        self.metadata.add_file(filename, allocator_metadata)
    elif self.fs_type == 'NTFS':
        allocator_metadata = self.fs.write(instream, filepath)
        self.metadata.add_file(filename, allocator_metadata)
    else:
        raise NotImplementedError()
[ "def addFileToInfos(self, infos):\n with open(self.loc, 'rb') as fhandle:\n pos = 0L\n piece_length = 0\n for info in infos:\n piece_length = max(piece_length, info.hasher.pieceLength)\n info.add_file_info(self.size, self.path)\n\n while pos < self.size:\n nbytes = min(piece_length, self.size - pos)\n buf = fhandle.read(nbytes)\n pos += nbytes\n for info in infos:\n info.add_data(buf)", "def WriteClustersToImage(self):\n # Use the array we built earlier\n print(f\"Writing the following list of clusters to FAT structure: {self.cluster_list}\")\n padding = 3\n with open(self.output_file, \"r+b\") as fh:\n # The first cluster goes into offset 26 (2 Bytes) in root directory\n seeker = (self.root_directory_offset*self.sector_size)+((self.index_number-1)*self.directory_index_size)+(self.starting_cluster_offset)\n # Convert first item in list to two bytes\n first_address = (self.cluster_list[0]).to_bytes(2, byteorder='little')\n print(f\"If I were me, I'd write {first_address} to {seeker}\")\n fh.seek(seeker)\n fh.write(first_address)\n # Now, the rest are written to FAT area\n for i, item in enumerate(self.cluster_list):\n # If Entry 1 then the byte calculation returned a whole number\n # If Entry 2 then the byte calculation returned a half number\n # This item determines where we write the data\n entry1, entry2, seeker = self.IsEntryHighOrLow(item)\n # The data we are writing is the next item\n if i+1 >= len(self.cluster_list):\n next_item = 4095\n else:\n next_item = self.cluster_list[i+1]\n # If we're at the end of the list then write 0xfff\n print(f\"Ready to perform calculations on {next_item} (hex:{hex(next_item)}) [entry1={entry1}; entry2={entry2}, seeker={seeker}]\")\n fh.seek(seeker)\n my_bytes = b'\\x00'+fh.read(3)\n if self.debug:\n print(f\"bytes from disk image: {my_bytes}\")\n unpacked_bytes, = struct.unpack('>I', bytes(my_bytes))\n if self.debug:\n print(type(unpacked_bytes), unpacked_bytes)\n nstr = str(hex(unpacked_bytes)).replace('0x', '').zfill(6)\n le_three_bytes = \"\".join(map(str.__add__, nstr[-2::-2] ,nstr[-1::-2]))\n if self.debug:\n print(f\"Existing values: unpacked_bytes:{hex(unpacked_bytes)}|nstr:{nstr}|(le)three_bytes:{le_three_bytes}|Entry1={le_three_bytes[-3:]}|Entry2={le_three_bytes[:3]}\")\n if entry1:\n # We need to deal with entry1 (see page 7 of scan24 paper)\n if self.debug:\n print(\"Updating entry1\")\n entry1_bytes = hex(next_item)[2:].zfill(3)\n entry2_bytes = le_three_bytes[:3]\n else:\n if self.debug:\n print(\"Updating entry2\")\n entry1_bytes = le_three_bytes[-3:]\n entry2_bytes = hex(next_item)[2:].zfill(3)\n new_entry = f\"{entry2_bytes}{entry1_bytes}\"\n if self.debug:\n print(f\"new_entry: {new_entry}\")\n packed_bytes = struct.pack('<I', int(new_entry, 16))\n if self.debug:\n print(f\"Writing packed_bytes ({packed_bytes[:-1]}) to {seeker}\")\n fh.seek(seeker)\n fh.write(packed_bytes[:-1])\n print(f\"{self.filename}.{self.extension} written to root directory index #{self.index_number}\")\n return True", "def __write_matlab_clusters(tel, filename):\n # type: (TelescopeAnalysis, str) -> None\n centre_x = np.array([])\n centre_y = np.array([])\n points_x = np.array([])\n points_y = np.array([])\n for name in tel.layouts:\n if name == 'ska1_v5':\n continue\n layout = tel.layouts[name]\n centre_x = np.hstack((centre_x, layout['cx']))\n centre_y = np.hstack((centre_y, layout['cy']))\n if points_x.size == 0:\n points_x = layout['x']\n points_y = layout['y']\n else:\n points_x = np.vstack((points_x, layout['x']))\n points_y = np.vstack((points_y, 
layout['y']))\n savemat(filename, dict(centre_x=centre_x, centre_y=centre_y,\n antennas_x=points_x, antennas_y=points_y))", "def insert_bicluster_info( self, db, db_file, run2id, row2id, col2id ):\n\t\t# Get all biclusters from cmonkey run\n\t\tconn = sqlite3.connect(db_file)\n\t \tc = conn.cursor()\n\t \tc.execute(\"SELECT max(iteration) FROM cluster_stats;\")\n\t \tlast_run = c.fetchone()[0] # i think there is an indexing problem in cMonkey python!! \n\t \tw = (last_run,)\n\t \tc.execute(\"SELECT cluster FROM cluster_stats WHERE iteration = ?;\",w)\n\t\tbiclusters = [self.assemble_bicluster_info_single( db, db_file, c, last_run, i[0], run2id, row2id, col2id ) for i in c.fetchall()]\n\t\tbicluster_info_collection = self.db.bicluster_info\n\n\t\t# Check whether documents are already present in the collection before insertion\n\t\tif bicluster_info_collection.count() > 0:\n\t\t\td_f = filter( None, [ self.check4existence( bicluster_info_collection, i, \"run_id\", i[\"run_id\"], \"cluster\", i[\"cluster\"] ) for i in biclusters ] )\n\t\telse:\n\t\t\td_f = biclusters\n\t\t\n\n\t\tprint \"%s new records to write\" % len( d_f )\n\n\t\tif len(d_f) > 0:\n\t\t\tbicluster_info_collection.insert( d_f )\n\n\t\treturn bicluster_info_collection", "def write_block(self, block):\n # set basename\n if self.basename is None:\n logging.warning(\"warning: no basename provided, using `basename`\")\n self.basename = 'basename'\n\n # First create file handles for each group which will be stored\n self._make_all_file_handles(block)\n\n # We'll detect how many features belong in each group\n self._group2features = {}\n\n # Iterate through segments in this block\n for seg in block.segments:\n # Write each spiketrain of the segment\n for st in seg.spiketrains:\n # Get file handles for this spiketrain using its group\n group = self.st2group(st)\n fetfilehandle = self._fetfilehandles[group]\n clufilehandle = self._clufilehandles[group]\n\n # Get the id to write to clu file for this spike train\n cluster = self.st2cluster(st)\n\n # Choose sampling rate to convert to samples\n try:\n sr = st.annotations['sampling_rate']\n except KeyError:\n sr = self.sampling_rate\n\n # Convert to samples\n spike_times_in_samples = np.rint(\n np.array(st) * sr).astype(np.int64)\n\n # Try to get features from spiketrain\n try:\n all_features = st.annotations['waveform_features']\n except KeyError:\n # Use empty\n all_features = [\n [] for _ in range(len(spike_times_in_samples))]\n all_features = np.asarray(all_features)\n if all_features.ndim != 2:\n raise ValueError(\"waveform features should be 2d array\")\n\n # Check number of features we're supposed to have\n try:\n n_features = self._group2features[group]\n except KeyError:\n # First time through .. 
set number of features\n n_features = all_features.shape[1]\n self._group2features[group] = n_features\n\n # and write to first line of file\n fetfilehandle.write(\"%d\\n\" % n_features)\n if n_features != all_features.shape[1]:\n raise ValueError(\"inconsistent number of features: \" +\n \"supposed to be %d but I got %d\" %\n (n_features, all_features.shape[1]))\n\n # Write features and time for each spike\n for stt, features in zip(spike_times_in_samples, all_features):\n # first features\n for val in features:\n fetfilehandle.write(str(val))\n fetfilehandle.write(\" \")\n\n # now time\n fetfilehandle.write(\"%d\\n\" % stt)\n\n # and cluster id\n clufilehandle.write(\"%d\\n\" % cluster)\n\n # We're done, so close the files\n self._close_all_files()", "def writePointwiseData(self, writeTo):\n rlz = self._writeSegmentsRealization(writeTo)\n # add some cluster stuff\n # cluster features\n ## both scaled and unscaled\n featureNames = sorted(list(self._clusterInfo['features']['unscaled'].keys()))\n for scaling in ['unscaled','scaled']:\n for name in featureNames:\n varName = 'ClusterFeature|{}|{}'.format(name, scaling)\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(self._clusterInfo['features'][scaling][name])\n varName = 'ClusterLabels'\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(self._clusterInfo['labels'])\n writeTo.addRealization(rlz)", "def write(self):\n if self.nxfile:\n if self.nxfile.mode == napi.ACC_READ:\n raise NeXusError(\"NeXus file is readonly\")\n if not self.infile:\n shape = self.shape\n if shape == (): shape = (1,)\n with self.nxgroup as path:\n if np.prod(shape) > 10000:\n # Compress the fastest moving dimension of large datasets\n slab_dims = np.ones(len(shape),'i')\n if shape[-1] < 100000:\n slab_dims[-1] = shape[-1]\n else:\n slab_dims[-1] = 100000\n path.compmakedata(self.nxname, self.dtype, shape, 'lzw', \n slab_dims)\n else:\n # Don't use compression for small datasets\n path.makedata(self.nxname, self.dtype, shape)\n self._infile = True\n if not self.saved: \n with self as path:\n path._writeattrs(self.attrs)\n value = self.nxdata\n if value is not None:\n path.putdata(value)\n self._saved = True\n else:\n raise IOError(\"Data is not attached to a file\")", "def _write_cluster(metacluster, cluster, loci, idx, path):\n out_file = op.join(path, 'log', str(idx) + '.bed')\n with utils.safe_run(out_file):\n with open(out_file, 'w') as out_handle:\n for idc in metacluster:\n for idl in cluster[idc].loci2seq:\n pos = [e for e in loci[idl].list()]\n print(\"\\t\".join(pos[:4] + [str(len(cluster[idc].loci2seq[idl]))] + [pos[-1]]), file=out_handle, end=\"\")", "def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)", "def ior_write_dataset(self):\n for oclass in self.obj_class:\n for sizes in self.ior_chu_trs_blk_size:\n # Skip the object type if server count does not meet the minimum\n # EC object server count\n if oclass[1] > self.server_count:\n continue\n self.ior_param_update(oclass, sizes)\n\n # Create the new container with correct redundancy factor\n # for EC object type\n self.ec_contaier_create(oclass[0])\n self.update_ior_cmd_with_pool(oclass=oclass[0],\n create_cont=False)\n # Start IOR 
Write\n self.container.uuid = self.ec_container.uuid\n self.start_ior_load(operation=\"WriteRead\", percent=1,\n create_cont=False)\n self.cont_uuid.append(self.ior_cmd.dfs_cont.value)", "def clientapp_write_file(blocks_info): \n file_name = blocks_info['file_name']\n block_size = blocks_info['block_size']\n blocks_to_dns = blocks_info['data_blocks']\n # Sort the blocks \n block_l = []\n for block_id in blocks_to_dns.keys():\n block_l.append( int( block_id ))\n block_l.sort()\n # Read the file in chunks and send to datanodes\n read_blocks_and_send_to_dns( file_name, block_size, block_l , blocks_to_dns )", "def seek_to_cluster(self, cluster):\n self.infile.seek(self.cluster_to_physical_offset(cluster))", "def save_clusters(self, filehandle):\n with open(filehandle, 'w') as fh:\n for c in self.clusters:\n fh.write(' '.join(c) + \"\\n\")", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def cluster_data(data_loc, num_clusters, base_destination, vectorizer):\n cluster_df = __title_cluster_df(data_loc, num_clusters, vectorizer)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vectorizer, f)\n cluster_stats = {}\n for i in range(num_clusters):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_stats[i] = titles.shape[0]\n cluster_data = __get_data_with_titles(data_loc, titles)\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(i))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(cluster_stats[cluster]) + '\\n')", "def store_clusters(mapping, sff_fp, outdir=\"/tmp/\", store_members=False):\r\n\r\n # get mapping read to cluster\r\n invert_map = invert_mapping(mapping)\r\n (flowgrams, header) = lazy_parse_sff_handle(open(sff_fp))\r\n\r\n leftover_fasta_fh = open(outdir + \"/singletons.fasta\", \"w\")\r\n centroids = []\r\n for f in flowgrams:\r\n try:\r\n key = invert_map[f.Name]\r\n except KeyError:\r\n # this flowgram has not been clustered\r\n continue\r\n if (len(mapping[key]) == 0):\r\n # do not store singletons in a separate cluster\r\n leftover_fasta_fh.write(f.toFasta() + \"\\n\")\r\n continue\r\n elif(f.Name in mapping):\r\n # save as a centroid\r\n centroids.append((len(mapping[f.Name]) + 1, f.Name, f.toSeq()))\r\n\r\n if (store_members):\r\n flows_fh = open(outdir + key + \".flows\", \"a\")\r\n fasta_fh = open(outdir + key + \".fasta\", \"a\")\r\n flows_fh.write(\"%s\\n\" % f)\r\n fasta_fh.write(f.toFasta() + \"\\n\")\r\n fasta_fh.close()\r\n flows_fh.close()\r\n\r\n leftover_fasta_fh.close()\r\n\r\n # sort and store ordered by cluster_size\r\n centroids.sort(reverse=True)\r\n centroid_fh = open(outdir + \"/centroids.fasta\", \"w\")\r\n for size, name, seq in centroids:\r\n centroid_fh.write(\">%s | cluster size: %d \\n%s\\n\" %\r\n (name, size, seq))\r\n centroid_fh.close()", "def exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2):\n\n filename = string.replace(filename,'.pdf','.txt')\n export_text = open(filename,'w')\n column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\\t')+'\\n' ### format 
column-names for export\n export_text.write(column_header)\n column_clusters = string.join(['column_clusters-flat','-']+ map(str, ind2),'\\t')+'\\n' ### format column-flat-clusters for export\n export_text.write(column_clusters)\n\n ### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match\n new_row_header = new_row_header[::-1]\n xt = xt[::-1]\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_text.write(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\\t')+'\\n')\n i+=1\n export_text.close()\n\n ### Transpose text file for easier reading!\n oldfile_h = open(filename, 'rb')\n\n elements = [ line.split() for line in oldfile_h ]\n oldfile_h.close()\n\n biglist = []\n for splitline in elements:\n #print len(splitline)\n #print splitline\n biglist.append(splitline)\n newarray = numpy.array(biglist)\n #print numpy.shape(newarray)\n t_array = newarray.transpose()\n #print numpy.shape(t_array)\n #print newarray[:,0]\n\n newfile_h = open(filename[:-4] + \"_transposed.txt\" , 'w')\n for row in t_array:\n #print \"The row is currently: %r\" % row\n newfile_h.write(\"\\t\".join(row) + \"\\n\")\n newfile_h.close()\n\n\n ### Export as CDT file\n filename = string.replace(filename,'.txt','.cdt')\n export_cdt = open(filename,'w')\n column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_cdt.write(column_header)\n eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\\t')+'\\n' ### format column-flat-clusters for export\n export_cdt.write(eweight)\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_cdt.write(string.join([new_row_header[i]]*2+['1']+map(str, row),'\\t')+'\\n')\n i+=1\n export_cdt.close()", "def write_chunks(self, file):\n\n for c in self.chunks:\n\n self.chunk(file, c[0], c[1])", "def save_cluster_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_cluster_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.cluster_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']))\\\n .to_csv(f'{self.path_to_cluster_metrics}/{self.file_name}.csv', index=False)\n self.cluster_metrics = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
clears the slackspace of files. Information of them is stored in metadata.
def clear(self):
    if self.fs_type == 'FAT':
        for file_entry in self.metadata.get_files():
            file_metadata = file_entry['metadata']
            file_metadata = FATAllocatorMeta(file_metadata)
            self.fs.clear(file_metadata)
    elif self.fs_type == 'NTFS':
        for file_entry in self.metadata.get_files():
            file_metadata = file_entry['metadata']
            file_metadata = NTFSAllocatorMeta(file_metadata)
            self.fs.clear(file_metadata)
    else:
        raise NotImplementedError()
[ "def clean_files(self):\n self.filenames.clear()", "def clear(self):\n self.statistics['maxSize'].clear()\n self.statistics['minSize'].clear()\n self.statistics['avgSize'].clear()\n self.statistics['sumSize'].clear()\n self.statistics['numFiles'] = 0\n self.files = []", "def clear(self):\n self.delete_content_of_dir(\"\")", "def ClearAllFilesFunctionality(self):\n \n self.opened_files_dict.clear()\n self.update_dataframe()", "def cleanup(self):\n\n if os.path.exists(self.name):\n print >>stderr,\"Cleaning up temp srclist file:\",self.name\n os.remove(self.name)\n\n for fd in self.hdfs_files:\n for k in ['image','shear','fitpsf']:\n fd[k].cleanup()", "def clear_file_info(self):\n self.file_info = {}", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clear_data_files():\n demo_folder = osp.join(osp.dirname(osp.dirname(__file__)), 'demo_files')\n if osp.isdir(demo_folder):\n for file in os.listdir(demo_folder):\n full_file = osp.join(demo_folder, file)\n if osp.isfile(full_file):\n os.remove(full_file)\n print(\"Pylinac data files cleared.\")", "def _clear(self):\n self._buffer = None\n # ensure cleaning of tmp files and free memory\n for i in self._images:\n i.disable_memmap()\n i.data = None\n self._images = []\n self._shape = None\n self._unit = None", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []", "def clear(self):\n self.file_name.delete(\"1.0\", END)\n self.youtube_link.delete(\"1.0\", END)\n self.set_status_label(\"Hello \" + getuser() + \", Welcome to Panther Youtube Downloader\")", "def clear_files(self,file_type):\n try:\n self.files[file_type] = []\n mbuild.msgb(\"REMOVING FILE TYPE\", file_type)\n except:\n xbc.cdie(\"Invalid type of file (%s) not found: \" %\n (file_type))", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clean(self):\n# logging.info(\"Cleaning clusterkafka/temp\")\n# api.local(\"rm -r ./temp/* 2>/dev/null\")\n# TODO remove comments on above two lines, commented out to stop cleaning removing downloaded kafka\n log.info(\"Cleaning clusterkafka/templates 2>/dev/null\")\n api.local(\"rm -r ./templates/*.properties 2>/dev/null\")\n\n log.info(\"Cleaning ./logs/CK_logs/\")\n api.local(\"rm -r ./logs/CK_logs/* 2>/dev/null\")", "def ResetFiles(self):\n if self._summary_file:\n self._summary_file.close()\n timestamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())\n\n self._summary_file = open(\n os.path.join(self._outdir, 'summary%s' % timestamp), 'w')", "def __clear_file_list(self):\n self.filenames = []\n self.lineedit.setText(\"\")\n self.__update_status()\n\n self.checks = []\n self.checkboxes = []", "def clear_loaded_data():\n global _last_mono_file, _loaded_data\n _last_mono_file = None\n for data_ws in _loaded_data:\n DeleteWorkspace(data_ws)\n _loaded_data = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the namespace_name of this ClairpbVulnerability.
def namespace_name(self, namespace_name):
    self._namespace_name = namespace_name
[ "def namespace_name(self, namespace_name):\n self._namespace_name = namespace_name", "def set_namespace(self, namespace: str) -> None:\n self._namespace = namespace", "def namespace(self, namespace: str):\n\n self._namespace = namespace", "def name_space(self, name_space: str):\n\n self._name_space = name_space", "def set_current_namespace(self, namespace: N) -> None:\n pass", "def set_target_namespace(self, namespace):\n # do shit\n self.target_namespace = namespace.strip(\":\")", "def replace_namespace(self, request, namespace):\n return self._replace(\n request,\n u\"namespaces\",\n None,\n namespace,\n )", "def set(namespace, name):", "def _set_namespace(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"namespace\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/module-catalog', defining_module='openconfig-module-catalog', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"namespace must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"namespace\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/module-catalog', defining_module='openconfig-module-catalog', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__namespace = t\n if hasattr(self, '_set'):\n self._set()", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n self.c_prefix, self.f_prefix = ns_for_name[self.name]", "def set_test_namespace_value(namespace_name=None):\r\n global namespace_value\r\n namespace_value = namespace_name", "def namespace_name(self) -> str:\n return pulumi.get(self, \"namespace_name\")", "def namespace_name(self):\n return self._namespace_name", "def namespaces(self, namespaces):\n self._namespaces = namespaces", "def conventionalize_namespace(self, namespace: str) -> str:\n return _conventionalize(self.options, \"namespace\", namespace)", "def namespace_count(self, namespace_count):\n self._namespace_count = namespace_count", "def nvmf_namespace_num(self, nvmf_namespace_num):\n\n self._nvmf_namespace_num = nvmf_namespace_num", "def namespaces(self, namespaces):\n\n self._namespaces = namespaces", "def setElementNamespace(self, *args):\n return _libsbml.ASTBasePlugin_setElementNamespace(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the severity of this ClairpbVulnerability.
def severity(self, severity):
    self._severity = severity
[ "def severity(self, severity):\n self._severity = severity", "def severity_level(self, severity_level):\n self._severity_level = severity_level", "def issue_change_severity(self, issue, severity):", "def severity(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"severity\")", "def severity_name(self, severity_name):\n\n self._severity_name = severity_name", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def setSeverityOverride(self, *args):\n return _libsbml.XMLErrorLog_setSeverityOverride(self, *args)", "def severity(self):\n return self._severity", "def severity(self) -> Optional[str]:\n return pulumi.get(self, \"severity\")", "def severity_level(self):\n return self._severity_level", "def case_severity(self, operator, case_severity):\n self._tql.add_filter('caseSeverity', operator, case_severity, TQL.Type.STRING)", "def changeErrorSeverity(self, *args):\n return _libsbml.XMLErrorLog_changeErrorSeverity(self, *args)", "def _add_impact_severity(self, variant_obj):\n if variant_obj.most_severe_consequence:\n variant_obj.impact_severity = IMPACT_SEVERITIES.get(\n variant_obj.most_severe_consequence\n )", "def severity(self) -> Optional[pulumi.Input['TestIssueSeverity']]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> pulumi.Input['EndpointSeverity']:\n return pulumi.get(self, \"severity\")", "def normalise_severity(self, severity):\n return \"Info\" if severity == \"Unknown\" else severity", "def severity(self, operator: Enum, severity: list | str):\n if isinstance(severity, list) and operator not in self.list_types:\n raise RuntimeError(\n 'Operator must be CONTAINS, NOT_CONTAINS, IN'\n 'or NOT_IN when filtering on a list of values.'\n )\n\n self._tql.add_filter('severity', operator, severity, TqlType.STRING)", "def setVerbosity(self, level):\n\n self.__verbosity = int(level);", "def severity_justification(self, severity_justification):\n\n self._severity_justification = severity_justification" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the fixed_by of this ClairpbVulnerability.
def fixed_by(self, fixed_by):
    self._fixed_by = fixed_by
[ "def caused_by(self, caused_by):\n\n self._caused_by = caused_by", "def resolved_by(self, resolved_by):\n\n self._resolved_by = resolved_by", "def fixed_fee(self, fixed_fee):\n\n self._fixed_fee = fixed_fee", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def issued_by(self, issued_by):\n\n self._issued_by = issued_by", "def mitigated_by(self, mitigated_by):\n\n self._mitigated_by = mitigated_by", "def fixed(self, fixed):\n\n self._fixed = fixed", "def found_by(self, found_by):\n\n self._found_by = found_by", "def marked_by(self, marked_by):\n\n self._marked_by = marked_by", "def modified_by(self, modified_by):\n \n self._modified_by = modified_by", "def modified_by(self, modified_by):\n\n self._modified_by = modified_by", "def sealed_by(self, sealed_by):\n\n self._sealed_by = sealed_by", "def changed_by(self, changed_by):\n\n self._changed_by = changed_by", "def released_by(self, released_by):\n\n self._released_by = released_by", "def updated_by(self, updated_by):\n self._updated_by = updated_by", "def updated_by(self, updated_by):\n\n self._updated_by = updated_by", "def by(self, by):\n\n self._by = by", "def funded_by(self, funded_by):\n\n self._funded_by = funded_by", "def assigned_by_user(self, assigned_by_user):\n\n self._assigned_by_user = assigned_by_user" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the affected_versions of this ClairpbVulnerability.
def affected_versions(self, affected_versions):
    self._affected_versions = affected_versions
[ "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def versions(self, versions):\n self.__versions = versions", "def versions(self, versions):\n\n self._versions = versions", "def set_versions(self, consumer, versions):\n for resource_type, resource_version in versions.items():\n self._set_version(consumer, resource_type,\n resource_version)\n\n if versions:\n self._cleanup_removed_versions(consumer, versions)\n else:\n self._handle_no_set_versions(consumer)", "def known_vulnerabilities(self, known_vulnerabilities):\n\n self._known_vulnerabilities = known_vulnerabilities", "def violations(self, violations):\n\n self._violations = violations", "def vulnerable_versions(self):\n raise NotImplementedError()", "def update_versions(consumer, resource_versions):\n _get_cached_tracker().update_versions(consumer, resource_versions)", "def max_affected_version(self, max_affected_version):\n\n self._max_affected_version = max_affected_version", "def pipeline_versions(self, pipeline_versions):\n if (self.local_vars_configuration.client_side_validation and\n pipeline_versions is not None and not isinstance(pipeline_versions, int)):\n raise ValueError(\"Parameter `pipeline_versions` must be an integer\") # noqa: E501\n\n self._pipeline_versions = pipeline_versions", "def test_changeVersions(self):\n self._testVersionChanging(8, 2, 3)", "def depend_version_list(self, depend_version_list):\n self._depend_version_list = depend_version_list", "def can_version_flows(self, can_version_flows):\n\n self._can_version_flows = can_version_flows", "def update_case_versions(self):\r\n # we don't need all the runcaseversions for a series. It is the\r\n # series member runs that will use them. So only lock the caseversions\r\n # if this is NOT a series.\r\n if not self.is_series:\r\n self._lock_case_versions()", "def affected_portfolios(self, affected_portfolios):\n if self.local_vars_configuration.client_side_validation and affected_portfolios is None: # noqa: E501\n raise ValueError(\"Invalid value for `affected_portfolios`, must not be `None`\") # noqa: E501\n\n self._affected_portfolios = affected_portfolios", "def ces_version(self, ces_version):\n self._ces_version = ces_version", "def changeable_values(self, changeable_values):\n\n self._changeable_values = changeable_values", "def ordinals(self, ordinals):\n\n self._ordinals = ordinals", "def min_affected_version(self, min_affected_version):\n\n self._min_affected_version = min_affected_version" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Optimizes the distribution of allocations for a set of stock symbols.
def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \
    syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):

    # Read in adjusted closing prices for given symbols, date range
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)  # automatically adds SPY
    prices = prices_all[syms]  # only portfolio symbols
    prices_SPY = prices_all['SPY']  # only SPY, for comparison later

    # find the allocations for the optimal portfolio
    #1 provide an initial guess for x
    allocs = np.ones(len(syms))/len(syms)
    #2 Provide constraints to the optimizer
    bounds = [(0,1) for i in syms]
    constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) })
    #3 call the optimizer
    res = spo.minimize(get_sharpe_ratio, allocs, args=prices, bounds = bounds, constraints=constraints)
    allocs = res.x

    # Get daily portfolio value
    port_val = get_portfolio_value(prices, allocs, 1.0)

    # Get portfolio statistics
    cr, adr, sddr, sr = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)

    # Compare daily portfolio value with SPY using a normalized plot
    if gen_plot:
        # add code to plot here
        df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)
        plot_normalized_data(df_temp)

    return allocs, cr, adr, sddr, sr
[ "def find_optimal_allocations(prices):\n new_prices = prices.copy()\n noa = len(new_prices.keys())\n global df\n df = prices.copy()\n cons =({ 'type': 'ineq', 'fun': lambda inputs: 1.0 - np.sum(abs(inputs)) })\n bnds = tuple((0,1) for x in range(noa))\n weights = np.random.random(noa)\n weights/=np.sum(weights)\n allocs= sco.minimize(min_func_sharpe, noa * [1. /noa,], method='SLSQP', bounds=bnds, constraints=cons)\n return allocs.x", "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc_max = alloc[:]\n\n for i in range(11):\n alloc[0] = i * 0.1\n for j in range(11 - i):\n alloc[1] = j * 0.1\n for k in range(11 - i - j):\n alloc[2] = k * 0.1\n alloc[3] = (10 - i - j - k) * 0.1\n\n vol, daily_ret, sharpe, cum_ret = \\\n basic_portfolio.analyze(alloc)\n\n if sharpe > sharpe_max:\n sharpe_max = sharpe\n alloc_max = alloc[:]\n\n print 'Best sharpe ratio is ', sharpe_max\n print 'Best allocation is', alloc_max\n\n ref_symbol = '$SPX'\n\n basic_portfolio.plot_with_reference(alloc_max, ref_symbol, source='local')", "def allocate_stocks(order_lines_info: Iterable[\"OrderLineData\"], country_code: str):\n # allocation only applied to order lines with variants with track inventory\n # set to True\n order_lines_info = [\n line_info\n for line_info in order_lines_info\n if line_info.variant and line_info.variant.track_inventory\n ]\n variants = [line_info.variant for line_info in order_lines_info]\n\n stocks = (\n Stock.objects.select_for_update(of=(\"self\",))\n .for_country(country_code)\n .filter(product_variant__in=variants)\n .order_by(\"pk\")\n )\n\n quantity_allocation_list = list(\n Allocation.objects.filter(\n stock__in=stocks,\n quantity_allocated__gt=0,\n )\n .values(\"stock\")\n .annotate(Sum(\"quantity_allocated\"))\n )\n quantity_allocation_for_stocks: Dict = defaultdict(int)\n for allocation in quantity_allocation_list:\n quantity_allocation_for_stocks[allocation[\"stock\"]] += allocation[\n \"quantity_allocated__sum\"\n ]\n\n variant_to_stocks: Dict[str, List[StockData]] = defaultdict(list)\n for stock_data in stocks.values(\"product_variant\", \"pk\", \"quantity\"):\n variant = stock_data.pop(\"product_variant\")\n variant_to_stocks[variant].append(StockData(**stock_data))\n\n insufficient_stock: List[\"ProductVariant\"] = []\n allocations: List[Allocation] = []\n for line_info in order_lines_info:\n stocks = variant_to_stocks[line_info.variant.pk] # type: ignore\n insufficient_stock, allocation_items = _create_allocations(\n line_info, stocks, quantity_allocation_for_stocks, insufficient_stock\n )\n allocations.extend(allocation_items)\n\n if insufficient_stock:\n raise InsufficientStock(insufficient_stock)\n\n if allocations:\n Allocation.objects.bulk_create(allocations)", "def allocate_stock(self, output, allocations):\n\n for item, quantity in allocations.items():\n BuildItem.objects.create(\n build=self.build,\n stock_item=item,\n quantity=quantity,\n install_into=output\n )", "def optimize(self, searchBudget):\n import itertools\n import multiprocessing as mp\n\n # Define an output queue\n output = mp.Queue()\n\n # Define the objective function\n def obj_func(sigma, searchBudget=searchBudget):\n expectedReward = self.expectedRewards(sigma=sigma, searchBudget=searchBudget)\n cost = self.cost(sigma=sigma)\n return (-expectedReward + 
cost)\n\n # Define the optimisation function with output sent to queue\n def optimize_local(output=output):\n # \n if self.allocationMethod=='variablePrecision':\n # importing CMAES libarary\n import cma\n\n # setting up parameters and running the optimization \n x0 = 50 + 15*np.random.randn(12*(self.depth-1))\n res = cma.fmin(obj_func, x0, 30, options={'bounds':[1,100],\\\n 'tolfun':1, 'maxfevals': int(1e4)})\n sigma_opt = res[0] \n\n elif self.allocationMethod=='equalPrecision':\n # importing Bayesian optimization libraries\n import GPy, GPyOpt\n from GPyOpt.methods import BayesianOptimization\n\n # setting up parameters and running the optimization\n kernel = GPy.kern.Matern52(input_dim=1)\n domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,100)}]\n optimizer = BayesianOptimization(obj_func, domain=domain, kernel=kernel)\n optimizer.run_optimization(max_iter=50)\n sigma_opt = optimizer.X[optimizer.Y.argmin()]\n\n # appending the result (scalar sigma) to output queue\n output.put(sigma_opt)\n\n # Setup a list of processes that we want to run\n processes = [mp.Process(target=optimize_local) for x in range(self.n_restarts)]\n\n # Run processes\n for p in processes:\n p.start()\n\n # Exit the completed processes\n for p in processes:\n p.join()\n\n # Get process results from the output queue\n results = [output.get() for p in processes]\n\n return results", "def optimize_portfolio(sd, ed, syms, gen_plot=False):\n\n data = util.get_data(syms, sd, ed)\n prices = data[syms] # with or without SPY\n prices_SPY = data['SPY']\n\n # generate an initial guess for the minimizer\n initial_guess = np.ones(len(syms))/len(syms)\n\n # result = [ 1.35003613, -1.29013143, 0.30216938, 1.01683222]\n \n # boundaries\n bnds = [(0.0,1.0)] * len(syms)\n constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x)-1.0},)\n # constraints = ({ 'type': 'ineq', 'fun': lambda inputs: 1 - np.sum(inputs) },{ 'type': 'ineq', 'fun': lambda x: x[0] * (x[0]-1) })\n\n result = spo.minimize(calc_error_sharpe, initial_guess, args=(prices,), method='SLSQP', bounds = bnds, constraints=constraints, options={'disp': True})\n optimal_alloc = result.x\n\n print(\"OPTIMAL_ALLOC: {}\".format(str(optimal_alloc)))\n # take the optimal allocation and recalc stats for output\n cr, adr, sddr, sr = compute_portfolio_stats(optimal_alloc, prices)\n \n print(\"sharpe ratio: {}\".format(sr))\n \n if gen_plot:\n port_val = compute_port_val(optimal_alloc, prices)\n SPY_val = prices_SPY/prices_SPY.iloc[0] \n\n df_temp = pd.concat([port_val, SPY_val], keys=['Portfolio', 'SPY'], axis=1)\n util.plot_data(df_temp)\n\n return result, cr, adr, sddr, sr\n # optimize for Sharpe Ratio - 1", "def get_new_allocation(self, day, init=False):\n \"\"\n if init and self.data_train is None:\n # Use uniform allocation\n cur_day_op = self.data.get_op(relative=False)[day, :] # opening prices on |cur_day|\n return util.get_uniform_allocation(self.num_stocks, cur_day_op)\n\n predicted_price_rel = self.predict_price_relatives(day)\n\n # Compute mean price relative of available stocks (x bar at t+1)\n today_op = self.data.get_op(relative=False)[day, :]\n avail_stocks = util.get_avail_stocks(today_op)\n avail_idxs = util.get_available_inds(avail_stocks)\n ppr_avail = predicted_price_rel[avail_idxs] # predicted price relatives of available stocks\n mean_price_rel = np.mean(ppr_avail)\n\n lam = self.compute_lambda(ppr_avail, mean_price_rel, avail_idxs) # lambda at t+1\n\n # limit lambda to avoid numerical problems from acting too aggressively.\n # (referenced from 
marigold's implementation: https://github.com/Marigold/universal-portfolios)\n lam = min(100000, lam)\n\n # Note: we don't perform simplex project b/c negative values (shorting) is allowed.\n new_b = np.zeros(self.num_stocks)\n for i, _ in enumerate(new_b):\n ppr = predicted_price_rel[i]\n if ppr > 0:\n new_b[i] = self.b[i] + lam * (ppr - mean_price_rel)\n\n # Normalize b so that it sums to 1\n sum_b = np.linalg.norm(new_b, ord=1)\n return (1.0 / sum_b) * new_b", "def recalc_stocks(stocks, feature, args):\r\n \r\n for stock in stocks:\r\n expression = 'stock.' + feature + '_calc(' + args + ')'\r\n exec(expression)\r\n \r\n return", "def get_market_caps(self, symbols):\n uncached_symbols = []\n stocks = []\n for symbol in symbols:\n state_manager = StateManager()\n stock_data = state_manager.get_key_value(f\"SYMBOL_{symbol}\")\n if stock_data != {}:\n print(f\"using cached data for {symbol}\")\n stocks.append(stock_data)\n else:\n uncached_symbols.append(symbol)\n try:\n stocks += self.make_stock_request(uncached_symbols)\n except Exception as e:\n print (e)\n\n market_cap_return = []\n if stocks:\n for stock in stocks:\n symbol = stock.get(\"symbol\", \"\")\n print (f\"... evaluating stock data for {symbol}\")\n market_cap_raw = stock.get(\"market_cap\", -1)\n print (f\"... market cap = {market_cap_raw}\")\n market_cap = format_significance(market_cap_raw)\n print (f\"... market cap = {market_cap}\")\n name = stock.get(\"name\", \"\")\n\n try:\n cache_key = f\"SYMBOL_{symbol}\"\n cache_value = {\n \"symbol\": symbol,\n \"market_cap\": market_cap_raw,\n \"name\": name,\n \"refresh_date\": f\"{datetime.today().strftime('%Y-%m-%d')}\"\n }\n except Exception as e:\n print (e)\n\n try:\n state_manager.save_key_value(cache_key, cache_value)\n except Exception as e:\n print (e)\n\n if int(market_cap_raw) > 0:\n market_cap_return.append((symbol, name, market_cap_raw, market_cap))\n\n return market_cap_return", "def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. 
Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values", "def portfolio_allocation(self, data, total_risk):\n total_rating = data[\"rating\"].sum()\n shares = {}\n risk_amt = total_risk\n for _, row in data.iterrows():\n numshares = int(float(row[\"rating\"]) / float(total_rating) * float(risk_amt) / float(row[\"price\"]))\n if numshares > 10:\n multiplier = int(numshares / 10)\n numshares = multiplier * 10\n shares[row[\"symbol\"]] = numshares\n\n risk_amt -= numshares * row[\"price\"]\n # debug\n # for k, v in shares.items():\n # print(\"[*] Ticker: {}, Shares: {}\".format(k, v))\n return shares", "async def _garbage_collect_sim(self, base: str, trade_size: float, reserved: float):\n\n if not config['trade_garbage_collect']:\n return\n\n base_mult = await self.market.get_base_mult(config['trade_base'], base)\n current_balance = self.balancer.sim_balances[base] * base_mult - reserved\n\n if current_balance >= trade_size:\n return\n\n open_trades_by_time = []\n for pair in self.trades:\n if pair.split('-')[0] == base:\n for trade in self.trades[pair]['open']:\n open_trades_by_time.append((trade['open_time'], trade))\n\n open_trades_sorted = [trade_tuple[1] for trade_tuple in sorted(open_trades_by_time, key=lambda x: x[0])]\n\n if open_trades_sorted:\n collect_trade = open_trades_sorted[0]\n await self._sell_sim(collect_trade, 'GARBAGE COLLECT SELL', remit=False)\n self.trades[collect_trade['pair']]['open'].remove(collect_trade)", "def build_window(self, random_normal_demand: list, period_length: int = 0,\n holding_cost_percentage: Decimal = 0.48,\n shortage_cost_percentage: Decimal = 0.3) -> dict:\n\n getcontext().prec = 7\n getcontext().rounding = ROUND_FLOOR\n\n clist = []\n\n revenue = lambda unit_cost, units_sold,: Decimal(unit_cost) * Decimal(units_sold)\n\n # lambda functions for calculating the main values in the monte carlo analysis\n closing_stock = lambda opening_stock, orders, deliveries, backlog: Decimal((Decimal(opening_stock)\n - Decimal(orders)) + Decimal(\n deliveries)) - Decimal(backlog) if Decimal((Decimal(opening_stock) - Decimal(orders)) +\n Decimal(deliveries)) - Decimal(backlog) > 0 else 0\n\n backlog = lambda opening_stock, deliveries, demand: Decimal(abs(\n (Decimal(opening_stock + deliveries)) - Decimal(demand))) if \\\n Decimal((opening_stock + deliveries)) - Decimal(demand) < 0 else 0\n\n holding_cost = lambda cls_stock, unit_cost: cls_stock * (\n Decimal(unit_cost) * Decimal(holding_cost_percentage)) if cls_stock > 0 else 0\n\n shortages = lambda opening_stock, orders, deliveries: abs((Decimal(opening_stock) - Decimal(orders)) +\n Decimal(deliveries)) if \\\n ((Decimal(opening_stock) - Decimal(orders)) + Decimal(deliveries)) < 0 else 0\n\n # shortage cost as a percentage of unit cost. 
Increase to percentage to have a bigger affect change more\n shortage_cost = lambda cls_stock, unit_cost: cls_stock * (\n Decimal(unit_cost) * Decimal(shortage_cost_percentage)) if int(cls_stock) > 0 else 0\n\n raise_po = lambda reorder_lvl, cls_stock: True if cls_stock <= reorder_lvl else False\n\n po_qty = lambda eoq, reorder_lvl, backlog, cls_stock: Decimal(eoq) + Decimal(backlog) + Decimal(\n (Decimal(reorder_lvl) - Decimal(cls_stock))) if Decimal(eoq) + Decimal(backlog) + Decimal(\n (Decimal(reorder_lvl) - Decimal(cls_stock))) > 0 else 0\n\n # calculate period to receive po and quantity to receive\n\n for sku in self._analysed_orders:\n if period_length != len(random_normal_demand[0][sku.sku_id]):\n raise ValueError(\"The period_length is currently {} and the actual length of the demand is {}. \"\n \"Please make sure that the two values are equal\".format(period_length,\n len(random_normal_demand[0][\n sku.sku_id])))\n\n sim_frame_collection = []\n index_item = 1\n for sku in self._analysed_orders:\n\n period = 1\n order_receipt_index = {}\n final_stock = 0\n sim_window_collection = {}\n previous_backlog = Decimal('0')\n order_receipt_index = {}\n # create the sim_window for each sku, suing the random normal demand generated\n for i in range(0, period_length):\n\n po_qty_raised = 0\n\n # instantiate sim_window\n sim_window = simulation_window.MonteCarloWindow\n\n # add sku_id\n sim_window.sku_id = sku.sku_id\n\n # add closing stock\n previous_closing_stock = final_stock\n\n # mark sim_window.position or period in analysis\n sim_window.position = period\n\n # add average orders to opening_stock if first period else add closing stock\n if sim_window.position == 1:\n sim_window.opening_stock = (sku.reorder_level - Decimal(sku.safety_stock)) + Decimal(sku.safety_stock) #calculated ltd until put into analyse orders\n else:\n sim_window.opening_stock = previous_closing_stock\n\n # add random demand\n demand = random_normal_demand[0][sku.sku_id][i][0]\n sim_window.demand = demand\n\n #\n if sim_window.position in order_receipt_index.keys():\n sim_window.purchase_order_receipt_qty = order_receipt_index[sim_window.position]\n sim_window.po_number_received = 'PO {:.0f}{}'.format(sim_window.position, sim_window.index)\n del order_receipt_index[sim_window.position]\n else:\n sim_window.purchase_order_receipt_qty = 0\n sim_window.po_number_received = ''\n\n sim_window.index = index_item\n\n sim_window.backlog = backlog(opening_stock=sim_window.opening_stock,\n deliveries=sim_window.purchase_order_receipt_qty,\n demand=demand) + previous_backlog\n sim_window.closing_stock = closing_stock(opening_stock=sim_window.opening_stock,\n orders=demand,\n deliveries=sim_window.purchase_order_receipt_qty,\n backlog=sim_window.backlog)\n\n sim_window.holding_cost = holding_cost(sim_window.closing_stock, sku.unit_cost)\n\n sim_window.shortage_units = shortages(opening_stock=sim_window.opening_stock,\n orders=demand,\n deliveries=sim_window.purchase_order_receipt_qty)\n\n sim_window.shortage_cost = shortage_cost(cls_stock=(sim_window.backlog - previous_backlog),\n unit_cost=sku.unit_cost)\n\n sim_window.po_raised_flag = raise_po(reorder_lvl=sku.reorder_level, cls_stock=sim_window.closing_stock)\n\n po_receipt_period = period + sku.lead_time\n\n po_qty_raised = po_qty(eoq=sku.economic_order_qty,\n reorder_lvl=sku.reorder_level,\n backlog=sim_window.backlog,\n cls_stock=sim_window.closing_stock)\n\n if po_qty_raised > 0:\n order_receipt_index.update({po_receipt_period: po_qty_raised})\n 
sim_window.purchase_order_raised_qty = order_receipt_index.get(po_receipt_period)\n else:\n sim_window.purchase_order_raised_qty = 0\n\n sim_window.po_number_raised = ''\n\n if int(sim_window.purchase_order_raised_qty) > 0:\n sim_window.po_number_raised = 'PO {:.0f}{}'.format(po_receipt_period, sim_window.index)\n del po_receipt_period\n\n final_stock = sim_window.closing_stock\n\n if int(sim_window.closing_stock) == 0:\n previous_backlog += sim_window.backlog\n else:\n previous_backlog = 0\n\n units_sold = self._units_sold(backlog=sim_window.backlog, opening_stock=sim_window.opening_stock,\n delivery=sim_window.purchase_order_receipt_qty, demand=sim_window.demand)\n sim_window.sold = units_sold\n sim_window.revenue = Decimal(revenue(sku.unit_cost, units_sold))\n yield sim_window\n\n del sim_window\n del po_qty_raised\n period += 1\n\n index_item += 1", "def identify_qss_dependencies(self, syms):\n self.dict_qssdepend_scqss = {}\n self.dict_qssdepend_sc = {}\n self.dict_qssdepend_gRTqss = {}\n self.dict_qssdepend_gRT = {}\n self.dict_qssdepend_kf = {}\n self.dict_qssdepend_kr = {}\n self.sc_qss_chain_stop = []\n for symbol in self.dict_qss_species:\n free_symb = syms.sc_qss_smp[\n self.dict_qss_species[symbol]\n ].free_symbols\n qss_symb = []\n sc_symb = []\n g_rt_qss_symb = []\n g_rt_symb = []\n kf_symb = []\n kr_symb = []\n for ss in free_symb:\n if \"sc_qss\" in str(ss):\n qss_symb.append(ss)\n elif \"sc\" in str(ss):\n sc_symb.append(ss)\n elif \"g_RT_qss\" in str(ss):\n g_rt_qss_symb.append(ss)\n elif \"g_RT\" in str(ss):\n g_rt_symb.append(ss)\n elif \"kf\" in str(ss):\n kf_symb.append(ss)\n elif \"kr\" in str(ss):\n kr_symb.append(ss)\n\n self.dict_qssdepend_scqss[symbol] = qss_symb\n self.dict_qssdepend_sc[symbol] = sc_symb\n self.dict_qssdepend_gRTqss[symbol] = g_rt_qss_symb\n self.dict_qssdepend_gRT[symbol] = g_rt_symb\n self.dict_qssdepend_kf[symbol] = kf_symb\n self.dict_qssdepend_kr[symbol] = kr_symb\n\n if not self.dict_qssdepend_scqss[symbol]:\n self.sc_qss_chain_stop.append(symbol)", "def _pools_with_changed_devs(devs_to_search):\n # pylint: disable=import-outside-toplevel\n from ._data import MODev\n\n (increased, decreased) = (set(), set())\n for _, info in devs_to_search:\n modev = MODev(info)\n size = Range(modev.TotalPhysicalSize())\n observed_size = get_property(modev.NewPhysicalSize(), Range, size)\n if observed_size > size: # pragma: no cover\n increased.add(modev.Pool())\n if observed_size < size: # pragma: no cover\n decreased.add(modev.Pool())\n\n return (increased, decreased)", "def update_buy_caps(self):\n for stock in self.stocks:\n core.configure_item(\n f\"stock.{stock.name}.owned\",\n max_value=self.player.get_owned_stocks(stock)\n + int(self.player.cash / stock.price),\n )", "def improve_population(self):\r\n for index in range(len(self.district_population)):\r\n district = self.district_population[index]\r\n districtsolution = hillclimber.HillClimber(district, self.cable_cost, self.battery_cost)\r\n self.district_population[index] = districtsolution.run(1000, 80000)\r\n self.cost_populations[index] = district.total_cost(self.battery_cost, self.cable_cost)", "def alloc_for(self, names, dg, model):\n for name in names:\n self.alloc(name, dg, model)", "def __init__(self, symbols, start_date, end_date, \n\t\tema_days_low=34, ema_days_high=200, ma_days_low=40, ma_days_high=200, ema_factor=1.001):\n\t\tsuper().__init__(start_date, end_date)\n\t\tfor sym in symbols:\n\t\t\tself.set_stock_obj(sym)\n\n\t\t# calculate exponential moving average alpha 
parameters\n\t\t# use 99.9% of weight (0.001 not included)\n\t\tself.alpha_low = 2 / (ema_days_low + 1)\n\t\tself.alpha_high = 2 / (ema_days_high + 1)\n\n\t\t# calculate EMAs for stock series\n\t\tema_low = calc_ema(self.alpha_low, self.stock_objs[symbols[0]].adj_close)\n\t\tema_high = calc_ema(self.alpha_high, self.stock_objs[symbols[0]].adj_close)\n\n\t\t# calculate MAs for stock series\n\t\tma_low = calc_ma(ma_days_low, self.stock_objs[symbols[0]].adj_close)\n\t\tma_high = calc_ma(ma_days_high, self.stock_objs[symbols[0]].adj_close)\n\n\t\t# now apply model. start off in stock model and sell when low moving average crosses\n\t\t# below high moving average. buy when low EMA becomes greater than ema_factor * ...\n\t\t# high EMA.\n\t\tnum_trading_days = len(self.stock_objs[symbols[0]].date)\n\t\tself.symbol_seq = [symbols[0] for x in range(num_trading_days)]\n\n\t\tcurrent_sym = symbols[0]\n\t\tfor ii in range(num_trading_days):\n\t\t\t# wait at least ma_days_high before doing anything.\n\t\t\tif ii >= ma_days_high:\n\t\t\t\tif ma_low[ii] < ma_high[ii]:\n\t\t\t\t\tcurrent_sym = symbols[1]\n\t\t\t\tif ema_low[ii] > ema_factor * ema_high[ii]:\n\t\t\t\t\tcurrent_sym = symbols[0]\n\n\t\t\tself.symbol_seq[ii] = current_sym" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a starting value and the prices of stocks in a portfolio with allocations, return the portfolio value over time.
def get_portfolio_value(prices, allocs, start_val):
    normed = prices/prices.iloc[0]
    alloced = np.multiply(allocs, normed)
    pos_vals = alloced * start_val
    port_val = pos_vals.sum(axis=1)
    return port_val
[ "def get_portfolio_value(prices, allocs, start_val=1):\n # TODO: Your code here\n normed = prices/prices.ix[0,:]\n alloced = normed * allocs\n pos_vals = alloced * start_val\n port_val = pos_vals.sum(axis=1)\n return port_val", "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def portfolio_value(stocks, alloc, base):\n\n # Calculate % return\n stock_alloc = alloc * stocks\n portfolio_cumulative = stock_alloc.sum(axis=1)\n\n # Portfolio Value\n p_value = portfolio_cumulative * base\n\n return p_value", "def getPortfolioValue(self, start_t, t):\n sum_tmp=0\n for item in self.portfolio.keys():\n if \"DJI_\" in item:\n t_tmp=datetime.strftime(pd.date_range(end=t,periods=1,freq='B')[0],'%Y-%m-%d')\n price=universe.get_price_in_currency(item,t_tmp,'CAD')\n elif 'rf_rate' in item:\n price=universe.get_security(item).get_cc_return(start_t,t) \n else:\n price=universe.get_price_in_currency(item,t,'CAD')\n #price=universe.get_security(item).price[t]\n 
amount=self.portfolio[item]\n sum_tmp=sum_tmp+price*amount\n \n return sum_tmp", "def compute_portvals(start_date, end_date, trades_df, start_val):\n # SETTING UP ORDERS DATAFRAME\n # Read orders file into a dataframe http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table \n orders = trades_df\n symbols = np.unique(orders['Symbol']).tolist() # List of all the symbols used in orders\n\n # SETTING UP PRICES DATAFRAME\n # Read in adjusted closing prices for given symbols, date range... drop non-trading days... add cash column\n dates = pd.date_range(start_date, end_date)\n prices = get_data(symbols, dates, addSPY=False).dropna()\n prices['cash'] = 1.00\n\n # SETTING UP TRADES DATAFRAME\n # Daily snapshot of portfolio changes (+ = Buy Order, - = Sell Order) with cash adjustments\n trades = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n trades['cash'] = 0.00\n\n for row_index, row in orders.iterrows():\n try:\n if row.Order == 'SELL':\n trades.ix[row.Date,row.Symbol] += (-1 * row.Shares) # Subtract ShareAmount for Sell \n trades.ix[row.Date,'cash'] += (row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Sell\n elif row.Order == 'BUY':\n trades.ix[row.Date,row.Symbol] += (row.Shares) # Add ShareAmount for Buy\n trades.ix[row.Date,'cash'] += (-1 * row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Buy\n else:\n print 'ERROR: order type not recognized, looking for BUY or SELL'\n except:\n print 'Unknown Error:'\n\n\n # SETTING UP HOLDINGS DATAFRAME \n # accumulating trades into holdings dataframe, snapshot of shares and cash for given day\n holdings = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n holdings['cash'] = 0.00\n holdings.ix[start_date,'cash'] = start_val # add starting cash value\n previous_row = holdings.iloc[0]\n for row_index, row in holdings.iterrows():\n holdings.ix[row_index] = previous_row + trades.ix[row_index] #previous day's value + trades\n previous_row = row\n\n #SETTING UP VALUES DATAFRAME\n # convert shares into their respective dollar amounts\n values = pd.np.multiply(holdings, prices)\n #DAILY VALUE OF THE PORTFOLIO\n portvals = values.sum(axis=1)\n return portvals", "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc_max = alloc[:]\n\n for i in range(11):\n alloc[0] = i * 0.1\n for j in range(11 - i):\n alloc[1] = j * 0.1\n for k in range(11 - i - j):\n alloc[2] = k * 0.1\n alloc[3] = (10 - i - j - k) * 0.1\n\n vol, daily_ret, sharpe, cum_ret = \\\n basic_portfolio.analyze(alloc)\n\n if sharpe > sharpe_max:\n sharpe_max = sharpe\n alloc_max = alloc[:]\n\n print 'Best sharpe ratio is ', sharpe_max\n print 'Best allocation is', alloc_max\n\n ref_symbol = '$SPX'\n\n basic_portfolio.plot_with_reference(alloc_max, ref_symbol, source='local')", "def compute_portvals(start_date, end_date, orders_file, start_val):\n\n # read csv file\n reader = csv.reader(open(orders_file, 'rU'), delimiter=',')\n\n # eliminate duplicate symbols\n symbols = []\n symbol_dict = {}\n i = 0\n for row in reader:\n if i != 0:\n if (symbol_dict.get(row[1], -1)) == -1:\n symbol_dict[row[1]] = row[1]\n symbols.append(row[1])\n\n i += 1\n\n # create dataframes\n dates = pd.date_range(start_date, end_date)\n prices_all = get_data(symbols, dates) # automatically adds SPY\n prices_df = 
prices_all[symbols] # only portfolio symbols\n prices_df['Cash'] = 1.0\n # pd.set_option('display.max_rows', len(prices_df))\n # print prices_df\n count_df = pd.DataFrame(index=prices_df.index, columns=symbols)\n count_df = count_df.fillna(0)\n\n cash_df = pd.DataFrame(index=prices_df.index, columns=['Cash_Value'])\n cash_df = cash_df.fillna(start_val)\n\n leverage_df = pd.DataFrame(index=prices_df.index, columns=['Leverage'])\n leverage_df = leverage_df.fillna(0)\n\n # populate dataframes\n reader = csv.reader(open(orders_file, 'rU'), delimiter=',')\n i = 0\n for row in reader:\n if i != 0:\n if row[0] in count_df.index:\n if row[2] == 'SELL':\n count_df.ix[row[0], row[1]] += -float(row[3])\n cash_df.ix[row[0]] += -float(row[3]) * prices_df.ix[row[0], row[1]]\n if row[2] == 'BUY':\n count_df.ix[row[0], row[1]] += float(row[3])\n cash_df.ix[row[0]] += float(row[3]) * prices_df.ix[row[0], row[1]]\n\n i += 1\n\n value = start_val\n\n symbols_sum = []\n for i in range(len(symbols)):\n symbols_sum.append(0)\n\n for date_index, row in count_df.iterrows():\n longs = 0\n shorts = 0\n for i in range(0, len(row), 1):\n if date_index in count_df.index and date_index in prices_df.index:\n symbols_sum[i] += count_df.ix[date_index, symbols[i]]\n # print date_index, symbols_sum[i]\n value += -(prices_df.ix[date_index, symbols[i]] * count_df.ix[date_index, symbols[i]])\n if symbols_sum[i] > 0:\n longs += (prices_df.ix[date_index, symbols[i]] * symbols_sum[i])\n if symbols_sum[i] < 0:\n shorts += abs((prices_df.ix[date_index, symbols[i]] * symbols_sum[i]))\n leverage = (longs + shorts)/(longs - shorts + value)\n leverage_df.ix[date_index] = leverage\n if leverage > 2.0:\n longs = 0\n shorts = 0\n # print \"Raise Alert\"\n # print date_index, leverage, temp_value\n for i in range(0, len(symbols_sum), 1):\n symbols_sum[i] -= count_df.ix[date_index, symbols[i]]\n\n for i in range(0, len(symbols_sum), 1):\n if symbols_sum[i] > 0:\n longs += (prices_df.ix[date_index, symbols[i]] * symbols_sum[i])\n if symbols_sum[i] < 0:\n shorts += abs((prices_df.ix[date_index, symbols[i]] * symbols_sum[i]))\n previous_leverage = (longs + shorts)/(longs - shorts + value)\n\n # print leverage, previous_leverage\n if leverage > previous_leverage > 2.0:\n leverage_df.ix[date_index] = previous_leverage\n cash_df.ix[date_index] = value\n temp_value = value\n else:\n count_df.ix[date_index] = 0\n cash_df.ix[date_index] = temp_value\n value = temp_value\n else:\n cash_df.ix[date_index] = value\n temp_value = value\n\n count_df['Cash'] = cash_df\n count_df['Leverage'] = leverage_df\n\n # print\n # pd.set_option('display.max_rows', len(count_df))\n # print count_df\n # print\n # find cumulative sum\n for i in range(0, len(symbols), 1):\n count_df[symbols[i]] = count_df[symbols[i]].cumsum()\n # print count_df\n # print\n # dot product of matrices\n count_df = prices_df * count_df\n count_df['Sum'] = count_df.sum(axis=1)\n # print count_df\n\n # rearrange columns\n columns = count_df.columns.tolist()\n columns = columns[-1:] + columns[:-1]\n count_df = count_df[columns]\n\n # pd.set_option('display.max_rows', len(count_df['Sum']))\n # print count_df['Sum']\n return count_df", "def portfolio_manager(portfolio_start, portfolio_now,volume,price,gain):\n hysteresis = 1.2 # get each from ML Model\n base = 100.00\n outer_scale = 1.2\n inner_scale = 100.00\n \n total_price = volume * price \n gain_percentage = gain / (total_price)\n\n portfolio_percentage = (hysteresis - outer_scale*(base ** -(inner_scale*gain_percentage)))\n #check out this 
equation on wolfram alpha with the query: (1.2 - 1.2*(100 ^ -(100*x))) from 0 to .005\n #x axis is percentage gain, y is portfolio percentage that can be used\n\n acceptable_total_price = portfolio_percentage * portfolio_start\n \n portfolio_currently_invested = portfolio_start - portfolio_now\n acceptable_total_price -= portfolio_currently_invested #account for money already invested. if you have 100 $, and we are willing to invest 10, but we've already invested 6, we are only willing to invest 4.\n \n if(acceptable_total_price <= 0):\n return 0\n \n if(acceptable_total_price > total_price): #if we're willing to invest more than is possible at current volume, invest all\n return volume\n #else, invest the amount we're willing to based on the percentage\n acceptable_volume = int(acceptable_total_price / price)\n \n return acceptable_volume", "def compute_portfolio_rri_for_range(stocks, start_date, end_date):\n total_rri = 0.0\n total_quantity = 0\n for i in range(len(stocks)):\n ticker = stocks[i].stock.stock_ticker\n quantity = stocks[i].quantity\n stock_rri = compute_stock_rri_for_range(ticker, start_date, end_date)\n total_rri = total_rri + (stock_rri * quantity)\n total_quantity = total_quantity + quantity\n\n portfolio_rri = (total_rri / total_quantity)\n\n return portfolio_rri", "def calculatePurchasePrice(portfolio):\n\n price = 0\n for stock in portfolio:\n price += stock[1] * stock[2]\n return price", "def simulate(startdate, enddate, symbols, allocations, use_cache=0):\n\n # Get actual end timestamps for trading days on NYSE\n trading_duration = dt.timedelta(hours=16)\n trading_timestamps = du.getNYSEdays(startdate, enddate, trading_duration)\n\n # Get data from Yahoo\n data_provider = da.DataAccess('Yahoo', cachestalltime=use_cache) \n data_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']\n data_list = data_provider.get_data(trading_timestamps, symbols, data_keys)\n data = dict(zip(data_keys, data_list))\n\n # Get 'close' prices and normalized then\n close_prices = data['close'].values\n normalized_close = close_prices / close_prices[0, :]\n\n # Compute portfolio by multiplying weights\n portfolio_prices = (allocations * normalized_close).sum(axis=1)\n\n # Compute daily returns for portfolio\n portfolio_rets = portfolio_prices.copy()\n tsu.returnize0(portfolio_rets)\n\n # Final statistics\n volatility = portfolio_rets.std()\n avg_return = portfolio_rets.mean()\n sharpe = tsu.get_sharpe_ratio(portfolio_rets)\n cum_return = np.prod(1 + portfolio_rets)\n\n return (volatility, avg_return, sharpe, cum_return)", "def new_portfolio_equity(portfolio_returns, weights, old_portfolio_equity):\n profit_value = np.sum(portfolio_returns) * old_portfolio_equity * (1 - weights[0])\n return pd.Series(max(profit_value + old_portfolio_equity, 0.01))", "def portfolio_from_prices(prices, b_variable):\r\n\r\n # Calculated as: alpha * Asset_1 - beta * Asset_2\r\n portfolio_price = ((1 / prices[0][0]) * prices[0][:]\r\n - (b_variable / prices[1][0]) * prices[1][:])\r\n\r\n return portfolio_price", "def portfolio_allocation(self, data, total_risk):\n total_rating = data[\"rating\"].sum()\n shares = {}\n risk_amt = total_risk\n for _, row in data.iterrows():\n numshares = int(float(row[\"rating\"]) / float(total_rating) * float(risk_amt) / float(row[\"price\"]))\n if numshares > 10:\n multiplier = int(numshares / 10)\n numshares = multiplier * 10\n shares[row[\"symbol\"]] = numshares\n\n risk_amt -= numshares * row[\"price\"]\n # debug\n # for k, v in shares.items():\n # print(\"[*] 
Ticker: {}, Shares: {}\".format(k, v))\n return shares", "def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def cumulative_returns(shares_allocation, capital, test_data):\n\n # list of DataFrames of cumulative returns for each stock\n daily_returns = []\n\n # iterates over every stock in the portfolio\n for stock in shares_allocation.index:\n\n # multiples shares by share prices in the validation dataset\n daily_returns.append(shares_allocation.loc[stock].values * test_data[stock])\n\n # concatenates every DataFrame in the above list to a single DataFrame\n daily_returns_df = pd.concat(daily_returns, axis=1).reset_index()\n\n # sets the index as the date\n daily_returns_df.set_index(\"Day\", inplace=True)\n\n # adds the cumulative returns for every stock\n cumulative_daily_returns = daily_returns_df.sum(axis=1)\n\n # returns the cumulative daily returns of the portfolio\n return cumulative_daily_returns", "def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \\\n syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):\n\n # Read in adjusted closing prices for given symbols, date range\n dates = pd.date_range(sd, ed)\n prices_all = get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all['SPY'] # only SPY, for comparison later\n\n\t# find the allocations for the optimal portfolio\n #1 provide an initial guess for x\n allocs = np.ones(len(syms))/len(syms)\n #2 Provide constraints to the optimizer\n bounds = [(0,1) for i in syms]\n constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) })\n #3 call the optimizer\n res = spo.minimize(get_sharpe_ratio, allocs, \n \t\t\t\t\targs=prices, \n \t\t\t\t\tbounds = bounds,\n \t\t\t\t\tconstraints=constraints)\n allocs = res.x\n \n # Get daily portfolio value\n port_val = get_portfolio_value(prices, allocs, 1.0)\n \n # Get portfolio statistics\n cr, adr, sddr, sr = get_portfolio_stats(port_val, \n \t\t\t\t\t\t\t\t\t\tdaily_rf=0.0, \n \t\t\t\t\t\t\t\t\t\tsamples_per_year=252)\n \n # Compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n # add code to plot here\n df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)\n plot_normalized_data(df_temp)\n\n return allocs, cr, adr, sddr, sr", "def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio 
EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def calculateGain(portfolio):\n\n gained = 0\n for stock in portfolio:\n #Calculate (current price - purchase price) * total shared\n gained += (stock[4]-stock[1])*stock[2]\n\n return gained" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the Sharpe ratio for the minimizer.
def get_sharpe_ratio(allocs, prices):
    port_val = get_portfolio_value(prices, allocs, start_val=1.0)
    sharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]
    return -sharpe_ratio
[ "def sharpe_ratio(returns):\n sr = annualized_returns(returns) / annualized_risk(returns)\n return sr.rename('Sharpe Ratio')", "def spRatio(spFront, spRear):\r\n spRatio = spRear / spFront\r\n return spRatio", "def foundation_shear_reduction_factor():\n # According to Millen (2016)\n return 0.76", "def sharpe_ratio(r1, r2, rf, o1, o2, cov):\n def sr(x):\n w1 = x[0]\n w2 = 1 - w1\n\n Rp = w1 * r1 + w2 * r2\n STDEVp = math.sqrt(portfolio_variance(o1, o2, cov)(x))\n R = (Rp - rf) / STDEVp\n return R\n return sr", "def sharpe_ratio(returns1,returns2):\n len1 = len(returns1)\n len2 = len(returns2)\n len_min = min(len1,len2)\n returns1 = returns1[:len_min]\n returns2 = returns2[:len_min]\n return (returns1 - returns2).mean() / (returns1 - returns2).std()", "def sharpe_ratio(returns1,returns2):\n len1 = len(returns1)\n len2 = len(returns2)\n len_min = min(len1,len2)\n returns1 = returns1[:len_min]\n returns2 = returns2[:len_min]\n return (returns1 - returns2).mean() / (returns1 - returns2).std()", "def calculate_gear_ratio(front_gear, back_gear):\n return front_gear/back_gear", "def golden_ratio():\n\n return ratio(1)", "def sharpe_ratio(port_returns, risk_free_rate, asset_returns, weights):\n\n # calculate the standard deviation of the returns of the portfolio\n portfolio_standard_deviation = np.sqrt(portfolio_volatility(asset_returns, weights))\n\n # calculate the Sharpe ratio of the portfolio\n sr = (np.mean(port_returns) - risk_free_rate)/portfolio_standard_deviation\n\n return sr", "def sharpe_ratio(port_returns, risk_free_rate, asset_returns, weights):\n\n # calculate the standard deviation of the returns of the portfolio\n portfolio_standard_deviation = np.sqrt(portfolio_volatility(asset_returns, weights))\n\n # calculate the Sharpe ratio of the portfolio\n sr = (port_returns[-1] - risk_free_rate)/portfolio_standard_deviation\n\n return sr", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def sharpe_ratio(factor_returns, annualization_factor):\r\n\r\n return annualization_factor * factor_returns.mean() / factor_returns.std()", "def silver_ratio():\n\n return ratio(2)", "def bronze_ratio():\n\n return ratio(3)", "def calc_ratio_of_moves(game, player):\n player_factor = 1\n opp_factor = 1\n player_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n if not opp_moves:\n return float(\"inf\")\n elif not player_moves:\n return float(\"-inf\")\n else:\n return float(player_factor * len(player_moves) / (opp_factor * len(opp_moves)))", "def starsize(self, hipid):\n #if hipid<0 or len(self.hip_stars)<=hipid: return 0\n s = self.hip_stars[hipid]\n if s==None: return 0\n #return self.zerosize*(.8**(s[1]))\n #return self.zerosize-s[1]-2\n return self.dimmest_mag-s[1]+1", "def neg_Sharpe_ratio(weights, riskfree_rate, er, cov):\n returns = portfolio_return(weights, er)\n volatility = portfolio_vol(weights, cov)\n return -(returns - riskfree_rate)/volatility", "def downsample_ratio(self):\n return self.resolution / self.mip_resolution(0)", "def quality(a, rhof, rhog):\n\treturn 1/(1 + rhof/rhog*(1-a)/a)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a SnowflakeSource from a protobuf representation of a SnowflakeSource.
def from_proto(data_source: DataSourceProto):
    return SnowflakeSource(
        field_mapping=dict(data_source.field_mapping),
        database=data_source.snowflake_options.database,
        schema=data_source.snowflake_options.schema,
        table=data_source.snowflake_options.table,
        event_timestamp_column=data_source.event_timestamp_column,
        created_timestamp_column=data_source.created_timestamp_column,
        date_partition_column=data_source.date_partition_column,
        query=data_source.snowflake_options.query,
    )
[ "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def from_proto(data_source: DataSourceProto) -> Any:\n data_source_type = data_source.type\n if not data_source_type or (\n data_source_type\n not in list(_DATA_SOURCE_OPTIONS.keys())\n + [DataSourceProto.SourceType.CUSTOM_SOURCE]\n ):\n raise ValueError(\"Could not identify the source type being added.\")\n\n if data_source_type == DataSourceProto.SourceType.CUSTOM_SOURCE:\n cls = get_data_source_class_from_type(data_source.data_source_class_type)\n return cls.from_proto(data_source)\n cls = get_data_source_class_from_type(_DATA_SOURCE_OPTIONS[data_source_type])\n return cls.from_proto(data_source)", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def create_from(cls, source):\n if isinstance(source, str):\n if exists(expanduser(source)):\n return ContractInterface.from_file(source)\n return ContractInterface.from_michelson(source)\n return ContractInterface.from_micheline(source)", "def from_proto(cls, hive_options_proto: DataSourceProto.CustomSourceOptions):\n hive_configuration = pickle.loads(hive_options_proto.configuration)\n\n hive_options = cls(\n table_ref=hive_configuration.table_ref\n )\n\n return hive_options", "def from_source(cls, *args, **kwargs):\n raise NotImplementedError", "def make(self, source):\n if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def _MakeSource(self, source_crd):\n self.source = source.Source.New(self.mock_client, 'source-namespace',\n source_crd.source_kind,\n source_crd.source_api_category)\n self.source.name = 'source-for-my-trigger'", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def from_proto(cls, kinesis_options_proto: DataSourceProto.KinesisOptions):\n\n kinesis_options = cls(\n record_format=StreamFormat.from_proto(kinesis_options_proto.record_format),\n region=kinesis_options_proto.region,\n 
stream_name=kinesis_options_proto.stream_name,\n )\n\n return kinesis_options", "def fromstring(cls, source: str,\n frmat: str = 'table',\n **kwargs) -> 'Context':\n frmat = formats.Format[frmat]\n args = frmat.loads(source, **kwargs)\n if args.serialized is not None:\n return cls.fromdict(args.serialized)\n return cls(args.objects, args.properties, args.bools)", "def _proto2object(\n proto: CreateDatasetMessage_PB,\n ) -> \"CreateDatasetMessage\":\n\n return CreateDatasetMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n dataset=proto.dataset,\n metadata=dict(proto.metadata),\n reply_to=_deserialize(blob=proto.reply_to),\n platform=proto.platform,\n )", "def _record_from_source(es_dict):\n record_json = es_dict['_source']\n return ArxivRecord.from_json(record_json)", "def FromProto(cls, proto_obj):\n if not proto_obj.last_update_source:\n raise GameModelError('No update source specified in Game creation.')\n # TODO(P2): refactor all constructors into one base function like in tweets.\n return Game(id_str=proto_obj.id_str,\n teams=[Team.FromProto(tm) for tm in proto_obj.teams],\n scores=proto_obj.scores,\n name=proto_obj.name,\n tournament_id=proto_obj.tournament_id_str,\n tournament_name=proto_obj.tournament_name,\n game_status=proto_obj.game_status,\n division=proto_obj.division,\n league=proto_obj.league,\n age_bracket=proto_obj.age_bracket,\n sources=[GameSource.FromProto(proto_obj.last_update_source)],\n key=game_key(proto_obj))", "def test__WebhookSourceChannel__from_data():\n channel_id = 202302010002\n name = 'senya'\n \n data = {\n 'id': str(channel_id),\n 'name': name,\n }\n \n channel = WebhookSourceChannel.from_data(data)\n _assert_fields_set(channel)\n \n vampytest.assert_eq(channel.id, channel_id)\n vampytest.assert_eq(channel.name, name)", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def _proto2object(\n proto: CreateInitialSetUpMessage_PB,\n ) -> \"CreateInitialSetUpMessage\":\n\n return CreateInitialSetUpMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )", "def from_proto(cls, kafka_options_proto: DataSourceProto.KafkaOptions):\n watermark_delay_threshold = None\n if kafka_options_proto.HasField(\"watermark_delay_threshold\"):\n watermark_delay_threshold = (\n timedelta(days=0)\n if kafka_options_proto.watermark_delay_threshold.ToNanoseconds() == 0\n else kafka_options_proto.watermark_delay_threshold.ToTimedelta()\n )\n kafka_options = cls(\n kafka_bootstrap_servers=kafka_options_proto.kafka_bootstrap_servers,\n message_format=StreamFormat.from_proto(kafka_options_proto.message_format),\n topic=kafka_options_proto.topic,\n watermark_delay_threshold=watermark_delay_threshold,\n )\n\n return kafka_options" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the database of this snowflake source.
def database(self):
    return self.snowflake_options.database
[ "def source_db(self):\n return self._source_db", "def target_db(self):\n return self._target_db", "def replicate_source_db(self) -> str:\n return pulumi.get(self, \"replicate_source_db\")", "def source_db_info(self):\n return self._source_db_info", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def database():\n return _databases[_active_db]", "def get_db_name(self):\n\t\treturn conf.db_name", "def schema(self):\n return self.snowflake_options.schema", "def get_db(self):\n return self._db", "def default_database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"default_database\")", "def schema_database_name(self):\n if 'schema-database' in self._config:\n return self._config['schema-database']\n return None", "def get_db_name():\n return config.get('db_name')", "def get_db_path(self):\n return (\n self.spark_session.sql(f\"DESCRIBE DATABASE EXTENDED {self.database_name}\")\n .filter(\"database_description_item=='Location'\")\n .collect()[0]\n .database_description_value\n )", "def get_database_url(self):\n return self.config['dbase_path']", "def getUseDB(self):\n\t\tuse_db\t\t= self.getEnvironmentVariable( name='USE_DB', default='NULL' )\n\t\treturn use_db", "def db_name(self):\n return self._db_name", "def redshift_database(self):\n if not self._redshift_database:\n self._redshift_database = self.create_pipeline_object(\n object_class=RedshiftDatabase\n )\n return self._redshift_database", "def db(self) -> DB:\n return DB.get_db()", "def postgres_database(self):\n if not self._postgres_database:\n self._postgres_database = self.create_pipeline_object(\n object_class=PostgresDatabase\n )\n return self._postgres_database" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the schema of this snowflake source.
def schema(self):
    return self.snowflake_options.schema
[ "def schema(self):\n return self.table_info.schema", "def schema(self):\n if not self._schema:\n response = self.api.make_request('GET', '%s/schema' % self.path)\n self._schema = response.data\n \n return self._schema", "def schema(self):\n return self._schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def get_schema(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/streamingendpoints/%s/schema\" % (self.project_key, self.streaming_endpoint_name))", "def table_schema(self):\n return self._table_schema", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def schema(self) -> ArchiveSchema:\n return self.store.get_schema()", "def schema(self) -> Dict[str, Dict]:\n return self._query_provider.schema", "def schema_path(self):\n return self._schema_path", "def get_database_schema(self):\n\n cursor = self.db.cursor()\n cursor.execute('SELECT version FROM schema')\n results = cursor.fetchone()\n cursor.close()\n return results[0]", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def get_discover_schema(self, source_id):\n body = {\"sourceId\": source_id}\n url = f\"{self.base_url}/api/v1/sources/discover_schema\"\n response = requests.post(url, json=body)\n try:\n response_json = response.json()['catalog']\n except Exception as e:\n logging.exception(f\"Error getting schema for subscription_id: {self.subscription_id}\\nError {e}\")\n response_json = {}\n return response_json", "def get_schema(self) -> dict:", "def schema_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schema_name\")", "def schema(self) -> Dict[str, Dict]:\n return self._schema", "def get_meta_schema(self):\n return self._tc_meta_schema", "def getSchema( sourceDirectory ):\r\n if( sourceDirectory == settings.LEXISNEXIS_FILETAG ): return LexisNexisSchema()\r\n raise Exception( \"Filer for source <%s> is not registered in getSchema( source ).\" % ( sourceDirectory ) )", "def get_schema_defs():\n return SCHEMA_DEFS" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the table of this snowflake source.
def table(self):
    return self.snowflake_options.table
[ "def parse_source_table(self):\n name = self.control.source_name\n table = self._parse_source_table(name)\n return table", "def source_table_name(self):\n return self._source_table_name", "def source_tables(self):\n return self._source_tables", "def table(self):\n return self.reference.table", "def getTable(self):\n return self.db.table(self.entity)", "def read_table(self):\r\n return self.read_table_name(files.build_table_name())", "def get_table_name(self):\n return self._config['table']", "def table_name(self) -> str:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def parse_source_table_a(self):\n name = self.control.source_name_a\n table = self._parse_source_table(name)\n return table", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def get_analysis_table(self):\n\t\treturn self.analysisTable", "def parse_source_table_b(self):\n name = self.control.source_name_b\n table = self._parse_source_table(name)\n return table", "def get_catalog_source_results(self):\n return getattr(self.app, '_catalog_source_table', None)", "def retrieve_table(self):\n if self.use_local_table:\n self.retrieve_table_local()\n else:\n self.retrieve_table_from_url()", "def _select_table(self):\n\n return self.postgres.execute(f\"SELECT * FROM {self.table_name};\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a SnowflakeSource object to its protobuf representation.
def to_proto(self) -> DataSourceProto:
    data_source_proto = DataSourceProto(
        type=DataSourceProto.BATCH_SNOWFLAKE,
        field_mapping=self.field_mapping,
        snowflake_options=self.snowflake_options.to_proto(),
    )

    data_source_proto.event_timestamp_column = self.event_timestamp_column
    data_source_proto.created_timestamp_column = self.created_timestamp_column
    data_source_proto.date_partition_column = self.date_partition_column

    return data_source_proto
[ "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def to_proto(self) -> DataSourceProto:\n raise NotImplementedError", "def to_proto(self) -> DataSourceProto.CustomSourceOptions:\n hive_options_proto = DataSourceProto.CustomSourceOptions(\n configuration=pickle.dumps(self)\n )\n return hive_options_proto", "def _object2proto(self) -> SaveObjectMessage_PB:\n return SaveObjectMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n )", "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def from_proto(data_source: DataSourceProto) -> Any:\n data_source_type = data_source.type\n if not data_source_type or (\n data_source_type\n not in list(_DATA_SOURCE_OPTIONS.keys())\n + [DataSourceProto.SourceType.CUSTOM_SOURCE]\n ):\n raise ValueError(\"Could not identify the source type being added.\")\n\n if data_source_type == DataSourceProto.SourceType.CUSTOM_SOURCE:\n cls = get_data_source_class_from_type(data_source.data_source_class_type)\n return cls.from_proto(data_source)\n cls = get_data_source_class_from_type(_DATA_SOURCE_OPTIONS[data_source_type])\n return cls.from_proto(data_source)", "def to_proto(self) -> FeatureViewProto:\n meta = self.to_proto_meta()\n ttl_duration = self.get_ttl_duration()\n\n batch_source_proto = self.batch_source.to_proto()\n batch_source_proto.data_source_class_type = f\"{self.batch_source.__class__.__module__}.{self.batch_source.__class__.__name__}\"\n\n stream_source_proto = None\n if self.stream_source:\n stream_source_proto = self.stream_source.to_proto()\n stream_source_proto.data_source_class_type = f\"{self.stream_source.__class__.__module__}.{self.stream_source.__class__.__name__}\"\n\n spec = FeatureViewSpecProto(\n name=self.name,\n entities=self.entities,\n entity_columns=[field.to_proto() for field in self.entity_columns],\n features=[field.to_proto() for field in self.features],\n description=self.description,\n tags=self.tags,\n owner=self.owner,\n ttl=(ttl_duration if ttl_duration is not None else None),\n online=self.online,\n batch_source=batch_source_proto,\n stream_source=stream_source_proto,\n )\n\n return FeatureViewProto(spec=spec, meta=meta)", "def to_proto(self) -> DataSourceProto.KinesisOptions:\n\n 
kinesis_options_proto = DataSourceProto.KinesisOptions(\n record_format=self.record_format.to_proto(),\n region=self.region,\n stream_name=self.stream_name,\n )\n\n return kinesis_options_proto", "def get_source_unicode(obj):\n return inspect.getsource(obj).decode(get_encoding(obj))", "def to_proto(self) -> FeatureSetReferenceProto:\n return self.proto", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def test_external_source_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n external_source_git_model = {} # ExternalSourceGit\n external_source_git_model['git_repo_url'] = 'testString'\n external_source_git_model['git_token'] = 'testString'\n external_source_git_model['git_repo_folder'] = 'testString'\n external_source_git_model['git_release'] = 'testString'\n external_source_git_model['git_branch'] = 'testString'\n\n # Construct a json representation of a ExternalSource model\n external_source_model_json = {}\n external_source_model_json['source_type'] = 'local'\n external_source_model_json['git'] = external_source_git_model\n\n # Construct a model instance of ExternalSource by calling from_dict on the json representation\n external_source_model = ExternalSource.from_dict(external_source_model_json)\n assert external_source_model != False\n\n # Construct a model instance of ExternalSource by calling from_dict on the json representation\n external_source_model_dict = ExternalSource.from_dict(external_source_model_json).__dict__\n external_source_model2 = ExternalSource(**external_source_model_dict)\n\n # Verify the model instances are equivalent\n assert external_source_model == external_source_model2\n\n # Convert model instance back to dict and verify no loss of data\n external_source_model_json2 = external_source_model.to_dict()\n assert external_source_model_json2 == external_source_model_json", "def to_proto(self) -> FeatureSetProto:\n\n meta = FeatureSetMetaProto(\n created_timestamp=self.created_timestamp, status=self.status\n )\n\n spec = FeatureSetSpecProto(\n name=self.name,\n project=self.project,\n max_age=self.max_age,\n labels=self.labels,\n source=self.source.to_proto() if self.source is not None else None,\n features=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Feature\n ],\n entities=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Entity\n ],\n )\n\n return FeatureSetProto(spec=spec, meta=meta)", "def to_proto(self) -> DataSourceProto.KafkaOptions:\n\n kafka_options_proto = DataSourceProto.KafkaOptions(\n bootstrap_servers=self.bootstrap_servers,\n message_format=self.message_format.to_proto(),\n topic=self.topic,\n )\n\n return kafka_options_proto", "def make(self, source):\n if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def test__WebhookSourceChannel__to_data():\n channel_id = 202302010025\n name = 'senya'\n \n channel = WebhookSourceChannel(\n channel_id = channel_id,\n name = name,\n )\n \n expected_output = {\n 'id': str(channel_id),\n 'name': name,\n }\n \n 
vampytest.assert_eq(\n channel.to_data(defaults = True),\n expected_output,\n )", "def to_pb(self):\n _pb_obj = self.pb_model()\n _dj_field_map = {f.name: f for f in self._meta.get_fields()}\n for _f in _pb_obj.DESCRIPTOR.fields:\n _dj_f_name = self.pb_2_dj_field_map.get(_f.name, _f.name)\n if _dj_f_name not in _dj_field_map:\n LOGGER.warning(\"No such django field: {}\".format(_f.name))\n continue\n try:\n _dj_f_value, _dj_f_type = getattr(\n self, _dj_f_name), _dj_field_map[_dj_f_name]\n if _dj_f_type.is_relation:\n self._relation_to_protobuf(_pb_obj, _f, _dj_f_type,\n _dj_f_value)\n else:\n self._value_to_protobuf(_pb_obj, _f,\n _dj_f_type.get_internal_type(),\n _dj_f_value)\n except AttributeError as e:\n LOGGER.error(\"Fail to serialize field: {} for {}. Error: {}\".\n format(_dj_f_name, self._meta.model, e))\n raise DjangoPBModelError(\n \"Can't serialize Model({})'s field: {}. Err: {}\".format(\n _dj_f_name, self._meta.model, e))\n\n LOGGER.info(\"Coverted Protobuf object: {}\".format(_pb_obj))\n return _pb_obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string that can directly be used to reference this table in SQL.
def get_table_query_string(self) -> str: if self.database and self.table: return f'"{self.database}"."{self.schema}"."{self.table}"' elif self.table: return f'"{self.table}"' else: return f"({self.query})"
[ "def get_table_query_string(self) -> str:\n return f\"`{self.table_ref}`\"", "def get_table_query_string(self) -> str:\n if self.table_ref:\n return f\"`{self.table_ref}`\"\n else:\n return f\"({self.query})\"", "def table_name() -> str:\n pass", "def table_name(self) -> str:\n return pulumi.get(self, \"table_name\")", "def table_name(cls) -> str:\n return cls.TABLE", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def table_name(self):\n return self._table_name", "def get_expression(self) -> str:\r\n expression = self._expression.format(**self._table._sources)\r\n return f\"{expression} as {self._pname}\"", "def table(self):\n return self.reference.table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def table_name_str(self) -> str:\n table_name = self.table_name\n if isinstance(table_name, bytes):\n table_name = self.table_name.decode(self._encoding)\n return table_name", "def sql_for_tablespace(self, tablespace, inline=False):\n return \"ON %s\" % self.quote_name(tablespace)", "def tablespace_sql(self, tablespace, inline=False):\n return \"ON %s\" % self.quote_name(tablespace)", "def tablespace_sql(self, tablespace, inline=False):\n return ''", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)", "def TableName(cls):\n if cls._TABLE:\n return cls._TABLE\n name = cls.__name__\n return name[0].lower() + name[1:]", "def encodeTableName(self, schema, table):\r\n return '\"{}\".\"{}\"'.format(schema, table)", "def _get_table_as_string(self):\n tablelist = []\n tablelist.append(\"!%s_table_begin\" % self.geotype.lower())\n tablelist.append(\"\\t\".join(self.table.columns))\n for idx, row in self.table.iterrows():\n tablelist.append(\"\\t\".join(map(str, row)))\n tablelist.append(\"!%s_table_end\" % self.geotype.lower())\n return \"\\n\".join(tablelist)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a SnowflakeOptions from a protobuf representation of a snowflake option.
def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions): snowflake_options = cls( database=snowflake_options_proto.database, schema=snowflake_options_proto.schema, table=snowflake_options_proto.table, query=snowflake_options_proto.query, ) return snowflake_options
[ "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def from_proto(cls, hive_options_proto: DataSourceProto.CustomSourceOptions):\n hive_configuration = pickle.loads(hive_options_proto.configuration)\n\n hive_options = cls(\n table_ref=hive_configuration.table_ref\n )\n\n return hive_options", "def from_proto(cls, kafka_options_proto: DataSourceProto.KafkaOptions):\n\n kafka_options = cls(\n bootstrap_servers=kafka_options_proto.bootstrap_servers,\n message_format=StreamFormat.from_proto(kafka_options_proto.message_format),\n topic=kafka_options_proto.topic,\n )\n\n return kafka_options", "def from_proto(cls, kinesis_options_proto: DataSourceProto.KinesisOptions):\n\n kinesis_options = cls(\n record_format=StreamFormat.from_proto(kinesis_options_proto.record_format),\n region=kinesis_options_proto.region,\n stream_name=kinesis_options_proto.stream_name,\n )\n\n return kinesis_options", "def from_proto(cls, kafka_options_proto: DataSourceProto.KafkaOptions):\n watermark_delay_threshold = None\n if kafka_options_proto.HasField(\"watermark_delay_threshold\"):\n watermark_delay_threshold = (\n timedelta(days=0)\n if kafka_options_proto.watermark_delay_threshold.ToNanoseconds() == 0\n else kafka_options_proto.watermark_delay_threshold.ToTimedelta()\n )\n kafka_options = cls(\n kafka_bootstrap_servers=kafka_options_proto.kafka_bootstrap_servers,\n message_format=StreamFormat.from_proto(kafka_options_proto.message_format),\n topic=kafka_options_proto.topic,\n watermark_delay_threshold=watermark_delay_threshold,\n )\n\n return kafka_options", "def from_proto(cls, file_options_proto: DataSourceProto.FileOptions):\n file_options = cls(\n file_format=FileFormat.from_proto(file_options_proto.file_format),\n file_url=file_options_proto.file_url,\n )\n return file_options", "def from_proto(cls, redshift_options_proto: DataSourceProto.RedshiftOptions):\n\n redshift_options = cls(\n table=redshift_options_proto.table, query=redshift_options_proto.query,\n )\n\n return redshift_options", "def from_proto(cls, bigquery_options_proto: DataSourceProto.BigQueryOptions):\n\n bigquery_options = cls(\n table_ref=bigquery_options_proto.table_ref,\n query=bigquery_options_proto.query,\n )\n\n return bigquery_options", "def to_proto(self) -> DataSourceProto.CustomSourceOptions:\n hive_options_proto = DataSourceProto.CustomSourceOptions(\n configuration=pickle.dumps(self)\n )\n return hive_options_proto", "def from_dict(options):\n\n client_options = ClientOptions()\n\n for key, value in options.items():\n if hasattr(client_options, key):\n setattr(client_options, key, value)\n else:\n raise ValueError(f\"ClientOptions does not accept an option '{key}'\")\n\n return client_options", "def to_proto(self) -> DataSourceProto.KafkaOptions:\n\n kafka_options_proto = DataSourceProto.KafkaOptions(\n bootstrap_servers=self.bootstrap_servers,\n message_format=self.message_format.to_proto(),\n topic=self.topic,\n )\n\n return kafka_options_proto", "def to_proto(self) -> DataSourceProto.KafkaOptions:\n watermark_delay_threshold = None\n if self.watermark_delay_threshold is not None:\n watermark_delay_threshold = Duration()\n watermark_delay_threshold.FromTimedelta(self.watermark_delay_threshold)\n\n kafka_options_proto = DataSourceProto.KafkaOptions(\n 
kafka_bootstrap_servers=self.kafka_bootstrap_servers,\n message_format=self.message_format.to_proto(),\n topic=self.topic,\n watermark_delay_threshold=watermark_delay_threshold,\n )\n\n return kafka_options_proto", "def to_proto(self) -> DataSourceProto.BigQueryOptions:\n\n bigquery_options_proto = DataSourceProto.BigQueryOptions(\n table_ref=self.table_ref, query=self.query,\n )\n\n return bigquery_options_proto", "def from_options(cls, **kwargs):\n return cls(cls.default_options().set_values(kwargs))", "def _option_jschema_to_pb_option(opt_jschema_d):\n opt_id = opt_jschema_d['pb_option']['option_id']\n\n name = opt_jschema_d['pb_option']['name']\n default = opt_jschema_d['pb_option']['default']\n desc = opt_jschema_d['pb_option']['description']\n\n # This should be migrated to PacBio option type ids, Example pacbio.option_types.int32\n jschema_type = opt_jschema_d['pb_option']['type']\n\n pb_option_type_id = _jschema_to_pacbio_option_type_id(jschema_type)\n\n return PacBioOption(opt_id, name, default, desc, pb_option_type_id)", "def to_proto(self) -> DataSourceProto.FileOptions:\n\n file_options_proto = DataSourceProto.FileOptions(\n file_format=(\n None if self.file_format is None else self.file_format.to_proto()\n ),\n file_url=self.file_url,\n )\n\n return file_options_proto", "def to_proto(self) -> DataSourceProto.KinesisOptions:\n\n kinesis_options_proto = DataSourceProto.KinesisOptions(\n record_format=self.record_format.to_proto(),\n region=self.region,\n stream_name=self.stream_name,\n )\n\n return kinesis_options_proto", "def create_from_pb2(cls, pb2_obj: _BaseOptionsProto) -> 'BaseOptions':\n return BaseOptions(\n model_asset_path=pb2_obj.model_asset.file_name,\n model_asset_buffer=pb2_obj.model_asset.file_content)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts an SnowflakeOptionsProto object to its protobuf representation.
def to_proto(self) -> DataSourceProto.SnowflakeOptions: snowflake_options_proto = DataSourceProto.SnowflakeOptions( database=self.database, schema=self.schema, table=self.table, query=self.query, ) return snowflake_options_proto
[ "def to_proto(self) -> DataSourceProto.KafkaOptions:\n\n kafka_options_proto = DataSourceProto.KafkaOptions(\n bootstrap_servers=self.bootstrap_servers,\n message_format=self.message_format.to_proto(),\n topic=self.topic,\n )\n\n return kafka_options_proto", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def to_proto(self) -> DataSourceProto.BigQueryOptions:\n\n bigquery_options_proto = DataSourceProto.BigQueryOptions(\n table_ref=self.table_ref, query=self.query,\n )\n\n return bigquery_options_proto", "def to_proto(self) -> DataSourceProto.KafkaOptions:\n watermark_delay_threshold = None\n if self.watermark_delay_threshold is not None:\n watermark_delay_threshold = Duration()\n watermark_delay_threshold.FromTimedelta(self.watermark_delay_threshold)\n\n kafka_options_proto = DataSourceProto.KafkaOptions(\n kafka_bootstrap_servers=self.kafka_bootstrap_servers,\n message_format=self.message_format.to_proto(),\n topic=self.topic,\n watermark_delay_threshold=watermark_delay_threshold,\n )\n\n return kafka_options_proto", "def to_proto(self) -> DataSourceProto.FileOptions:\n\n file_options_proto = DataSourceProto.FileOptions(\n file_format=(\n None if self.file_format is None else self.file_format.to_proto()\n ),\n file_url=self.file_url,\n )\n\n return file_options_proto", "def to_proto(self) -> DataSourceProto.CustomSourceOptions:\n hive_options_proto = DataSourceProto.CustomSourceOptions(\n configuration=pickle.dumps(self)\n )\n return hive_options_proto", "def to_proto(self) -> DataSourceProto.KinesisOptions:\n\n kinesis_options_proto = DataSourceProto.KinesisOptions(\n record_format=self.record_format.to_proto(),\n region=self.region,\n stream_name=self.stream_name,\n )\n\n return kinesis_options_proto", "def to_pb2(self) -> _FaceDetectorGraphOptionsProto:\n base_options_proto = self.base_options.to_pb2()\n base_options_proto.use_stream_mode = (\n False if self.running_mode == _RunningMode.IMAGE else True\n )\n return _FaceDetectorGraphOptionsProto(\n base_options=base_options_proto,\n min_detection_confidence=self.min_detection_confidence,\n min_suppression_threshold=self.min_suppression_threshold,\n )", "def from_proto(cls, kafka_options_proto: DataSourceProto.KafkaOptions):\n\n kafka_options = cls(\n bootstrap_servers=kafka_options_proto.bootstrap_servers,\n message_format=StreamFormat.from_proto(kafka_options_proto.message_format),\n topic=kafka_options_proto.topic,\n )\n\n return kafka_options", "def from_proto(cls, kafka_options_proto: DataSourceProto.KafkaOptions):\n watermark_delay_threshold = None\n if kafka_options_proto.HasField(\"watermark_delay_threshold\"):\n watermark_delay_threshold = (\n timedelta(days=0)\n if kafka_options_proto.watermark_delay_threshold.ToNanoseconds() == 0\n else kafka_options_proto.watermark_delay_threshold.ToTimedelta()\n )\n kafka_options = cls(\n kafka_bootstrap_servers=kafka_options_proto.kafka_bootstrap_servers,\n message_format=StreamFormat.from_proto(kafka_options_proto.message_format),\n topic=kafka_options_proto.topic,\n watermark_delay_threshold=watermark_delay_threshold,\n )\n\n return kafka_options", "def to_proto(self) -> DataSourceProto.RedshiftOptions:\n\n redshift_options_proto = DataSourceProto.RedshiftOptions(\n table=self.table, query=self.query,\n )\n\n return 
redshift_options_proto", "def to_proto(self):\n prototxt = str()\n opts = self.options('solver')\n for opt in opts:\n val = self.get('solver',opt)\n prototxt += opt + ': ' + val + '\\n'\n return prototxt", "def from_proto(cls, file_options_proto: DataSourceProto.FileOptions):\n file_options = cls(\n file_format=FileFormat.from_proto(file_options_proto.file_format),\n file_url=file_options_proto.file_url,\n )\n return file_options", "def from_proto(cls, kinesis_options_proto: DataSourceProto.KinesisOptions):\n\n kinesis_options = cls(\n record_format=StreamFormat.from_proto(kinesis_options_proto.record_format),\n region=kinesis_options_proto.region,\n stream_name=kinesis_options_proto.stream_name,\n )\n\n return kinesis_options", "def to_pb2(self) -> _TextClassifierGraphOptionsProto:\n base_options_proto = self.base_options.to_pb2()\n classifier_options_proto = _ClassifierOptionsProto(\n score_threshold=self.score_threshold,\n category_allowlist=self.category_allowlist,\n category_denylist=self.category_denylist,\n display_names_locale=self.display_names_locale,\n max_results=self.max_results)\n\n return _TextClassifierGraphOptionsProto(\n base_options=base_options_proto,\n classifier_options=classifier_options_proto)", "def from_proto(cls, bigquery_options_proto: DataSourceProto.BigQueryOptions):\n\n bigquery_options = cls(\n table_ref=bigquery_options_proto.table_ref,\n query=bigquery_options_proto.query,\n )\n\n return bigquery_options", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def from_proto(cls, hive_options_proto: DataSourceProto.CustomSourceOptions):\n hive_configuration = pickle.loads(hive_options_proto.configuration)\n\n hive_options = cls(\n table_ref=hive_configuration.table_ref\n )\n\n return hive_options", "def from_proto(cls, redshift_options_proto: DataSourceProto.RedshiftOptions):\n\n redshift_options = cls(\n table=redshift_options_proto.table, query=redshift_options_proto.query,\n )\n\n return redshift_options" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a dict of lang>names, return a default one
def primary_name(names): langs = names.keys() if 'en' in langs: return names['en'] return names[langs[0]]
[ "def get_default_language():\r\n lang = getattr(settings, 'SOURCE_LANGUAGE_CODE', settings.LANGUAGE_CODE)\r\n default = [l[0] for l in settings.LANGUAGES if l[0] == lang]\r\n if len(default) == 0:\r\n # when not found, take first part ('en' instead of 'en-us')\r\n lang = lang.split('-')[0]\r\n default = [l[0] for l in settings.LANGUAGES if l[0] == lang]\r\n if len(default) == 0:\r\n raise ImproperlyConfigured(\"The [SOURCE_]LANGUAGE_CODE '%s' is not found in your LANGUAGES setting.\" % lang)\r\n return default[0]", "def get_dictionary_for(lang=\"ml_IN\"):\n return language_dictionary.get(lang, \"ml_IN\")", "def localizedWithFallback(field, allowEmpty=True):\n for lang in [''] + FallbackLanguages():\n t = field[lang]\n if allowEmpty:\n if isinstance(t, basestring):\n return t\n elif t:\n return t\n return u\"\"", "def _default(vars_tuple, key, default):\n for vars_dict in vars_tuple:\n if vars_dict and key in vars_dict:\n return vars_dict[key]\n return default", "def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t", "def get_default_language(default=None):\n try:\n import locale\n tag = locale.getlocale()[0]\n if tag is None:\n tag = locale.getdefaultlocale()[0]\n if tag is None:\n raise Error(\"No default language available\")\n return tag\n except Exception:\n pass\n return default", "def get_lang_dict(fortype: str, name: str | None = None) -> dict[str, str]:\n\tfrom frappe.translate import get_dict\n\n\treturn get_dict(fortype, name)", "def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"", "def get_default_lang_slug(instance):\n try:\n default_language = settings.LANGUAGES[0][0]\n slug_name = 'slug_%s' % default_language\n return getattr(instance, slug_name, '')\n\n except Exception:\n return ''", "def find_lang_param():\n if \"RU\" in forLastStrokes()[0]:\n TRIG_LNG = 'RU'\n return TRIG_LNG\n elif \"EN\" in forLastStrokes()[0]:\n TRIG_LNG = 'EN'\n return TRIG_LNG", "def get_en_name(arr):\n\n for l in arr:\n if l.get('language').get('name') == 'en':\n return l.get('name')", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def lang(n):\n return langs[n.platformID][n.langID]", "def get_default_variant(variants):\n for variant in variants:\n if variant.default:\n return variant", "def get_fallback_language():\n return settings.DEFAULT_LANGUAGE", "def language():\n return random.choice(get_dictionary('languages')).strip()", "def get_default_language2():\n from django.conf import settings\n return getattr(settings, 'TRANSLATIONS_DEFAULT_LANGUAGE', None) or \\\n getattr(settings, 'LANGUAGE_CODE', None)", "def test_defaultdict_config():\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n run_multilingual_pipeline(en_has_dependencies=False, fr_has_dependencies=False, lang_configs=lang_configs)\n\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n lang_configs[\"en\"] = {\"processors\": \"tokenize,pos,lemma,depparse\"}\n run_multilingual_pipeline(en_has_dependencies=True, fr_has_dependencies=False, lang_configs=lang_configs)", "def get_single_language(kanton):\n if (kanton == 
\"TI\"):\n return \"IT\"\n elif (kanton in [\"FR\", \"VD\", \"NE\", \"JU\", \"GE\", \"VS\"]):\n return \"FR\"\n else:\n return \"DE\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes an instance of the InstagramBot class.
def __init__(self, username = None, password = None): self.username = config['AUTH']['USERNAME'] self.password = config['AUTH']['PASSWORD'] self.login = config['URL']['LOGIN'] self.nav_url = config['URL']['NAV'] self.tag_url = config['URL']['TAGS'] self.direct_url = config['URL']['DM'] self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER']) self.stay_logged = False self.api = InstagramAPI(self.username, self.password)
[ "def start(self):\r\n self._instagram_api = InstagramAPI(mongo_api=self._mongo_api)\r\n self._inst_run()", "def __init__(self):\n self._attack_counter = 0\n self._attempt_counter = 0\n # initiliazing botnet client unique id\n self._botnet_identity = str(abs(hash(os.path.expanduser('~'))))\n # initializing queue \n self._botnet_queue = Queue()\n # botnet instance\n self._botnet_instance = None", "def bot_init(self):\n\n\t\t############################\n\t\t# REQUIRED: LOGIN DETAILS! #\n\t\t############################\n\t\tself.config['api_key'] = keys.consumer_key\t\n\t\tself.config['api_secret'] = keys.consumer_secret\n\t\tself.config['access_key'] = keys.access_token\n\t\tself.config['access_secret'] = keys.access_token_secret\n\n\n\t\t######################################\n\t\t# SEMI-OPTIONAL: OTHER CONFIG STUFF! #\n\t\t######################################\n\n\t\t# how often to tweet, in seconds (30 seconds minimum)\n\t\tself.config['tweet_interval'] = 1 * 60 # default: run once a minute\n\n\t\t# use this to define a (min, max) random range of how often to tweet\n\t\t# e.g., self.config['tweet_interval_range'] = (5*60, 10*60) # tweets every 5-10 minutes\n\t\tself.config['tweet_interval_range'] = None\n\n\t\t# only reply to tweets that specifically mention the bot\n\t\tself.config['reply_direct_mention_only'] = True\n\n\t\t# only include bot followers (and original tweeter) in @-replies\n\t\tself.config['reply_followers_only'] = False\n\n\t\t# fav any tweets that mention this bot?\n\t\tself.config['autofav_mentions'] = False\n\n\t\t# fav any tweets containing these keywords?\n\t\tself.config['autofav_keywords'] = []\n\n\t\t# follow back all followers?\n\t\tself.config['autofollow'] = False\n\n\t\t# State variable - will only be set if we are not loading a previously saved state \n\t\tself.state['last_image_id'] = 0\n\n\t\tself.log(\"Custom Initalization Complete\")", "def instagram(self, instagram):\n\n self._instagram = instagram", "def __init__(self, username, password, bot, channel):\n super().__init__(username, password)\n\n self.queue = deque()\n self.ingame_cog = Ingame(bot)\n\n self.bot = bot\n self.channel = channel\n self.chat_breakout = False\n self.loop = asyncio.get_event_loop()\n self.ingame_cog.is_pycraft_instance = True", "def __init__(self, mongo_api, cnn_model):\r\n self._mongo_api = mongo_api\r\n self._cnn_model = cnn_model\r\n\r\n self._instagram_api = None", "def __init__(self, config):\n self._slack_client = self._connect(config[\"slack_bot_token\"])\n self.bot_id = self._get_user_id()\n self.default_channel = config[\"default_channel\"]", "def __init__(self):\n self.config_data = get_config()\n\n # initialize twitter api\n auth = tweepy.OAuthHandler(self.config_data['auth']['api_key'],\n self.config_data['auth']['api_secret_key'])\n auth.set_access_token(self.config_data['auth']['access_token'],\n self.config_data['auth']['access_token_secret'])\n\n self.twitter_api = tweepy.API(auth)\n try:\n self.twitter_api.verify_credentials()\n except ValueError:\n LOGGER.error(\"Error while authenticating to twitter\")\n self.twitter_api = None\n\n try:\n self.news_api = NewsApiClient(self.config_data['news']['api_key'])\n self.news_sources = self.config_data['news']['sources'].split(',')\n except ValueError:\n LOGGER.error(\"Error while authenticating to news api\")\n self.news_api = None", "def __init__(self, im_prefix = '/', muc_prefix = '!' 
):\n\n prefix = self.botconfig.find('prefix')\n if prefix is None:\n self.im_prefix = im_prefix\n self.muc_prefix = muc_prefix\n else:\n self.im_prefix = prefix.attrib.get('im', im_prefix)\n self.muc_prefix = prefix.attrib.get('muc', muc_prefix)\n\n self.__event = threading.Event()\n CommandBot.start(self)", "def __init__(self, *, session=None, api_id, api_hash):\n\n self.telethon = TelegramClient(session, api_id, api_hash)", "def __init__(self):\n\n # This environment variable should be set before using the bot\n self.token = os.environ['STATS_BOT_TOKEN']\n\n\n # These will be checked against as substrings within each\n # message, so different variations are not required if their\n # radix is present (e.g. \"all\" covers \"/all\" and \"ball\")\n self.menu_trigger = ['/all', '/stats']\n self.loan_stats_trigger = ['/loans']\n self.il_trigger = ['/IL']\n self.assets_trigger = ['/assets']\n\n\n # Stops runtime if the token has not been set\n if self.token is None:\n raise RuntimeError(\n \"FATAL: No token was found. \" + \\\n \"You might need to specify one or more environment variables.\")\n\n # Configures logging in debug level to check for errors\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)", "def __init__(self):\n \n self.r = reddit.Reddit(user_agent = \"null\")\n self.limit = 0\n self.logged_in = False\n self.info = \"null\"", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self):\n self.bot_manager = BotManager()\n self.vision_manager = VisionManager()\n return", "def __init__(self, imag_bot, block_, player_loc):\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc", "def __init__(self):\n self.emotions_list = EmotionsList('NRC-Emotion-Intensity-Lexicon-v1.txt')\n self.tweets_list = None\n self.nickname = None", "def __init__(self, config):\n self.config = config\n\n self.slack_client = SlackClient(self.config.SLACK_TOKEN)", "def __init__(self, settings):\n self.connection = self._init_connection(settings)", "def __init__(self):\n self.client = rest.TwilioRestClient(\n settings.TWILIO_ACCOUNT_SID,\n settings.TWILIO_AUTH_TOKEN,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method gets a list of users who like a post
def get_likes_list(self, username): api = self.api api.searchUsername(username) result = api.LastJson username_id = result['user']['pk'] #Gets the user ID user_posts = api.getUserFeed(username_id) # gets the user feed result = api.LastJson media_id = result['items'][0]['id'] #gets the most recent post api.getMediaLikers(media_id) #gets users who liked users = api.LastJson('users') for user in users: #appends the users to the list users_list.append({'pk':user['pk'], 'username':user['username']})
[ "def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))", "def get_all_likes(obj):\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def get_user_likes(self, user, data, *args, **kwargs):\n date_from = make_aware(from_date_to_datetime(data.get('date_from')))\n date_to = make_aware(from_date_to_datetime(data.get('date_to')))\n\n result = Like.objects.filter(\n Q(create_at__gte=date_from) &\n Q(create_at__lte=date_to) &\n # Q(user=user) # like made by user\n Q(post__user=user) # like made for user's post\n ).annotate(\n day=TruncDay('create_at', output_field=DateField())\n ).values('day').annotate(count=Count('id'))\n return result", "def likes(self):\n return UserEntity.gql(\"WHERE liked_posts = :1\", self.key().id()).count()", "def check_user_liked(cls, user, post):\n likes = Like.gql(\"WHERE author = :1 AND post = :2\", user.key(),\n post.key())\n return likes.count()", "def like_user_posts(self, user:str, n_posts:int, like:bool=True):\n\n action = 'Like' if like else 'Unlike'\n\n self._nav_user(user)\n\n imgs = []\n elements = self._find_element(EC.presence_of_all_elements_located((By.CLASS_NAME, '_9AhH0')))\n imgs.extend(elements)\n\n for img in imgs[:n_posts]:\n img.click() \n time.sleep(1) \n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action)).click()\n except Exception as e:\n LOGGER.error(e)\n\n self.driver.find_elements_by_class_name('ckWGn')[0].click()", "def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked", "def get_user_posts(self, request):\n post_objects = Post.objects.filter(liked_users__id=request.user.id)\n avg_user_liked_post_weight = self.__avg_user_interested_post_weight(post_objects)\n queryset = self.__user_interested_post_filter(avg_user_liked_post_weight)\n context = {'user':request.user}\n serializer = PostSerializer(queryset, many=True, context=context)\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)", "def by_post(cls, post):\n likes = Like.gql(\"WHERE post = :1\", post.key())\n return likes", "def get_likes():\n like_models = db.session.query(Like).all()\n return jsonify([{\n \"user_id\": like.user_id,\n \"blog_id\": like.blog_id\n } for like in like_models])", "def getLikeCommentOfertas(self, user, listado_ofertas, filtrar_like):\n listado_ofertas_likes = []\n if filtrar_like:\n for elemento in listado_ofertas:\n try:\n oferta = {}\n oferta[\"oferta\"] = elemento\n oferta[\"like\"] = LikeOferta.objects.filter(oferta=elemento).count()\n oferta[\"comment\"] = Comments.objects.filter(page=elemento).count()\n if user.is_authenticated():\n LikeOferta.objects.get(oferta=elemento, usuario=user)\n oferta[\"likeOferta\"] = True\n else:\n oferta[\"likeOferta\"] = False\n listado_ofertas_likes.append(oferta)\n except ObjectDoesNotExist, e:\n pass\n else:\n for elemento in listado_ofertas:\n oferta = {}\n oferta[\"oferta\"] = elemento\n oferta[\"like\"] = 
LikeOferta.objects.filter(oferta=elemento).count()\n oferta[\"comment\"] = Comments.objects.filter(page=elemento).count()\n try:\n if user.is_authenticated():\n LikeOferta.objects.get(oferta=elemento, usuario=user)\n oferta[\"likeOferta\"] = True\n else:\n oferta[\"likeOferta\"] = False\n except ObjectDoesNotExist, e:\n oferta[\"likeOferta\"] = False\n listado_ofertas_likes.append(oferta)\n return listado_ofertas_likes", "def get_users_who_liked_object(*, obj: 'Model'):\n ct = ContentType.objects.get_for_model(obj)\n\n return (\n User.objects\n .filter(\n likes__content_type=ct,\n likes__object_id=obj.pk\n )\n )", "def get_likes(user: str, only_place: bool = False) -> list:\n global SECRETS, PARSE_SERVER_URL, HEADERS\n result = []\n \n constraint = {\n \"where\": json.dumps({\n \"user\": {\n \"__type\": \"Pointer\",\n \"className\": \"_User\",\n \"objectId\": user\n }\n })\n }\n \n url = PARSE_SERVER_URL + \"/parse/classes/Like?\" + urlencode(constraint)\n \n response = r.get(url=url, headers=HEADERS)\n \n if response.status_code != 200:\n print(\"Request on likes could not be completed\")\n print(response.json())\n exit(1)\n \n else:\n _likes = response.json().get(\"results\", [])\n for _like in _likes:\n if not only_place:\n result.append({\n \"user\": _like[\"user\"][\"objectId\"],\n \"place\": _like[\"place\"][\"objectId\"]\n })\n else:\n result.append(_like[\"place\"][\"objectId\"])\n \n return result", "def post_likes(self):\n return PostLike.query().filter(PostLike.post_key == self.key)", "async def generate_likes(self):\n if self.__exhausted_limit('likes') or not User._total_posts:\n return\n\n await asyncio.gather(*[\n\n self.like(pid)\n for pid in random.choices(User._total_posts, k=self.left_likes)\n ])", "def extract_likes(user_to_scrape):\n try:\n\n url_favorites = []\n\n print(\"Extracting Likes, please wait...!\")\n\n favorites = tweepy.Cursor(api.favorites, user_to_scrape, count=200).items()\n\n for tweet in favorites:\n url_favorites.append(\"https://twitter.com/i/web/status/\" + tweet.id_str)\n\n print(\"Likes: \" + str(len(url_favorites)))\n return url_favorites\n\n except Exception as error:\n print(\"extract_likes function failed.\")\n print(\"Error details: \" + error)", "def is_liked(obj, user) ->bool:\n\tif not user.is_authenticated:\n\t\treturn False\n\tobj_type = ContentType.objects.get_for_model(obj):\n\tlikes = Like.objects.filter(\n\t\tcontent_type = obj_type, object_id=obj.id, user=user)\n\treturn likes.exists()\n\n\tdef get_all_likes(obj):\n\t\t\"\"\"\n\t\t\tGets all users, who liked object\n\t\t\"\"\"\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def can_user_like(user, post_id, username):\n return (not BlogPostLikes.has_user_liked(post_id, user.username) and\n not user.username == username) # pylint: disable=redefined-builtin", "def get_likes(self):\n source, edge = self.id, \"likes\"\n return User.graph().get_connections(source, edge, limit=100000)[\"data\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }