Columns:
  query: string, length 9 to 9.05k
  document: string, length 10 to 222k
  negatives: list, length 19 to 20
  metadata: dict
Generate a ``gnsstime`` object from a year, the day of year, and optionally second of day.
def fromdoy(cls, year, doy=1, sod=0):
    # Find the day and month
    month = 1
    while month <= 12 and doy - calendar.monthrange(year, month)[1] > 0:
        doy -= calendar.monthrange(year, month)[1]
        month += 1
    day = doy

    # Find the hour, minute, second, microsecond (if `sod` was a float)
    hour, rest = divmod(sod, 3600)
    minute, second = divmod(rest, 60)
    microsecond, second = math.modf(second)

    # Convert to integers
    month = math.floor(month)
    day = math.floor(day)
    hour = math.floor(hour)
    minute = math.floor(minute)
    second = math.floor(second)
    microsecond, second = math.modf(second)
    microsecond = math.floor(microsecond * 1e6)
    return gnsstime(year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)
[ "def greenwich_sidereal_time(year,doy):\n year_from_1966 = year-1966\n dt = (year_from_1966*365 + int((year_from_1966 + 1)/4.) + int(doy)-1)/36525.\n dst = 0.278329562 + (8640184.67*dt+0.0929*dt**2)/86400\n gst0 = dst % 1 # GST on Jan. 0 of current year\n return 24*(gst0 + (doy % 1)/0.997269566) % 24", "def get_year():\r\n try:\r\n ts = cache.get('year')\r\n if ts:\r\n return ts\r\n except AppRegistryNotReady:\r\n pass\r\n\r\n now = datetime.now()\r\n try:\r\n ts = Year.objects.get(Q(Begin__lte=now) & Q(End__gte=now))\r\n except Year.DoesNotExist:\r\n y = now.year\r\n if now.month < 8:\r\n # second half year\r\n name = '{}-{}'.format(y - 1, y)\r\n start = date(year=y - 1, month=8, day=1)\r\n end = date(year=y, month=7, day=31)\r\n else:\r\n name = '{}-{}'.format(y, y + 1)\r\n start = date(y, month=8, day=1)\r\n end = date(year=y + 1, month=7, day=31)\r\n ts = Year(\r\n Name=name,\r\n Begin=start,\r\n End=end\r\n )\r\n ts.save()\r\n cache.set('year', ts, settings.STATIC_OBJECT_CACHE_DURATION)\r\n return ts", "def get_day_of_year(time: datetime) -> int:\n return time.timetuple().tm_yday - 1", "def getYear():", "def get_year():\n return dt.now().year", "def test_unix_sec_to_string_year(self):\n\n this_time_string = time_conversion.unix_sec_to_string(\n UNIX_TIME_SEC, TIME_FORMAT_YEAR)\n self.assertTrue(this_time_string == TIME_STRING_YEAR)", "def from_time(year=None, month=None, day=None, hours=None, minutes=None, seconds=None, microseconds=None, timezone=None):\n \n \n def str_or_stars(i, length):\n if i is None:\n return \"*\" * length\n else:\n return str(i).rjust(length, \"0\")\n \n \n wmi_time = \"\"\n wmi_time += str_or_stars(year, 4)\n wmi_time += str_or_stars(month, 2)\n wmi_time += str_or_stars(day, 2)\n wmi_time += str_or_stars(hours, 2)\n wmi_time += str_or_stars(minutes, 2)\n wmi_time += str_or_stars(seconds, 2)\n wmi_time += \".\"\n wmi_time += str_or_stars(microseconds, 6)\n if timezone >= 0:\n wmi_time += \"+\"\n else:\n wmi_time += \"-\"\n timezone = abs(timezone)\n wmi_time += str_or_stars(timezone, 3)\n \n return wmi_time", "def DateAddYears(Seconds, Count):\n if not isinstance(Count, (int, long)):\n _Throw(\"Count argument not an int!\")\n\n dtd = datetime.date.fromtimestamp(Seconds)\n if not Count == 0:\n if (dtd.month == 2) and (dtd.day == 29):\n dtd = dtd.replace(day=28)\n dtd = dtd.replace(year=(dtd.year + Count))\n return mktime(dtd.timetuple())", "def time_year_plus_frac(ds, time_name):\n\n # this is straightforward if time has units='days since 0000-01-01' and calendar='noleap'\n # so convert specification of time to that representation\n\n # get time values as an np.ndarray of cftime objects\n if np.dtype(ds[time_name]) == np.dtype(\"O\"):\n tvals_cftime = ds[time_name].values\n else:\n tvals_cftime = cftime.num2date(\n ds[time_name].values,\n ds[time_name].attrs[\"units\"],\n ds[time_name].attrs[\"calendar\"],\n )\n\n # convert cftime objects to representation mentioned above\n tvals_days = cftime.date2num(\n tvals_cftime, \"days since 0000-01-01\", calendar=\"noleap\"\n )\n\n return tvals_days / 365.0", "def get_time_initializer(self):\n (_hour, _minute, _seconds,\n _month, _day_of_month, _year,\n gmt_offset, _DAYLIGHT_SAVINGS_ENABLED) = self._get_time()\n date_string = \"20\" + str(_year).zfill(2) + \"-\" + \\\n str(_month).zfill(2) + \"-\" + \\\n str(_day_of_month).zfill(2) + \"T\" + \\\n str(_hour).zfill(2) + \\\n \":\" + str(_minute).zfill(2) + \\\n \":\" + str(_seconds).zfill(2)\n return date_string", "def get_year_and_semester(course_run, 
course_run_key):\n match = re.search(\"[1|2|3]T[0-9]{4}\", course_run_key) # e.g. \"3T2019\" -> Semester \"3\", Year \"2019\"\n if match:\n year = int(match.group(0)[-4:])\n semester = semester_mapping.get(match.group(0)[-6:-4])\n else:\n semester = None\n if course_run.get(\"start\"):\n year = course_run.get(\"start\")[:4]\n else:\n year = None\n\n log.debug(f\"{course_run_key} {year} {semester}\")\n return year, semester", "def fromjd50(cls, jd50):\n jd = jd50 + JD_1950\n return gnsstime.fromjd(jd)", "def fromjd(cls, jd):\n return gnsstime.frommjd(jd - JD)", "def get_year(year):\n years = factory.get_elem_solo(Year, year)\n return years", "def __init__(self, year: int, start_m: int = 0, end_m: int = 11):\n self._year = year\n self._first = year_starts_on(year)\n self._start_m = min(start_m, end_m)\n self._end_m = max(start_m, end_m)", "def gen_date(date, year=None):\n\n year = parse_year(year) or parse_year(date)\n if year:\n return ' (%s).' % year\n return ''", "def get_time_and_season(self):\r\n # get the current time as parts of year and parts of day\r\n # returns a tuple (years,months,weeks,days,hours,minutes,sec)\r\n time = gametime.gametime(format=True)\r\n month, hour = time[1], time[4]\r\n season = float(month) / MONTHS_PER_YEAR\r\n timeslot = float(hour) / HOURS_PER_DAY\r\n\r\n # figure out which slots these represent\r\n if SEASONAL_BOUNDARIES[0] <= season < SEASONAL_BOUNDARIES[1]:\r\n curr_season = \"spring\"\r\n elif SEASONAL_BOUNDARIES[1] <= season < SEASONAL_BOUNDARIES[2]:\r\n curr_season = \"summer\"\r\n elif SEASONAL_BOUNDARIES[2] <= season < 1.0 + SEASONAL_BOUNDARIES[0]:\r\n curr_season = \"autumn\"\r\n else:\r\n curr_season = \"winter\"\r\n\r\n if DAY_BOUNDARIES[0] <= timeslot < DAY_BOUNDARIES[1]:\r\n curr_timeslot = \"night\"\r\n elif DAY_BOUNDARIES[1] <= timeslot < DAY_BOUNDARIES[2]:\r\n curr_timeslot = \"morning\"\r\n elif DAY_BOUNDARIES[2] <= timeslot < DAY_BOUNDARIES[3]:\r\n curr_timeslot = \"afternoon\"\r\n else:\r\n curr_timeslot = \"evening\"\r\n\r\n return curr_season, curr_timeslot", "def test_string_to_unix_sec_year(self):\n\n this_time_unix_sec = time_conversion.string_to_unix_sec(\n TIME_STRING_YEAR, TIME_FORMAT_YEAR)\n self.assertTrue(this_time_unix_sec == UNIX_TIME_YEAR_SEC)", "def get_data_date_interval_for_year(year: int = 2022) -> tuple[datetime, datetime]:\n now = datetime.now(tz=ZoneInfo(\"Europe/Paris\"))\n\n date_interval = (\n datetime(year, 1, 1, tzinfo=ZoneInfo(\"Europe/Paris\")),\n datetime(year + 1, 1, 1, tzinfo=ZoneInfo(\"Europe/Paris\")),\n )\n date_start, date_end = date_interval\n\n if year == datetime.utcnow().year:\n max_date: datetime = now - timedelta(days=(now.toordinal() % 7) - 1)\n date_end = max_date.replace(hour=0, minute=0, second=0, microsecond=0)\n\n return date_start, date_end" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a ``gnsstime`` object from a Julian Day.
def fromjd(cls, jd):
    return gnsstime.frommjd(jd - JD)
[ "def _fromJulian(self, j):\n days = j - 40587 # From Jan 1 1900\n sec = days * 86400.0\n return time.gmtime(sec)", "def JulianDay():", "def get_JD(year, month, day, h, m, s):\n return get_JDN(year, month, day) + (float(h) - 12) / float(24) + float(m) / 1440 + float(s) / 86400", "def fromjd50(cls, jd50):\n jd = jd50 + JD_1950\n return gnsstime.fromjd(jd)", "def fromdoy(cls, year, doy=1, sod=0):\n # Find the day and month\n month = 1\n while month <= 12 and doy - calendar.monthrange(year, month)[1] > 0:\n doy -= calendar.monthrange(year, month)[1]\n month += 1\n day = doy\n\n # Find the hour, minute, second, microsecond (if `sod` was a float)\n hour, rest = divmod(sod, 3600)\n minute, second = divmod(rest, 60)\n microsecond, second = math.modf(second)\n\n # Convert to integers\n month = math.floor(month)\n day = math.floor(day)\n hour = math.floor(hour)\n minute = math.floor(minute)\n second = math.floor(second)\n microsecond, second = math.modf(second)\n microsecond = math.floor(microsecond * 1e6)\n return gnsstime(year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)", "def greenwich_sidereal_time(year,doy):\n year_from_1966 = year-1966\n dt = (year_from_1966*365 + int((year_from_1966 + 1)/4.) + int(doy)-1)/36525.\n dst = 0.278329562 + (8640184.67*dt+0.0929*dt**2)/86400\n gst0 = dst % 1 # GST on Jan. 0 of current year\n return 24*(gst0 + (doy % 1)/0.997269566) % 24", "def now_jd(self):\n now_utc = self.now()\n J2000 = 2451545.0\n J2000_date = datetime(2000, 1, 1, 12, 00, 00) # UTC time of J2000\n delta = now_utc - J2000_date\n return J2000 + delta.total_seconds() / (60.0 * 60 * 24)", "def JDplusSeconds(JD, t):\n return JD + t/(3600*24)", "def to_JulianDay(date):\n year1 = 1721424.5\n # Need to compute days fraction because .toordinal only computes floor(days)\n hh = date.hour\n mm = date.minute\n ss = date.second\n ms = date.microsecond\n fraction = hh / 24 + mm / (24 * 60) + ss / (24 * 60 * 60) + ms / (24 * 60 * 60 * 10 ** 6)\n t = date.toordinal() + year1 + fraction\n return t", "def toJ2000(dt):\n return (dt - J2000_EPOCH).total_seconds()", "def get_JDN(year, month, day):\n a = (14 - int(month)) // 12\n y = int(year) + 4800 - a\n m = int(month) + 12 * a - 3\n JDN = int(day) + (153 * m + 2) // 5 + 365 * y + y // 4 - y // 100 + y // 400 - 32045\n return JDN", "def make_day_julian(isodate):\n dt = time.strptime(isodate, \"%Y-%m-%d\")\n return int(time.strftime(\"%j\", dt))", "def get_current_JD():\n date = datetime.now().isoformat()\n t = Time(date, format='isot', scale='utc')\n jd = t.jd\n return jd", "def sunpos(jd=None):\n if jd: t = astropy.time.Time(jd,format='jd')\n else: t = astropy.time.Time(time.time(),format='unix')\n sun = astropy.coordinates.get_sun(time=t)\n return sun.ra.deg, sun.dec.deg", "def mean_sidereal_time_greenwich(y, m, d):\n\n jd = julian_day(y, m, d) - J2000\n t = jd/36525.0\n return (280.46061837 + 360.98564736629*jd + 0.000387933*t*t - t*t*t/38710000.0) % 360.0", "def datetime2julian(date):\n\n # January 1, 2000 at midday corresponds to JD = 2451545.0\n reference=datetime.datetime(year=2000,month=1,day=1,hour=12,minute=0,second=0,microsecond=0)\n\n temp=date-reference\n\n return 2451545+temp.days+(temp.seconds+temp.microseconds*1.e-6)/(24*3600)", "def jd_to_sec( jd):\n jd_f = float( jd)\n return 86400.0 * (jd_f - int(jd_f))", "def julian2datetimeindex(jd: np.ndarray, tz: pytz.BaseTzInfo = pytz.UTC):\n return pd.DatetimeIndex(jd2dt(jd), tz=tz)", "def iso_first(cls):\n # converts MJD to unix timestamp\n return 
sa.func.to_timestamp((cls.mjd_first - 40_587) * 86400.0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a ``gnsstime`` object from a Julian Day at 1950.
def fromjd50(cls, jd50):
    jd = jd50 + JD_1950
    return gnsstime.fromjd(jd)
[ "def fromjd(cls, jd):\n return gnsstime.frommjd(jd - JD)", "def _fromJulian(self, j):\n days = j - 40587 # From Jan 1 1900\n sec = days * 86400.0\n return time.gmtime(sec)", "def greenwich_sidereal_time(year,doy):\n year_from_1966 = year-1966\n dt = (year_from_1966*365 + int((year_from_1966 + 1)/4.) + int(doy)-1)/36525.\n dst = 0.278329562 + (8640184.67*dt+0.0929*dt**2)/86400\n gst0 = dst % 1 # GST on Jan. 0 of current year\n return 24*(gst0 + (doy % 1)/0.997269566) % 24", "def fromdoy(cls, year, doy=1, sod=0):\n # Find the day and month\n month = 1\n while month <= 12 and doy - calendar.monthrange(year, month)[1] > 0:\n doy -= calendar.monthrange(year, month)[1]\n month += 1\n day = doy\n\n # Find the hour, minute, second, microsecond (if `sod` was a float)\n hour, rest = divmod(sod, 3600)\n minute, second = divmod(rest, 60)\n microsecond, second = math.modf(second)\n\n # Convert to integers\n month = math.floor(month)\n day = math.floor(day)\n hour = math.floor(hour)\n minute = math.floor(minute)\n second = math.floor(second)\n microsecond, second = math.modf(second)\n microsecond = math.floor(microsecond * 1e6)\n return gnsstime(year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)", "def JulianDay():", "def get_JD(year, month, day, h, m, s):\n return get_JDN(year, month, day) + (float(h) - 12) / float(24) + float(m) / 1440 + float(s) / 86400", "def toJ2000(dt):\n return (dt - J2000_EPOCH).total_seconds()", "def mean_sidereal_time_greenwich(y, m, d):\n\n jd = julian_day(y, m, d) - J2000\n t = jd/36525.0\n return (280.46061837 + 360.98564736629*jd + 0.000387933*t*t - t*t*t/38710000.0) % 360.0", "def get_JDN(year, month, day):\n a = (14 - int(month)) // 12\n y = int(year) + 4800 - a\n m = int(month) + 12 * a - 3\n JDN = int(day) + (153 * m + 2) // 5 + 365 * y + y // 4 - y // 100 + y // 400 - 32045\n return JDN", "def to_JulianDay(date):\n year1 = 1721424.5\n # Need to compute days fraction because .toordinal only computes floor(days)\n hh = date.hour\n mm = date.minute\n ss = date.second\n ms = date.microsecond\n fraction = hh / 24 + mm / (24 * 60) + ss / (24 * 60 * 60) + ms / (24 * 60 * 60 * 10 ** 6)\n t = date.toordinal() + year1 + fraction\n return t", "def now_jd(self):\n now_utc = self.now()\n J2000 = 2451545.0\n J2000_date = datetime(2000, 1, 1, 12, 00, 00) # UTC time of J2000\n delta = now_utc - J2000_date\n return J2000 + delta.total_seconds() / (60.0 * 60 * 24)", "def iso_first(cls):\n # converts MJD to unix timestamp\n return sa.func.to_timestamp((cls.mjd_first - 40_587) * 86400.0)", "def julian_centuries(t=None):\n DAYS_IN_YEAR = 36525.0\n\n result = (julian_day(t) - JULIAN_DAY_ON_NOON01JAN1900) / DAYS_IN_YEAR\n return result", "def googledate_fromtimestamp(posixstamp: int) -> DatetimeWithNanoseconds:\n return DatetimeWithNanoseconds.utcfromtimestamp(int(posixstamp/1000))", "def jdaten(njour,s):\n d=datelundi(s)+timedelta(days=njour-1)\n return str(d.day)+\"/\"+str(d.month)+\"/\"+str(d.year-2000)", "def greenwich_mean_sidereal_time(model_time):\n jul_centuries = days_from_2000(model_time) / 36525.0\n theta = 67310.54841 + jul_centuries * (876600 * 3600 + 8640184.812866 + jul_centuries *\n (0.093104 - jul_centuries * 6.2 * 10e-6))\n\n theta_radians = np.deg2rad(theta / 240.0) % (2 * np.pi)\n\n if theta_radians < 0:\n theta_radians += 2*np.pi\n\n return theta_radians", "def make_day_julian(isodate):\n dt = time.strptime(isodate, \"%Y-%m-%d\")\n return int(time.strftime(\"%j\", dt))", "def timestamp(self):\n def get_tstp(y, mo, d, h, mi, 
s):\n ts = time.strptime(str(y) + '-' + str(mo) + '-' + str(d) + 'T' + str(h) + ':' + \\\n str(mi) + ':' + str(s), '%Y-%m-%dT%H:%M:%S')\n return time.mktime(ts)\n y = 1970\n mo = 1\n d = 1\n h = 0\n mi = 0\n s = 0\n # syntacic hack - 'while' stmt is not important, but 'break' makes there goto stmt\n while 1:\n if self._content['year'] is None: break\n y = self._content['year']\n if self._content['month'] is None: break\n mo = self._content['month']\n if self._content['day'] is None: break\n d = self._content['day']\n if self._content['hour'] is None: break\n h = self._content['hour']\n if self._content['minute'] is None: break\n mi = self._content['minute']\n if self._content['second'] is None: break\n s = self._content['second']\n break\n if y < 1970: return 0.0\n return get_tstp(y, mo, d, h, mi, s)", "def datetime2julian(date):\n\n # January 1, 2000 at midday corresponds to JD = 2451545.0\n reference=datetime.datetime(year=2000,month=1,day=1,hour=12,minute=0,second=0,microsecond=0)\n\n temp=date-reference\n\n return 2451545+temp.days+(temp.seconds+temp.microseconds*1.e-6)/(24*3600)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a ``gnsstime`` object from a numpy datetime64.
def fromdatetime64(cls, datetime64):
    return gnsstime.utcfromtimestamp(datetime64.astype('O') / 1e9)
[ "def datetime64_to_datetime(np_datetime64: datetime64) -> datetime.datetime:\n\n return datetime.datetime.utcfromtimestamp((np_datetime64 - datetime64(0, 's')) / timedelta64(1, 's'))", "def get_obs_datetime_obj(cls):\n obs_time = []\n #SAMPLE: '2014-06-04T06:00:00.000000000'\n year = np.array(cls.obs_data['YEAR'], dtype=int)\n month = np.array(cls.obs_data['MONTH'], dtype=int)\n day = np.array(cls.obs_data['DAY'], dtype=int)\n hour = np.array(cls.obs_data['HOUR'], dtype=int)\n minute = np.array(cls.obs_data['MINUTE'], dtype=int)\n for idx in range(len(year)):\n utc_dt = datetime(year[idx], month[idx], day[idx], hour[idx], minute[idx])\n obs_time.append(utc_dt)\n cls.t_time = obs_time", "def googledate_fromdatetime(date: datetime.datetime) -> DatetimeWithNanoseconds:\n return DatetimeWithNanoseconds.fromisoformat(date.isoformat())", "def nanotime2datetime(nt: nanotime) -> datetime:\n return datetime.utcfromtimestamp(nt.timestamp())", "def fromjd(cls, jd):\n return gnsstime.frommjd(jd - JD)", "def to_numpy(t):\n if NUMPY_VERSION_MAJOR_ == 1 and NUMPY_VERSION_MAJOR_ < 14 and isinstance( t, bytes ): t = t.decode( 'utf-8' ) # quick and dirty, since some packages, e.g. ubuntu 18.04 python3-numpy, still install numpy 1.13; remove once move on with the version since it's waste of cpu cycles\n if t in ['', 'not-a-date-time']: return NOT_A_DATE_TIME\n if t in ['+infinity', '+inf', 'infinity', 'inf']: return POSITIVE_INFINITY\n if t in ['-infinity', '-inf']: return NEGATIVE_INFINITY\n if not (isinstance(t, BASESTRING) and re.match(r'^(\\d{8}T\\d{6}(\\.\\d{0,12})?)$', t)):\n msg = \"expected comma time, got '{}'\".format(repr(t))\n raise TypeError(msg)\n v = list(t)\n for i in [13, 11]: v.insert(i, ':')\n for i in [6, 4]: v.insert(i, '-')\n return np.datetime64(''.join(v), UNIT)", "def googledate_fromtimestamp(posixstamp: int) -> DatetimeWithNanoseconds:\n return DatetimeWithNanoseconds.utcfromtimestamp(int(posixstamp/1000))", "def epoch_to_datetime(seconds):\n return time.gmtime(seconds)", "def return_time_index(ds):\n time_vals = ds['time'].values\n times = [(t - np.datetime64('1970-01-01T00:00:00Z'))/np.timedelta64(1, 's') for t in time_vals]\n return np.array(times)", "def time_arr(start=0, stop=100, spacing=1., dim=TIME_STR):\n return coord_arr_1d(start, stop, spacing, dim)", "def cftime_to_nptime(times, raise_on_invalid: bool = True) -> np.ndarray:\n times = np.asarray(times)\n # TODO: the strict enforcement of nanosecond precision datetime values can\n # be relaxed when addressing GitHub issue #7493.\n new = np.empty(times.shape, dtype=\"M8[ns]\")\n for i, t in np.ndenumerate(times):\n try:\n # Use pandas.Timestamp in place of datetime.datetime, because\n # NumPy casts it safely it np.datetime64[ns] for dates outside\n # 1678 to 2262 (this is not currently the case for\n # datetime.datetime).\n dt = nanosecond_precision_timestamp(\n t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond\n )\n except ValueError as e:\n if raise_on_invalid:\n raise ValueError(\n \"Cannot convert date {} to a date in the \"\n \"standard calendar. 
Reason: {}.\".format(t, e)\n )\n else:\n dt = \"NaT\"\n new[i] = np.datetime64(dt)\n return new", "def small_time(time_var):\n if np.issubdtype(time_var.dtype, np.datetime64):\n time_var = xray.conventions.encode_cf_variable(time_var)\n else:\n # if the time_var does not have datetime values we want\n # to make sure it is an encoded datetime.\n assert np.issubdtype(time_var.dtype, np.int)\n assert 'since' in time_var.attrs.get('units', '')\n\n assert time_var.attrs['units'].lower().startswith('hour')\n origin = xray.conventions.decode_cf_datetime([0],\n time_var.attrs['units'])[0]\n origin = pd.to_datetime(origin)\n # diffs should be integer valued\n diffs = np.diff(np.concatenate([[0], time_var.values[:]]))\n diffs = np.round(diffs, 6)\n np.testing.assert_array_equal(diffs.astype('int32'), diffs)\n diffs = diffs.astype(np.int32)\n # we use from_ordinal since thats what expansion will use.\n # this way if the roundtrip to_ordinal doesn't work we'll\n # still have correct times.\n fromordinal = datetime.datetime.fromordinal(origin.toordinal())\n seconds = np.int32(datetime.timedelta.total_seconds(origin - fromordinal))\n augmented = np.concatenate([[origin.toordinal(), seconds],\n diffs]).astype(np.int32)\n return small_array(augmented, least_significant_digit=0)", "def from_numpy(t):\n if not ((isinstance(t, np.datetime64) and t.dtype == DTYPE) or\n is_undefined(t) or\n isinstance(t, np.timedelta64)):\n msg = \"expected numpy time or timedelta of type '{}' or '{}', got '{}'\".format(repr(DTYPE), repr(np.timedelta64), repr(t))\n raise TypeError(msg)\n if isinstance(t, np.timedelta64): return str(t.astype('i8'))\n if is_undefined(t): return 'not-a-date-time'\n if is_negative_infinity(t): return '-infinity'\n if is_positive_infinity(t): return '+infinity'\n s = re.sub(r'(\\.0{6})?([-+]\\d{4}|Z)?$', '', str(t))\n #return re.sub(r'(\\.0{6})?([-+]\\d{4}|Z)?$', '', str(t)).translate(None, ':-')\n return s.translate(str.maketrans('', '', ':-')) if sys.version_info.major > 2 else s.translate(None, ':-') # sigh... 
cannot believe i am going this...", "def mydatevec(t):\n # use year 2000 as an offset, this is needed because MATLAB will accept\n # year = 0 but Python will not (year >= 1)\n # also, MATLAB treats year = 0 as a leap year, so we choose a year offset\n # that is also a leap year\n yr0 = 2000\n # mimic MATLAB's ability to handle scalar or vector inputs\n t = numpy.asarray(t)\n scalar_input = False\n if t.ndim == 0:\n t = t[None] # Makes x 1D\n scalar_input = True\n # do the business\n iYaN = numpy.where(~numpy.isnan(t))[0]\n y = numpy.full(len(t), numpy.nan)\n m = y.copy()\n d = y.copy()\n h = y.copy()\n mn = y.copy()\n s = y.copy()\n dt0 = datetime.datetime(yr0, 1, 1)\n dt00 = numpy.array([dt0 + datetime.timedelta(tt - 1) for tt in t[iYaN]])\n y[iYaN] = numpy.array([dt.year for dt in dt00]) - yr0\n m[iYaN] = numpy.array([dt.month for dt in dt00])\n d[iYaN] = numpy.array([dt.day for dt in dt00])\n h[iYaN] = numpy.array([dt.hour for dt in dt00])\n mn[iYaN] = numpy.array([dt.minute for dt in dt00])\n s[iYaN] = numpy.array([dt.second for dt in dt00])\n # index of midnights\n idx = numpy.where((h == 0) & (mn == 0) & (s == 0))[0]\n dt24 = numpy.array([dt00[i] - datetime.timedelta(1) for i in idx])\n y[idx] = numpy.array([dt.year for dt in dt24]) - yr0\n m[idx] = numpy.array([dt.month for dt in dt24])\n d[idx] = numpy.array([dt.day for dt in dt24])\n h[idx] = 24\n if scalar_input:\n # convert back to scalar\n return numpy.ndarray.item(y), numpy.ndarray.item(m), numpy.ndarray.item(d), \\\n numpy.ndarray.item(h), numpy.ndarray.item(mn), numpy.ndarray.item(s)\n else:\n return y, m, d, h, mn, s", "def fromdoy(cls, year, doy=1, sod=0):\n # Find the day and month\n month = 1\n while month <= 12 and doy - calendar.monthrange(year, month)[1] > 0:\n doy -= calendar.monthrange(year, month)[1]\n month += 1\n day = doy\n\n # Find the hour, minute, second, microsecond (if `sod` was a float)\n hour, rest = divmod(sod, 3600)\n minute, second = divmod(rest, 60)\n microsecond, second = math.modf(second)\n\n # Convert to integers\n month = math.floor(month)\n day = math.floor(day)\n hour = math.floor(hour)\n minute = math.floor(minute)\n second = math.floor(second)\n microsecond, second = math.modf(second)\n microsecond = math.floor(microsecond * 1e6)\n return gnsstime(year, month=month, day=day, hour=hour, minute=minute, second=second, microsecond=microsecond)", "def iget_date(self , time_index):\n long_time = EclSum.cNamespace().iget_sim_time( self , time_index )\n ct = CTime(long_time)\n return ct.datetime()", "def _generate_time_vector(self, length):\n pt = pd.to_datetime(self.header_time_str)\n # generate a 24Hz time vector\n pr = pd.date_range(\n pt,\n periods=length,\n freq=\"{}ns\".format(np.int64(np.round(1 / 24 * 1e9))),\n )\n # t = pr.to_numpy()\n mattime = datetime2mtlb(pr.to_numpy())\n sbetime = self.mattime_to_sbetime(mattime)\n return sbetime", "def getTimeInSeconds(fs, tmin, tmax):\n dt = 1.0/fs\n return np.arange(tmin, tmax, dt)", "def extract_datetime_times(uvd):\n times = np.unique(uvd.time_array)\n loc = EarthLocation(*uvd.telescope_location_lat_lon_alt)\n times = Time(times, format='jd', location=loc).to_datetime()\n return times" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the trigger sound. A trigger sound is played when the status is 'listening' to indicate that the assistant is actively listening to the user. The trigger_sound_wave argument should be the path to a valid wave file. If it is None, the trigger sound is disabled.
def set_trigger_sound_wave(self, trigger_sound_wave):
    if trigger_sound_wave and os.path.exists(os.path.expanduser(trigger_sound_wave)):
        self.trigger_sound_wave = os.path.expanduser(trigger_sound_wave)
    else:
        if trigger_sound_wave:
            logger.warning(
                'File %s specified for --trigger-sound does not exist.',
                trigger_sound_wave)
        self.trigger_sound_wave = None
[ "def set_midi_sound(self, s):\n if midi.test_rtmidi():\n \n midi.set_midi_sound(s)\n midi.set_midi_callback(midi.simple_midi_callback)\n \n self.scheduler.unschedule(midi.midi_port_handler)\n self.scheduler.schedule_interval(midi.midi_port_handler, 1)", "def setAudio(self, audio, mode):\n\t\tpass", "def voice(bot, trigger):\n if not trigger.admin:\n return bot.reply('You must be an admin to perform this operation')\n\n if not trigger.user_object or not trigger.user_object.is_login or not trigger.user_object.registered:\n return bot.msg(trigger.nick, 'Please login or register first at %s' % settings.FULL_URL)\n\n try:\n inputs = trigger.group(2).split(' ')\n except (IndexError, AttributeError):\n return bot.reply('Invalid input: .voice #example or .voice #example nick')\n\n try:\n channel = inputs[0]\n if not channel.startswith('#'):\n raise TypeError\n except (IndexError, TypeError):\n return bot.reply('You must provide a valid channel')\n\n nick = None\n try:\n nick = inputs[1]\n except (TypeError, IndexError):\n pass\n\n if not nick:\n nick = trigger.nick\n bot.log.info('Giving voice on %s from %s' % (channel, nick))\n bot.write(['MODE %s +v %s' % (channel, nick)])", "def BACKGROUND_MUSIC(self): \n musicSound = Sound(source = 'ninja.wav')\n musicSound.play()", "def play_sound():\n os.system(\"play -nq -t alsa synth {} sine {}\".format(0.1, 440))", "async def _sfx(self, ctx):\n #default on.\n server = ctx.message.server\n if server.id not in self.settings[\"SERVER_SFX_ON\"]:\n self.settings[\"SERVER_SFX_ON\"][server.id] = True\n else:\n self.settings[\"SERVER_SFX_ON\"][server.id] = not self.settings[\"SERVER_SFX_ON\"][server.id]\n #for a toggle, settings should save here in case bot fails to send message\n fileIO(\"data/audio/settings.json\", \"save\", self.settings)\n if self.settings[\"SERVER_SFX_ON\"][server.id]:\n await self.bot.say(\"Sound effects are now enabled on this server.\")\n else:\n await self.bot.say(\"Sound effects are now disabled on this server.\")", "def select_wave(self, wave):\n if (wave.upper() == 'SINE'):\n self.port.write(WAVE_SINE_CMD.encode('utf-8'))\n elif (wave.upper() == 'TRIANGLE'):\n self.port.write(WAVE_TRIANGLE_CMD.encode('utf-8'))", "def playback_word(self):\n playsound(self.sound_file)", "def play_sound() -> None:\n # Please note that I do not like to put import statements here because\n # it is categorized as a code smell. However, I need this to get rid of\n # the message in the beginning that is forced upon every developer who\n # needs Pygame. 
On a side note, I am looking to replace Pygame with\n # PySide2 in the future.\n from os import environ\n environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"True\"\n\n import pygame.mixer\n pygame.mixer.init()\n pygame.mixer.music.load(\"../../media/beep.wav\")\n pygame.mixer.music.play()", "def play_for(sample_wave, ms=1):\n sound = pygame.sndarray.make_sound(sample_wave)\n sound.play(-1)\n pygame.time.delay(ms)\n sound.stop()", "def haveSound(self) -> \"SbBool\":\n return _coin.SoAudioDevice_haveSound(self)", "def _playSound(self):\n self.sound.play()\n self.timesPlayed += 1\n self.isPlaying = True\n self.shouldPlay = False\n self._resetTimer()", "def soundAlarm(self):\n\n self._beeper.start()", "def set_volume(self):\n if self.sound:\n self.sound.volume = self.volume_slider.value", "def turn_music(self):\n if self.config.getboolean('audio', 'music'):\n self.config.set('audio', 'music', 'false')\n pygame.mixer.music.stop()\n self.speech.speak(self.phrases['music_off'])\n else:\n self.config.set('audio', 'music', 'true')\n self.music_play()\n self.speech.speak(self.phrases['music_on'])\n with open('settings.ini', 'w') as config_file:\n self.config.write(config_file)", "def TestSound():\n SoundsPath = os.path.join(AudioFilesPath, MySet.Sound + \".mp3\")\n Parent.PlaySound(SoundsPath, MySet.Volume*0.01)", "def setwave(num, amplitude, wavelength, speed):\n\n patch.enableWave(num)\n patch.setWaveTarget(num, SeaPatchRoot.WTZ)\n patch.setWaveFunc(num, SeaPatchRoot.WFSin)\n patch.setChoppyK(num, 0)\n patch.setWaveAmplitude(num, amplitude)\n patch.setWaveLength(num, wavelength)\n patch.setWaveSpeed(num, speed)\n return 'wave %s modified' % num", "def play_sound(sound_object):\n sound_object.play()\n time.sleep(0.5)\n sound_object.stop()", "def emit_sound(self, sound):\n sound_manager.emit_sound(sound, self.index)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute lower left ``xy`` pixel position. This is used for the conversion to matplotlib in ``as_artist``. Taken from
def _lower_left_xy(self):
    hw = self.width / 2.
    hh = self.height / 2.
    sint = np.sin(self.angle)
    cost = np.cos(self.angle)
    dx = (hh * sint) - (hw * cost)
    dy = -(hh * cost) - (hw * sint)
    x = self.center.x + dx
    y = self.center.y + dy
    return x, y
[ "def _coord(self,x):\n return self.inset + self.dotwidth/2 + self.fieldwidth*x", "def get_left_top_of_field(self, fieldy, fieldx):\n left_top_Xcoord = (fieldx * self.field_size) + self.ymargin\n left_top_Ycoord = (fieldy * self.field_size) + self.xmargin\n return (left_top_Ycoord, left_top_Xcoord)", "def get_lowerleft_y(self):\n return self[1]", "def get_lowerleft_x(self):\n return self[0]", "def lower_left(self) -> Point:\n return self._lower_left_corner", "def calculate_xy(self):\n x_p = self.offset * self.grid_size * 2\n # multiply by -1 to draw the diagram from top to bottom\n y_p = self.order * self.grid_size * 2 * -1\n return x_p, y_p", "def get_horizontal_position(self, locator):\n return self._get_position(self._selenium.get_element_position_left,\n locator)", "def getoriginx(self):\n return self.origin[0]", "def EffectivePlotOffsetX(self) -> float:", "def _xy(self, pos, update):\n x = pos[0] + update[0]\n y = pos[1] + update[1]\n assert 0 <= x < self.shape[0], f\"Coordinate x out of bound: {x}\"\n assert 0 <= y < self.shape[1], f\"Coordinate y out of bound: {y}\"\n return (x,y)", "def OriginX(self) -> float:", "def get_origin(self):\n x, y = self._origin.xy\n return (-x, -y)", "def left(self):\r\n return GridCoordinates(self.col - 1, self.row)", "def get_pos_x(self):\n return self._position[0]", "def min_x(self):\n return self.origin[0]", "def calculate_xy(gp, point):\n installinfo = gp.GetInstallInfo(\"desktop\")\n if str(installinfo[\"Version\"]) == \"10.1\":\n x_coord = point.Centroid.X\n y_coord = point.Centroid.Y\n \n else:\n x_coord = float(point.Centroid.split(\" \")[0])\n y_coord = float(point.Centroid.split(\" \")[1])\n return x_coord, y_coord", "def _get_plot_coordinates(self) -> Tuple[int, int]:\n return self._x0 + AXIS_SPACE_PX, self._y0 # y does not need to be added AXIS_SPACE_PX, since it is at bottom", "def pixel_to_position(self, pixel):\n x, y = pixel\n return y // LENGTH, x // LENGTH", "def lower_right(self) -> Point:\n return self._lower_left_corner + Point(self._width, 0)", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests to see that assembly_parameter set_value works properly
def test_assembly_parameter_set_value():
    sim = M68K()

    ap = AssemblyParameter(EAMode.IMM, 123)
    mv = MemoryValue(OpSize.WORD)
    mv.set_value_unsigned_int(1234)

    # immediate set should throw assertion error
    with pytest.raises(AssertionError):
        ap.set_value(sim, mv)

    # test data register set
    ap = AssemblyParameter(EAMode.DataRegisterDirect, 3)
    mv.set_value_unsigned_int(123)
    ap.set_value(sim, mv)
    assert sim.get_register(Register.D3) == 123

    # test address register direct
    ap = AssemblyParameter(EAMode.AddressRegisterDirect, 5)
    mv.set_value_unsigned_int(0x120)
    ap.set_value(sim, mv)
    assert sim.get_register(Register.A5) == 0x120

    val = MemoryValue(OpSize.LONG)
    val.set_value_unsigned_int(0x1ABBAABB)

    # set some memory at 0x123
    sim.memory.set(OpSize.LONG, 0x120, val)

    # ensure set proper
    assert sim.memory.get(OpSize.LONG, 0x120) == 0x1ABBAABB

    # now test address register indirect
    ap = AssemblyParameter(EAMode.AddressRegisterIndirect, 5)
    mv = MemoryValue(OpSize.LONG)
    mv.set_value_unsigned_int(0x123123)

    # set the value
    ap.set_value(sim, mv)

    # ensure that it changed
    assert sim.memory.get(OpSize.LONG, 0x120).get_value_unsigned() == 0x123123

    # test address register indirect pre and post
    ap = AssemblyParameter(EAMode.AddressRegisterIndirectPostIncrement, 5)
    ap.set_value(sim, MemoryValue(OpSize.WORD, unsigned_int=0xAA))
    assert sim.memory.get(OpSize.WORD, 0x120).get_value_unsigned() == 0xAA

    ap = AssemblyParameter(EAMode.AddressRegisterIndirectPreDecrement, 5)
    ap.set_value(sim, MemoryValue(OpSize.WORD, unsigned_int=0xBB))
    assert sim.memory.get(OpSize.WORD, 0x120).get_value_unsigned() == 0xBB

    # test absolute addresses
    mv.set_value_unsigned_int(0xCC)
    ap = AssemblyParameter(EAMode.AbsoluteWordAddress, 0x120)
    ap.set_value(sim, mv)
    assert sim.memory.get(OpSize.LONG, 0x120).get_value_unsigned() == 0xCC

    mv.set_value_unsigned_int(0xDD)
    ap = AssemblyParameter(EAMode.AbsoluteLongAddress, 0x120)
    ap.set_value(sim, mv)
    assert sim.memory.get(OpSize.LONG, 0x120).get_value_unsigned() == 0xDD
[ "def setValue(self, parameterValue: cern.japc.value.ParameterValue) -> None:\n ...", "def test_build_param_access(self):\n bps = self.BuildParams()\n assert bps.x is None\n assert getattr(bps, \"x\") is None\n assert not hasattr(bps, \"_BuildParam__x\")\n\n bps.x = 1\n self.check_x_value(bps, 1)\n\n setattr(bps, \"x\", 2)\n self.check_x_value(bps, 2)", "def test_VALUE_readonly(self):\n params = insightiq_api.Parameters()\n with self.assertRaises(AttributeError):\n params.VALUE = 'foo'", "def test_set_system_param(self):\n pass", "def setup_parameter(self, parameter, value):\n self.__dict__[parameter] = value", "def test_set_agent_parameter(self):\n pass", "def test_get_parameter_value(self):\n test_cases = [\n (True, ParameterValue(type=int(ParameterType.PARAMETER_BOOL), bool_value=True)),\n (42, ParameterValue(type=int(ParameterType.PARAMETER_INTEGER), integer_value=42)),\n (3.5, ParameterValue(type=int(ParameterType.PARAMETER_DOUBLE), double_value=3.5)),\n ('foo', ParameterValue(type=int(ParameterType.PARAMETER_STRING), string_value='foo')),\n (' ', ParameterValue(type=int(ParameterType.PARAMETER_STRING), string_value=' ')),\n ('', ParameterValue(type=int(ParameterType.PARAMETER_STRING), string_value='')),\n (\n [True, False],\n ParameterValue(\n type=int(ParameterType.PARAMETER_BOOL_ARRAY),\n bool_array_value=[True, False])\n ),\n (\n [1, 2, 3],\n ParameterValue(\n type=int(ParameterType.PARAMETER_INTEGER_ARRAY),\n integer_array_value=[1, 2, 3])\n ),\n (\n [1.0, 2.0, 3.0],\n ParameterValue(\n type=int(ParameterType.PARAMETER_DOUBLE_ARRAY),\n double_array_value=[1.0, 2.0, 3.0])\n ),\n (\n ['foo', 'bar'],\n ParameterValue(\n type=int(ParameterType.PARAMETER_STRING_ARRAY),\n string_array_value=['foo', 'bar'])\n ),\n ]\n\n for input_value, expected_value in test_cases:\n try:\n p = get_parameter_value(str(input_value))\n except Exception as e:\n assert False, f'failed to get param_value, reason: {e}'\n self.assertEqual(p, expected_value)", "def testAssembler(self):\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.stringTypeTest(self, seq_set, \"assembler\")\n\n self.util.stringPropertyTest(self, seq_set, \"assembler\")", "def test_set_job_value(self):\n\n manager = maya_manager.Maya_Manager()\n\n #start frame\n manager.set_job_value('start_frame', 111)\n self.assertEqual(manager.job.start_frame, 111)\n\n #end frame\n manager.set_job_value('end_frame', 112)\n self.assertEqual(manager.job.end_frame, 112)\n\n #width\n manager.set_job_value('width', 2560)\n self.assertEqual(manager.job.width, 2560)\n\n #height\n manager.set_job_value('height', 1440)\n self.assertEqual(manager.job.height, 1440)\n\n #camera\n manager.set_job_value('camera', 'persp')\n self.assertEqual(manager.job.camera, 'persp')\n\n #dof\n manager.set_job_value('dof', True)\n self.assertEqual(manager.job.dof, True)\n manager.set_job_value('dof', False)\n self.assertEqual(manager.job.dof, False)\n\n #motion blur\n manager.set_job_value('motion_blur', True)\n self.assertEqual(manager.job.motion_blur, True)\n manager.set_job_value('motion_blur', False)\n self.assertEqual(manager.job.motion_blur, False)\n\n #quality\n manager.set_job_value('quality', 'low')\n self.assertEqual(manager.job.quality, 'low')\n\n #shader_override_type\n manager.set_job_value('shader_override_type', 4)\n self.assertEqual(manager.job.shader_override_type, 4)\n\n #unsupported parameter\n with self.assertRaises(Exception):\n manager.set_job_value('asdfas', 1440)", "def test_set_animal_parameters_callable(self):\n params = {}\n 
self.biosim.set_animal_parameters('Herbivore', params)", "def test_init_values(self):\n # set an additional value for test\n self.protocol._param_dict.add(\"foo\", r'foo=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n default_value=10)\n self.protocol._param_dict.add(\"bar\", r'bar=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n direct_access=False,\n startup_param=True,\n default_value=0)\n self.protocol._param_dict.add(\"baz\", r'baz=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n direct_access=True,\n default_value=20)\n self.protocol._param_dict.add(\"bat\", r'bat=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n startup_param=False,\n default_value=20)\n self.protocol._param_dict.add(\"qux\", r'qux=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n startup_param=True)\n self.protocol._param_dict.add(\"rok\", r'rok=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x))\n self.protocol._param_dict.update(\"qux=6666\")\n \n # mark init params\n self.assertRaises(InstrumentParameterException,\n self.protocol.set_init_params, [])\n self.protocol.set_init_params({DriverConfigKey.PARAMETERS: {\"foo\": 1111, \"baz\":2222}})\n \n # get new startup config\n self.assertRaises(InstrumentProtocolException, self.protocol.get_startup_config)\n self.protocol.set_init_params({DriverConfigKey.PARAMETERS: {\"foo\": 1111, \"baz\":2222, \"bat\": 11, \"qux\": 22}})\n result = self.protocol.get_startup_config()\n \n self.assertEquals(len(result), 5)\n self.assertEquals(result[\"foo\"], 1111) # init param\n self.assertEquals(result[\"bar\"], 0) # init param with default value\n self.assertEquals(result[\"baz\"], 2222) # non-init param, but value specified\n self.assertEquals(result[\"bat\"], 11) # set param\n self.assertEquals(result[\"qux\"], 22) # set param\n self.assertIsNone(result.get(\"rok\")) # defined in paramdict, no config", "def set_parameter(self, param, value, location=3):\n self.reb.set_parameter(param, value, self.stripe, location)\n logging.info(\"Set REB parameter %s to %s at location %d\" % (param, repr(value), location))", "def test_get_parameter_value(value, result):\n args = {'A': True, 'B': 'true', 'C': 1, 'D': {'A': True}}\n assert tp.get_value(value=value, arguments=args) == result", "def set_value(parameter, value, parent):\n if parameter.type is 'bool':\n to_write = u'true' if value else u'false'\n elif parameter.type is 'int':\n to_write = u'%d' % clamp(value, parameter)\n elif parameter.type is 'float':\n to_write = u'%f' % clamp(value, parameter)\n elif parameter.type is 'enum':\n for key, val in parameter.dict.iteritems():\n if key == value.upper():\n value = val\n break\n to_write = u'%d' % value\n getattr(parent,parameter.name).ArrangerAutomation.Events.contents[1]['Value'] = to_write", "def _set_parameter(self, par, val):\n self._parchk(par)\n setattr(self, par, float(val))", "def test_expand_parameter_value(value, args, result):\n parameters = ParameterIndex()\n parameters['A'] = String(name='A', label='P1', index=0)\n parameters['B'] = String(name='B', label='P2', index=0)\n parameters['C'] = String(\n name='C',\n label='P3',\n index=2,\n default='default'\n )\n assert tp.expand_value(value, args, parameters) == result", "def set_value(self, ba, val):\n matched = util.zip_longest(ba.sig.positional, ba.args, fillvalue=util.UNSET)\n for i, (param, arg) in enumerate(matched):\n if param is self:\n if arg is util.UNSET:\n 
ba.args.append(val)\n else:\n ba.args[i] = val\n return\n else:\n if arg is util.UNSET:\n default_value = param.default_value(ba)\n if default_value is not util.UNSET:\n ba.args.append(default_value)\n # ba.args.append(param.cli_default.value_after_conversion(partial(self.coerce_value, ba=ba)))\n else:\n raise ValueError(\n \"Can't set parameters after required parameters\")\n else:\n raise ValueError(\"{!r} not present in signature\".format(self))", "def can_set_value(self):\n raise NotImplementedError", "def testAssemblyName(self):\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n self.util.stringTypeTest(self, seq_set, \"assembly_name\")\n\n self.util.stringPropertyTest(self, seq_set, \"assembly_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolves a dissimilarity or vector size measure from a string or a float. Passes through values that are already callable. Raises an exception for invalid values. Floats are resolved as Minkowski size with corresponding value for `p`.
def resolve_dissimilarity(dissimilarity, scale_by_dimensionality=False):
    if callable(dissimilarity):
        return dissimilarity
    if isinstance(dissimilarity, str):
        dissimilarity = dissimilarity.lower()
        if dissimilarity in ['hamming', ]:
            p = 0
            unrooted = True
        elif dissimilarity in ['boscovich', 'cityblock', 'manhattan', 'taxicab', ]:
            p = 1
            unrooted = False
        elif dissimilarity in ['euclidean', 'pythagorean', ]:
            p = 2
            unrooted = False
        elif dissimilarity in ['squared_euclidean', 'squared_pythagorean', ]:
            p = 2
            unrooted = True
        elif dissimilarity in ['chebyshev', 'chessboard', 'maximum']:
            p = np.inf
            unrooted = False
        else:
            raise ValueError(f'Unknown dissimilarity measure: \'{dissimilarity}\'')
        return MinkowskiSize(p=p, unrooted=unrooted, scale_by_dimensionality=scale_by_dimensionality)
    if isinstance(dissimilarity, (int, float)):
        return MinkowskiSize(p=dissimilarity, unrooted=False, scale_by_dimensionality=scale_by_dimensionality)
    raise ValueError(f'Parameter `dissimilarity` must be a function or a callable class, a string, or a float.')
[ "def parse_size(size_as_string, dpi=72, sep=\"x,;\"):\n if size_as_string is None:\n size_as_string = \"\"\n\n if \"x\" in sep:\n # If we have a pixel measure somewhere, we have to be careful\n size_as_string = size_as_string.replace(\"px\", \"PX\")\n size_as_string = size_as_string.replace(\"Px\", \"PX\")\n table = maketrans(sep, \"_\" * len(sep))\n size_as_string = size_as_string.translate(table)\n size_as_string = re.sub(\"_+\", \"_\", size_as_string)\n width, _, height = size_as_string.partition(\"_\")\n\n def find_measure_and_unit(measure):\n \"\"\"Given a string containing a numeric measure and a unit,\n returns the measure as a number and the unit as a string,\n in a tuple.\"\"\"\n measure = measure.strip().replace(\" \", \"\")\n match = re.match(\"([-0-9.]+)([a-zA-Z]*)\", measure)\n if not match:\n return None, None\n num = float(match.group(1))\n measure = match.group(2).lower()\n if not measure:\n measure = None\n return num, measure\n\n def convert_measure_to_inches(measure, unit):\n \"\"\"Converts a measure-unit pair to a single number that\n represents the measure in inches.\"\"\"\n if measure is None:\n return measure\n if unit is None:\n unit = \"in\"\n if unit == \"in\":\n # We already have inches\n return measure\n if unit == \"cm\":\n # Centimetres to inches\n return measure / 2.54\n if unit == \"mm\":\n # Millimetres to inches\n return measure / 254.0\n if unit == \"m\":\n # Metres to inches\n return measure / 0.0254\n if unit == \"pt\":\n # Points to inches\n return measure / 72.27\n if unit == \"px\":\n # Pixels to inches using the current dpi value\n return measure / float(dpi)\n raise ValueError(\"unsupported unit of length: %r\" % unit)\n\n width, width_unit = find_measure_and_unit(width)\n height, height_unit = find_measure_and_unit(height)\n\n if width is None and height is None:\n width, width_unit = 8.0, \"in\"\n\n width, height = (\n convert_measure_to_inches(width, width_unit),\n convert_measure_to_inches(height, height_unit),\n )\n\n if width is None:\n width = height * (1 + 5 ** 0.5) / 2\n elif height is None:\n height = width * 2 / (1 + 5 ** 0.5)\n\n return width, height", "def parse_mem_value(value):\n elements = value.split()\n result = float(elements[0])\n if len(elements) > 1:\n result *= SIZE_FACTORS[elements[1].lower()]\n return result", "def procheckString2float(string):\n result = float(string)\n if result > 999.8 and result < 999.99:\n return None\n return result", "def human_to_size(in_string):\n\n size_map = { \"K\" : 1024,\n \"M\" : (1024 ** 2),\n \"G\" : (1024 ** 3) }\n\n value = int(\"\".join(itertools.takewhile(str.isdigit, in_string)))\n\n for suffix, scale in zip(size_map.keys(), size_map.values()):\n if in_string.upper().endswith(suffix):\n value *= scale\n\n return value", "def check_valid_size(value, name):\n if value is None:\n return\n check_type(integer_types + (float,), value)\n if value < 0:\n raise InvalidArgument(u'Invalid size %s %r < 0' % (value, name))\n if isinstance(value, float) and math.isnan(value):\n raise InvalidArgument(u'Invalid size %s %r' % (value, name))", "def _validate_size_string(self, size_string):\n\n if isinstance(size_string, _pipeline_param.PipelineParam):\n if size_string.value:\n size_string = size_string.value\n else:\n return\n\n if re.match(r'^[0-9]+(E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki){0,1}$',\n size_string) is None:\n raise ValueError(\n 'Invalid memory string. 
Should be an integer, or integer followed '\n 'by one of \"E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki\"')", "def posfloat(string):\n try:\n value = float(string)\n except ValueError as err:\n raise argparse.ArgumentTypeError(err)\n if value < 0.0:\n raise argparse.ArgumentTypeError(f\"invalid value {string}: value must \"\n \"be greater than or equal to 0.0\")\n return value", "def _do_set_scale(self, string):\n def usage():\n self.error(\"Incorrect usage\", \"see 'help set scale'\")\n\n parts = string.split(' ')\n scale = 1e6 / float(parts.pop(0))\n units = parts.pop(0)\n if type(units) is not int:\n if units not in all_units:\n return self.error(\"Unsupported units\", \"See 'help units'\")\n units = all_units[units]\n\n if len(parts):\n return usage()\n\n self.motor.scale = int(scale), units", "def _check_spot_size(\n spatial_data: Optional[Mapping], spot_size: Optional[float]\n) -> float:\n if spatial_data is None and spot_size is None:\n raise ValueError(\n \"When .uns['spatial'][library_id] does not exist, spot_size must be \"\n \"provided directly.\"\n )\n elif spot_size is None:\n return spatial_data['scalefactors']['spot_diameter_fullres']\n else:\n return spot_size", "def toFloat(self, p_str): # real signature unknown; restored from __doc__\n pass", "def _search_float(self, pattern, arg=None):\n string = self._search(pattern)\n if string:\n try:\n return float(string)\n except: pass\n raise WeatherParseError(text=self.text, arg=arg)", "def test_accepts_string() -> None:\n d32 = Damm32()\n d32.calculate(\"STRING\")", "def similarity_name2value(s_name, repr1, repr2):\n if s_name == 'jensen-shannon':\n return jensen_shannon_divergence(repr1, repr2)\n if s_name == 'renyi':\n return renyi_divergence(repr1, repr2)\n if s_name == 'cos' or s_name == 'cosine':\n return cosine_similarity(repr1, repr2)\n if s_name == 'euclidean':\n return euclidean_distance(repr1, repr2)\n if s_name == 'variational':\n return variational_distance(repr1, repr2)\n if s_name == 'kl':\n return kl_divergence(repr1, repr2)\n if s_name == 'bhattacharyya':\n return bhattacharyya_distance(repr1, repr2)\n raise ValueError('%s is not a valid feature name.' % s_name)", "def _convert_units(key, value):\n # See: https://matplotlib.org/users/customizing.html, all props matching\n # the strings use the units 'points', and special categories are inches!\n # WARNING: Must keep colorbar and subplots units alive, so when user\n # requests em units, values change with respect to font size. 
The points\n # thing is a conveniene feature so not as important for them.\n if (isinstance(value, str)\n and key.split('.')[0] not in ('colorbar', 'subplots')\n and re.match('^.*(width|space|size|pad|len|small|large)$', key)):\n value = units(value, 'pt')\n return value", "def test_compute_params_int_float(self):\n computation_name = Name(\"/func/f1\")\n computation_name += \"_(2,12.5)\"\n computation_name += \"NFN\"\n computation_interest = Interest(computation_name)\n\n computation_entry = NFNComputationTableEntry(computation_name)\n computation_entry.available_data[Name(\"/func/f1\")] = \"PYTHON\\nf\\ndef f(a,b):\\n return a*b\"\n\n computation_str, prepended = self.nfn_layer.parser.network_name_to_nfn_str(computation_name)\n computation_entry.ast = self.nfn_layer.parser.parse(computation_str)\n\n self.nfn_layer.computation_table.append_computation(computation_entry)\n\n self.nfn_layer.compute(computation_interest)\n res = self.nfn_layer.queue_to_lower.get(timeout=2.0)\n self.assertEqual(Content(computation_name, \"25.0\"), res[1])", "def test_regularization_is_float():\n reg = get_regularization(0, 5)\n assert isinstance(reg, float), \"Expected different type.\"", "def __str2pnt(self, s):\n units = s.strip(string.digits + string.whitespace + \".\" + \"-\").upper()\n val = float(s.strip(string.ascii_letters + string.whitespace + \"%\"))\n return self.__unit2pnt(val, units)", "def parse_float(string):\r\n try:\r\n #Attempt parsing string to float\r\n return float(string)\r\n except ValueError:\r\n return None", "def compute_width(d):\n if isinstance(d, str):\n return len(d)\n elif isinstance(d, Number):\n return len(('{:0.%s}' % format_spec).format(d))\n else:\n raise ValueError(\n 'Elements in the values array must be '\n 'strings, ints, or floats. Found: '\n '{}'.format(d.__class__.__name__)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
21. Test checks if default value of colorbar is assigned
def test_default_colorbar(self):
    result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True)
    self.assertNotIsInstance(result.get_legend(), type(None))
    self.assertEqual(len(result.figure.axes), 1)
    pyplot.close()
[ "def test_default_colorbar(self):\n result = self.plotter_tailored_LOGS.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def is_colorbar(ax):\n return (ax.get_data_ratio() == 1.0 and not ax.get_navigate())", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_tailored_BBBP.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_colorbar(self):\n # Setup\n data = [\n [0.0, 0.0, 2.0],\n [1.0, 1.0, 1.0],\n [3.0, 2.0, 0.0],\n ]\n paxfig = paxplot.pax_parallel(n_axes=len(data[0]))\n paxfig.plot(data)\n\n # Requesting axis that doesn't exist\n with self.assertRaises(IndexError):\n paxfig.add_colorbar(\n ax_idx=4,\n cmap='viridis',\n )\n\n # Non integer value for ax_idx\n with self.assertRaises(TypeError):\n paxfig.add_colorbar(\n ax_idx='foo',\n cmap='viridis',\n )\n\n # Colorbar that doesn't exist (default message helpful enough)\n with self.assertRaises(ValueError):\n paxfig.add_colorbar(\n ax_idx=0,\n cmap='foo',\n )", "def is_default(self) -> bool:\n return self.type == ColorType.DEFAULT", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print 'must create plot before adding colorbar'\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ticks = np.linspace(0,1,11)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # mask zi's negative and zero elements\n masked = np.ma.masked_less_equal(self.zi-self.znull, 0.)\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n ticks = np.linspace(logmin,0,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks).ax.set_yticklabels(ticklabels)\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print 'color scale used not recognized: cannot produce colorbar'", "def test_default_color_config():\n actual = eg_config.get_default_color_config()\n\n assert_equal(actual.pound, eg_config.DEFAULT_COLOR_POUND)\n assert_equal(actual.heading, eg_config.DEFAULT_COLOR_HEADING)\n assert_equal(actual.code, eg_config.DEFAULT_COLOR_CODE)\n assert_equal(actual.backticks, 
eg_config.DEFAULT_COLOR_BACKTICKS)\n assert_equal(actual.prompt, eg_config.DEFAULT_COLOR_PROMPT)\n\n assert_equal(actual.pound_reset, eg_config.DEFAULT_COLOR_POUND_RESET)\n assert_equal(actual.heading_reset, eg_config.DEFAULT_COLOR_HEADING_RESET)\n assert_equal(actual.code_reset, eg_config.DEFAULT_COLOR_CODE_RESET)\n assert_equal(\n actual.backticks_reset,\n eg_config.DEFAULT_COLOR_BACKTICKS_RESET\n )\n assert_equal(actual.prompt_reset, eg_config.DEFAULT_COLOR_PROMPT_RESET)", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print('must create plot before adding colorbar')\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '+':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '-':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ampmin = self.floor\n ampmax = self.ceiling\n ticks = np.linspace(ampmin, ampmax,\n num=self.contour_n + 2)\n # determine how much precision is necessary in the ticks:\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n #logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n logmin = self.floor\n logmax = self.ceiling\n ticks = np.linspace(logmin,logmax,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),\n max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print('color scale used not recognized: cannot produce colorbar')", "def test_cbar_minorticks_for_rc_xyminortickvisible():\n\n plt.rcParams['ytick.minor.visible'] = True\n plt.rcParams['xtick.minor.visible'] = True\n\n vmin, vmax = 0.4, 2.6\n fig, ax = plt.subplots()\n im = ax.pcolormesh([[1, 2]], vmin=vmin, vmax=vmax)\n\n cbar = fig.colorbar(im, extend='both', orientation='vertical')\n assert cbar.ax.yaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.yaxis.get_minorticklocs()[-1] <= vmax\n\n cbar = fig.colorbar(im, extend='both', orientation='horizontal')\n assert cbar.ax.xaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.xaxis.get_minorticklocs()[-1] <= vmax", "def getDefaultBrush():\n pass", "def no_color():\n return 
False", "def test_colorbar_C_keep_legend(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n pyplot.close()", "def colorbar_only(vmin,vmax,outname='colorbar.png',figsize=(4,1),\n cbsize=[0.05,0.5,0.9,0.2],cmap=None, label='cm/yr',\n orient='horizontal',extend='both',transparent=0,show=False):\n print(vmin, vmax)\n if orient == 'vertical':\n figsize = (1,4)\n cbsize = [0.05,0.05,0.1,0.9]\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes(cbsize)\n\n if cmap == None:\n cmap = mpl.cm.jet\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n\n cb = mpl.colorbar.ColorbarBase(ax,\n cmap=cmap,\n norm=norm,\n extend=extend,\n orientation=orient,\n )\n cb.set_label(label)\n\n #Show & save\n if show:\n plt.show()\n\n plt.savefig(outname,\n transparent=transparent,\n bbox_inches='tight', #doesn't work for ps output...\n )\n print('output {}'.format(outname))", "def test_colorbar_R_add_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)>=1)\n pyplot.close()", "def test_fill_default_quant_color_scheme():\n result = CliRunner().invoke(viz, [\"--colorvar\", \"amount\", *OUTPUT_ARGS])\n cdata = json.loads(result.output)\n\n fill = cdata[\"encoding\"][\"fill\"]\n assert fill[\"type\"] == \"quantitative\"\n assert fill[\"scale\"][\"scheme\"] == DEFAULT_COLOR_SCHEMES[\"ramp\"]", "def test_colorbar_extension_shape():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_shape('uniform')\n _colorbar_extension_shape('proportional')", "def hasColor(*args, **kwargs):\n \n pass", "def with_colors() :\n global __with_colors__\n return bool(__with_colors__)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
22. Test checks if colorbar is assigned when target type is R and therefore legend removed
def test_colorbar_R_remove_legend(self):
    result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)
    self.assertIsInstance(result.get_legend(), type(None))
    pyplot.close()
[ "def test_colorbar_C_keep_legend(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n pyplot.close()", "def test_default_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_colorbar_R_add_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)>=1)\n pyplot.close()", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_tailored_BBBP.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_default_colorbar(self):\n result = self.plotter_tailored_LOGS.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def is_colorbar(ax):\n return (ax.get_data_ratio() == 1.0 and not ax.get_navigate())", "def test_colorbar(self):\n # Setup\n data = [\n [0.0, 0.0, 2.0],\n [1.0, 1.0, 1.0],\n [3.0, 2.0, 0.0],\n ]\n paxfig = paxplot.pax_parallel(n_axes=len(data[0]))\n paxfig.plot(data)\n\n # Requesting axis that doesn't exist\n with self.assertRaises(IndexError):\n paxfig.add_colorbar(\n ax_idx=4,\n cmap='viridis',\n )\n\n # Non integer value for ax_idx\n with self.assertRaises(TypeError):\n paxfig.add_colorbar(\n ax_idx='foo',\n cmap='viridis',\n )\n\n # Colorbar that doesn't exist (default message helpful enough)\n with self.assertRaises(ValueError):\n paxfig.add_colorbar(\n ax_idx=0,\n cmap='foo',\n )", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print 'must create plot before adding colorbar'\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ticks = np.linspace(0,1,11)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # mask zi's negative and zero elements\n masked = np.ma.masked_less_equal(self.zi-self.znull, 0.)\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n ticks = np.linspace(logmin,0,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks).ax.set_yticklabels(ticklabels)\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),max(self.znull, 
self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print 'color scale used not recognized: cannot produce colorbar'", "def test_colorbar_extension_shape():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_shape('uniform')\n _colorbar_extension_shape('proportional')", "def test_cbar_minorticks_for_rc_xyminortickvisible():\n\n plt.rcParams['ytick.minor.visible'] = True\n plt.rcParams['xtick.minor.visible'] = True\n\n vmin, vmax = 0.4, 2.6\n fig, ax = plt.subplots()\n im = ax.pcolormesh([[1, 2]], vmin=vmin, vmax=vmax)\n\n cbar = fig.colorbar(im, extend='both', orientation='vertical')\n assert cbar.ax.yaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.yaxis.get_minorticklocs()[-1] <= vmax\n\n cbar = fig.colorbar(im, extend='both', orientation='horizontal')\n assert cbar.ax.xaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.xaxis.get_minorticklocs()[-1] <= vmax", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print('must create plot before adding colorbar')\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '+':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '-':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ampmin = self.floor\n ampmax = self.ceiling\n ticks = np.linspace(ampmin, ampmax,\n num=self.contour_n + 2)\n # determine how much precision is necessary in the ticks:\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n #logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n logmin = self.floor\n logmax = self.ceiling\n ticks = np.linspace(logmin,logmax,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),\n max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print('color scale used not 
recognized: cannot produce colorbar')", "def test_remove_from_figure(use_gridspec):\n fig, ax = plt.subplots()\n sc = ax.scatter([1, 2], [3, 4], cmap=\"spring\")\n sc.set_array(np.array([5, 6]))\n pre_figbox = np.array(ax.figbox)\n cb = fig.colorbar(sc, use_gridspec=use_gridspec)\n fig.subplots_adjust()\n cb.remove()\n fig.subplots_adjust()\n post_figbox = np.array(ax.figbox)\n assert (pre_figbox == post_figbox).all()", "def test_cbar_tick_format(plot_func, img_3d_mni, cbar_tick_format, tmp_path):\n filename = tmp_path / \"temp.png\"\n plot_func(\n img_3d_mni,\n output_file=filename,\n colorbar=True,\n cbar_tick_format=cbar_tick_format,\n )\n plt.close()", "def colorbar_only(vmin,vmax,outname='colorbar.png',figsize=(4,1),\n cbsize=[0.05,0.5,0.9,0.2],cmap=None, label='cm/yr',\n orient='horizontal',extend='both',transparent=0,show=False):\n print(vmin, vmax)\n if orient == 'vertical':\n figsize = (1,4)\n cbsize = [0.05,0.05,0.1,0.9]\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes(cbsize)\n\n if cmap == None:\n cmap = mpl.cm.jet\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n\n cb = mpl.colorbar.ColorbarBase(ax,\n cmap=cmap,\n norm=norm,\n extend=extend,\n orientation=orient,\n )\n cb.set_label(label)\n\n #Show & save\n if show:\n plt.show()\n\n plt.savefig(outname,\n transparent=transparent,\n bbox_inches='tight', #doesn't work for ps output...\n )\n print('output {}'.format(outname))", "def choropleth_plot(self, error_array, xlabel, ylabel, title): \n \"rotate so population on x axis\"\n data = np.rot90(error_array,k=1) \n \"flip so proportion goes upwards so imshow `origin=lower` is true\"\n data = np.flip(data,axis=0)\n \"put nan values to white\"\n data2 = np.ma.masked_where(np.isnan(data),data)\n\n \"initiate plot\"\n f,ax=plt.subplots(figsize=(8,8))\n \"colourmap\"\n cmap = cm.viridis\n \"set nan values for 100% unobserved to white (not black because black text)\"\n cmap.set_bad(\"white\") \n \n im=ax.imshow(data2,interpolation=\"nearest\",cmap=cmap,origin=\"lower\")\n \n \n \"text on top of squares for clarity\"\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n plt.text(j,i,str(data[i,j].round(2)),ha=\"center\",va=\"center\",color=\"w\",\n path_effects=[pe.Stroke(linewidth = 0.7,foreground='k')])\n \n \n \"colourbar alignment and labelling\"\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\",size=\"5%\",pad=0.05)\n cbar=plt.colorbar(im,cax,cax)\n \n \"labelling\"\n ax.set_xticks(np.arange(len(self.p1)))\n ax.set_yticks(np.arange(len(self.p2)))\n ax.set_xticklabels(self.p1)\n ax.set_yticklabels(self.p2)\n ax.set_xticks(np.arange(-.5,len(self.p1),1),minor=True)\n ax.set_yticks(np.arange(-.5,len(self.p2),1),minor=True)\n ax.grid(which=\"minor\",color=\"k\",linestyle=\"-\",linewidth=2)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.title = title + \" Choropleth\"\n cbar.set_label(title + \" Grand Median L2s\")\n \n \"save\"\n if self.save:\n plt.savefig(self.destination + title + \"_Choropleth.pdf\")", "def test_colorbar_extension_length():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_length('uniform')\n _colorbar_extension_length('proportional')", "def remove_color_bar(can, hist_idx=1):\n hist = can.pltables[hist_idx]\n palette = hist.GetListOfFunctions().FindObject('palette')\n palette.SetX1NDC(1.2)\n palette.SetX2NDC(1.3)\n can.Modified()\n can.Update()", "def update_colorbar():\n\n for key in colormap_dict:\n if cbar.value == key:\n cmap = log_cmap(\"counts\", 
palette=colormap_dict[cbar.value], low=1, high=col_max)\n color_bar.color_mapper.palette = colormap_dict[cbar.value]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
23. Test checks if colorbar is ignored when target type is C and therefore legend kept
def test_colorbar_C_keep_legend(self):
    result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)
    self.assertNotIsInstance(result.get_legend(), type(None))
    pyplot.close()
[ "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_default_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_tailored_BBBP.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def is_colorbar(ax):\n return (ax.get_data_ratio() == 1.0 and not ax.get_navigate())", "def test_colorbar_R_remove_legend(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertIsInstance(result.get_legend(), type(None))\n pyplot.close()", "def test_default_colorbar(self):\n result = self.plotter_tailored_LOGS.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def test_colorbar_R_add_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)>=1)\n pyplot.close()", "def test_cbar_tick_format(plot_func, img_3d_mni, cbar_tick_format, tmp_path):\n filename = tmp_path / \"temp.png\"\n plot_func(\n img_3d_mni,\n output_file=filename,\n colorbar=True,\n cbar_tick_format=cbar_tick_format,\n )\n plt.close()", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print 'must create plot before adding colorbar'\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ticks = np.linspace(0,1,11)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # mask zi's negative and zero elements\n masked = np.ma.masked_less_equal(self.zi-self.znull, 0.)\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n ticks = np.linspace(logmin,0,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks).ax.set_yticklabels(ticklabels)\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print 'color scale used not recognized: cannot produce 
colorbar'", "def test_colorbar(self):\n # Setup\n data = [\n [0.0, 0.0, 2.0],\n [1.0, 1.0, 1.0],\n [3.0, 2.0, 0.0],\n ]\n paxfig = paxplot.pax_parallel(n_axes=len(data[0]))\n paxfig.plot(data)\n\n # Requesting axis that doesn't exist\n with self.assertRaises(IndexError):\n paxfig.add_colorbar(\n ax_idx=4,\n cmap='viridis',\n )\n\n # Non integer value for ax_idx\n with self.assertRaises(TypeError):\n paxfig.add_colorbar(\n ax_idx='foo',\n cmap='viridis',\n )\n\n # Colorbar that doesn't exist (default message helpful enough)\n with self.assertRaises(ValueError):\n paxfig.add_colorbar(\n ax_idx=0,\n cmap='foo',\n )", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print('must create plot before adding colorbar')\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '+':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '-':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ampmin = self.floor\n ampmax = self.ceiling\n ticks = np.linspace(ampmin, ampmax,\n num=self.contour_n + 2)\n # determine how much precision is necessary in the ticks:\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n #logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n logmin = self.floor\n logmax = self.ceiling\n ticks = np.linspace(logmin,logmax,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),\n max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print('color scale used not recognized: cannot produce colorbar')", "def test_colorbar_extension_shape():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_shape('uniform')\n _colorbar_extension_shape('proportional')", "def test_cbar_minorticks_for_rc_xyminortickvisible():\n\n plt.rcParams['ytick.minor.visible'] = True\n plt.rcParams['xtick.minor.visible'] = True\n\n vmin, vmax = 0.4, 2.6\n fig, ax = plt.subplots()\n im = ax.pcolormesh([[1, 2]], vmin=vmin, vmax=vmax)\n\n cbar = fig.colorbar(im, extend='both', orientation='vertical')\n assert 
cbar.ax.yaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.yaxis.get_minorticklocs()[-1] <= vmax\n\n cbar = fig.colorbar(im, extend='both', orientation='horizontal')\n assert cbar.ax.xaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.xaxis.get_minorticklocs()[-1] <= vmax", "def colorbar_only(vmin,vmax,outname='colorbar.png',figsize=(4,1),\n cbsize=[0.05,0.5,0.9,0.2],cmap=None, label='cm/yr',\n orient='horizontal',extend='both',transparent=0,show=False):\n print(vmin, vmax)\n if orient == 'vertical':\n figsize = (1,4)\n cbsize = [0.05,0.05,0.1,0.9]\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes(cbsize)\n\n if cmap == None:\n cmap = mpl.cm.jet\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n\n cb = mpl.colorbar.ColorbarBase(ax,\n cmap=cmap,\n norm=norm,\n extend=extend,\n orientation=orient,\n )\n cb.set_label(label)\n\n #Show & save\n if show:\n plt.show()\n\n plt.savefig(outname,\n transparent=transparent,\n bbox_inches='tight', #doesn't work for ps output...\n )\n print('output {}'.format(outname))", "def no_color():\n return False", "def test_colorbar_extension_length():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_length('uniform')\n _colorbar_extension_length('proportional')", "def test_remove_from_figure(use_gridspec):\n fig, ax = plt.subplots()\n sc = ax.scatter([1, 2], [3, 4], cmap=\"spring\")\n sc.set_array(np.array([5, 6]))\n pre_figbox = np.array(ax.figbox)\n cb = fig.colorbar(sc, use_gridspec=use_gridspec)\n fig.subplots_adjust()\n cb.remove()\n fig.subplots_adjust()\n post_figbox = np.array(ax.figbox)\n assert (pre_figbox == post_figbox).all()", "def update_colorbar():\n\n for key in colormap_dict:\n if cbar.value == key:\n cmap = log_cmap(\"counts\", palette=colormap_dict[cbar.value], low=1, high=col_max)\n color_bar.color_mapper.palette = colormap_dict[cbar.value]", "def cpcolor(*args, **kwargs):\n threshx = np.inf\n threshy = np.inf\n fixgaps = True\n argind = 0\n if isinstance(args[0], mpl.axes.Axes):\n # Data is the second (1) element of args... 
(see below)\n argind += 1\n ax = args[0]\n elif ('ax' in kwargs) or ('axes' in kwargs) or ('parent' in kwargs):\n if 'parent' in kwargs:\n ax = kwargs.pop('parent')\n elif 'ax' in kwargs:\n ax = kwargs.pop('ax')\n else:\n ax = kwargs.pop('axes')\n else:\n ax = mpl.pylab.gca()\n\n if 'fixgaps' in kwargs:\n fixgaps = kwargs.pop('fixgaps')\n if 'threshx' in kwargs:\n threshx = kwargs.pop('threshx')\n if 'threshy' in kwargs:\n threshy = kwargs.pop('threshy')\n if 'clim' in kwargs:\n clm = kwargs.pop('clim')\n kwargs['vmin'] = clm[0]\n kwargs['vmax'] = clm[1]\n\n if len(args) - argind == 1:\n dat = args[0 + argind]\n x = np.arange(dat.shape[1])\n y = np.arange(dat.shape[0])\n else:\n x = args[0 + argind]\n y = args[1 + argind]\n dat = args[2 + argind]\n\n dfx = np.diff(x, 1, 0).astype('double')\n dx = dfx\n gd = abs(dx) <= 3 * nanmean(abs(dx))\n while not gd.all():\n dx = dx[gd]\n gd = abs(dx) <= 3 * nanmean(abs(dx))\n\n dx = nanmean(dx).astype('double')\n\n dfy = np.diff(y, 1, 0).astype('double')\n dy = dfy\n gd = abs(dy) <= 3 * nanmean(abs(dy))\n while not gd.all():\n dy = dy[gd]\n gd = abs(dy) <= 3 * nanmean(abs(dy))\n\n dy = nanmean(dy).astype('double')\n\n N = dat.shape[1] + sum(abs(dfx) > 3 * abs(dx)) * fixgaps\n datn = nans([dat.shape[0], N + 1])\n xn = nans([N + 1, 1])\n if fixgaps:\n if abs(dfx[0]) < 3 * abs(dx) or abs(dfx[0]) <= threshx:\n xn[0] = x[0] - dfx[0] / 2\n else:\n xn[0] = x[0] - dx\n datn[:, 0] = dat[:, 0]\n c = 0\n for i0 in range(0, len(dfx)):\n c = c + 1\n if abs(dfx[i0]) <= (3 * abs(dx)) or \\\n np.isnan(dfx[i0]) or abs(dfx[i0]) <= threshx:\n xn[c] = x[i0] + dfx[i0] / 2\n datn[:, c] = dat[:, i0 + 1]\n else:\n xn[c] = x[i0] + dx\n datn[:, c] = nans_like(dat[:, 0])\n c = c + 1\n xn[c] = x[i0] + dfx[i0] - dx\n datn[:, c] = dat[:, i0]\n else:\n datn[:, 1:N] = dat\n xn[2:N] = x[2:N] - dfx / 2\n\n xn[0] = x[0] - dx / 2\n xn[-1] = x[-1] + dx / 2\n\n N = datn.shape[0] + sum(abs(dfy) > 3 * abs(dy)) * fixgaps\n datn2 = nans([N + 1, datn.shape[1]])\n yn = nans([N + 1, 1])\n if fixgaps:\n if abs(dfy[0]) < 3 * abs(dy) or abs(dfy[0]) <= threshy:\n yn[0] = y[0] - dfy[0] / 2\n else:\n yn[0] = y[0] - dy\n datn2[0, :] = datn[0, :]\n c = 0\n for i0 in range(0, len(dfy)):\n c = c + 1\n if abs(dfy[i0]) <= (3 * abs(dy)) or \\\n np.isnan(dfy[i0]) or abs(dfy[i0]) <= threshy:\n yn[c] = y[i0] + dfy[i0] / 2\n datn2[c, :] = datn[i0 + 1, :]\n else:\n yn[c] = y[i0] + dy\n datn2[c, :] = nans_like(datn[0, :])\n c = c + 1\n yn[c] = y[i0] + dfy[i0] - dy\n datn2[c, :] = datn[i0, :]\n else:\n datn2[1:N, :] = datn\n yn[2:N] = y[2:N] - dfy / 2\n\n yn[0] = y[0] - dy / 2\n yn[-1] = y[-1] + dy / 2\n\n datm = np.ma.array(datn2, mask=np.isnan(datn2))\n\n [mx, my] = np.meshgrid(xn, yn)\n\n mx = np.ma.array(mx, mask=np.isnan(mx))\n my = np.ma.array(my, mask=np.isnan(my))\n\n # mx=xn\n # my=yn\n\n hndl = ax.pcolormesh(mx, my, datm, shading='flat', **kwargs)\n hndl.set_rasterized(True)\n mpl.pylab.draw_if_interactive()\n return hndl" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
24. Test checks if colorbar is assigned when target type is R
def test_colorbar_R_add_colorbar(self):
    result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)
    self.assertTrue(len(result.figure.axes)>=1)
    pyplot.close()
[ "def is_colorbar(ax):\n return (ax.get_data_ratio() == 1.0 and not ax.get_navigate())", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_tailored_BBBP.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def test_default_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def test_colorbar(self):\n # Setup\n data = [\n [0.0, 0.0, 2.0],\n [1.0, 1.0, 1.0],\n [3.0, 2.0, 0.0],\n ]\n paxfig = paxplot.pax_parallel(n_axes=len(data[0]))\n paxfig.plot(data)\n\n # Requesting axis that doesn't exist\n with self.assertRaises(IndexError):\n paxfig.add_colorbar(\n ax_idx=4,\n cmap='viridis',\n )\n\n # Non integer value for ax_idx\n with self.assertRaises(TypeError):\n paxfig.add_colorbar(\n ax_idx='foo',\n cmap='viridis',\n )\n\n # Colorbar that doesn't exist (default message helpful enough)\n with self.assertRaises(ValueError):\n paxfig.add_colorbar(\n ax_idx=0,\n cmap='foo',\n )", "def test_valid_rgb_in_magick(self):\n assert poly.isInMap((205, 201, 201), \"magick\")", "def test_colorbar_extension_shape():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_shape('uniform')\n _colorbar_extension_shape('proportional')", "def test_default_colorbar(self):\n result = self.plotter_tailored_LOGS.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def is_checkmated(self, color):", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print 'must create plot before adding colorbar'\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ticks = np.linspace(0,1,11)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # mask zi's negative and zero elements\n masked = np.ma.masked_less_equal(self.zi-self.znull, 0.)\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n ticks = np.linspace(logmin,0,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks).ax.set_yticklabels(ticklabels)\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, 
ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print 'color scale used not recognized: cannot produce colorbar'", "def test_cbar_minorticks_for_rc_xyminortickvisible():\n\n plt.rcParams['ytick.minor.visible'] = True\n plt.rcParams['xtick.minor.visible'] = True\n\n vmin, vmax = 0.4, 2.6\n fig, ax = plt.subplots()\n im = ax.pcolormesh([[1, 2]], vmin=vmin, vmax=vmax)\n\n cbar = fig.colorbar(im, extend='both', orientation='vertical')\n assert cbar.ax.yaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.yaxis.get_minorticklocs()[-1] <= vmax\n\n cbar = fig.colorbar(im, extend='both', orientation='horizontal')\n assert cbar.ax.xaxis.get_minorticklocs()[0] >= vmin\n assert cbar.ax.xaxis.get_minorticklocs()[-1] <= vmax", "def test_colorbar_C_keep_legend(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n pyplot.close()", "def iscolor(self):\n return self.channels() == 3", "def colorbar(self):\n if self.s1:\n ax_cb = plt.subplot(self.gs[1])\n else:\n print('must create plot before adding colorbar')\n return\n if self.alt_zi == 'int':\n ticks = np.linspace(-1,1,21)\n # find the intersection of the range of data displayed and ticks\n ticks = [ticki for ticki in ticks if ticki >= \n min(self.zi_norm.min(), self.znull) and \n ticki <= max(self.znull, self.zi_norm.max())]\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '+':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == '-':\n ticks = np.linspace(self.zmin,\n self.zmax,\n self.contour_n + 2)\n # find the intersection of the range of data displayed and ticks\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'amp':\n ampmin = self.floor\n ampmax = self.ceiling\n ticks = np.linspace(ampmin, ampmax,\n num=self.contour_n + 2)\n # determine how much precision is necessary in the ticks:\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n elif self.alt_zi == 'log':\n # the colorbar range\n # not sure whether to define the range using masked array or\n # full array\n #logmin = np.log10(masked.min() / (self.zmax - masked.min()))\n logmin = self.floor\n logmax = self.ceiling\n ticks = np.linspace(logmin,logmax,num=11)\n # determine how much precision is necessary in the ticks:\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n \"\"\"\n elif self.alt_zi in [None, 'raw']: # raw colorbar\n ticks = np.linspace(min([self.znull, self.zmin]),\n max(self.znull, self.zmax),num=11)\n decimals = int(np.floor(-np.log10(np.abs(\n ticks[-1]-ticks[0])))) + 2\n ticklabels = np.around(ticks,decimals)\n self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb).ax.set_yticklabels(ticklabels)\n #self.p1.colorbar(self.cax, ticks=ticks, cax=ax_cb)\n else: #could not determine colorbar type\n print('color scale used not recognized: cannot produce colorbar')", "def hasColor(*args, 
**kwargs):\n \n pass", "def test_colorbar_extension_length():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_length('uniform')\n _colorbar_extension_length('proportional')", "def test_colorbar_R_remove_legend(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertIsInstance(result.get_legend(), type(None))\n pyplot.close()", "def getBestColorChecker(self) -> retval:\n ...", "def test_create_rgb_color(self):\n self.assertEqual(self.sut.type, 'rgb')\n self.assertEqual(self.sut.vector, Vector(100, 150, 200))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
25. Test checks if colorbar is ignored when target type is C
def test_colorbar_C_ignore_colorbar(self):
    result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)
    self.assertTrue(len(result.figure.axes)==1)
    pyplot.close()
[ "def test_colorbar_C_ignore_colorbar(self):\n result = self.plotter_tailored_BBBP.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)==1)\n pyplot.close()", "def is_colorbar(ax):\n return (ax.get_data_ratio() == 1.0 and not ax.get_navigate())", "def no_color():\n return False", "def getBestColorChecker(self) -> retval:\n ...", "def test_default_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def test_colorbar_C_keep_legend(self):\n result = self.plotter_pca_BBBP.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n pyplot.close()", "def test_colours(self):\n msg = \"This is a test message\" # type: str\n\n log = HammerVLSILogging.context(\"test\")\n\n HammerVLSILogging.enable_buffering = True # we need this for test\n HammerVLSILogging.clear_callbacks()\n HammerVLSILogging.add_callback(HammerVLSILogging.callback_buffering)\n\n HammerVLSILogging.enable_colour = True\n log.info(msg)\n assert HammerVLSILogging.get_colour_escape(Level.INFO) + \"[test] \" + msg + HammerVLSILogging.COLOUR_CLEAR == HammerVLSILogging.get_buffer()[0]\n\n HammerVLSILogging.enable_colour = False\n log.info(msg)\n assert \"[test] \" + msg == HammerVLSILogging.get_buffer()[0]", "def iscolor(self):\n return self.channels() == 3", "def is_checkmated(self, color):", "def with_colors() :\n global __with_colors__\n return bool(__with_colors__)", "def test_colorbar_R_add_colorbar(self):\n result = self.plotter_pca_LOGS.visualize_plot(kind='scatter', size=20, remove_outliers=False, is_colored=True, colorbar=True)\n self.assertTrue(len(result.figure.axes)>=1)\n pyplot.close()", "def hasColor(*args, **kwargs):\n \n pass", "def test_valid_rgb_in_magick(self):\n assert poly.isInMap((205, 201, 201), \"magick\")", "def test_valid_rgb_notin_magick(self):\n assert not poly.isInMap((40, 40, 40), \"magick\")", "def test_colorbar_extension_shape():\n # Create figures for uniform and proportionally spaced colorbars.\n _colorbar_extension_shape('uniform')\n _colorbar_extension_shape('proportional')", "def test_default_colorbar(self):\n result = self.plotter_tailored_LOGS.umap(n_neighbors=15, random_state=None, min_dist=0.9, kind='scatter', size=20, remove_outliers=False, is_colored=True)\n self.assertNotIsInstance(result.get_legend(), type(None))\n self.assertEqual(len(result.figure.axes), 1)\n pyplot.close()", "def color_check(plugin, fmt='png'):\n img = img_as_ubyte(data.chelsea())\n r1 = roundtrip(img, plugin, fmt)\n testing.assert_allclose(img, r1)\n\n img2 = img > 128\n r2 = roundtrip(img2, plugin, fmt)\n testing.assert_allclose(img2.astype(np.uint8), r2)\n\n img3 = img_as_float(img)\n with expected_warnings(['precision loss']):\n r3 = roundtrip(img3, plugin, fmt)\n testing.assert_allclose(r3, img)\n\n with expected_warnings(['precision loss']):\n img4 = img_as_int(img)\n if fmt.lower() in (('tif', 'tiff')):\n img4 -= 100\n with expected_warnings(['sign loss']):\n r4 = roundtrip(img4, plugin, fmt)\n testing.assert_allclose(r4, img4)\n else:\n with expected_warnings(['sign loss|precision loss']):\n r4 = roundtrip(img4, plugin, fmt)\n testing.assert_allclose(r4, img_as_ubyte(img4))\n\n img5 = 
img_as_uint(img)\n with expected_warnings(['precision loss']):\n r5 = roundtrip(img5, plugin, fmt)\n testing.assert_allclose(r5, img)", "def test_invalid_rgb_magick(self):\n assert not poly.isInMap((40, 40), \"magick\") and not poly.isInMap((40, 40, 40, 40), \"magick\") and not poly.isInMap(('a', 40, 40), \"magick\") and not poly.isInMap((40, 40, 400), \"magick\") \n\n # Testing that the correct maps are supported", "def test_cbar_tick_format(plot_func, img_3d_mni, cbar_tick_format, tmp_path):\n filename = tmp_path / \"temp.png\"\n plot_func(\n img_3d_mni,\n output_file=filename,\n colorbar=True,\n cbar_tick_format=cbar_tick_format,\n )\n plt.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
29. Test checks if the default value of filename is assigned with scatter
def test_default_filename_scatter(self):
    try:
        os.remove("scatter_test.png")
    except FileNotFoundError:
        pass
    expected = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.plotter_pca_BBBP.visualize_plot(kind='scatter')
    result = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.assertEqual(expected, result)
    pyplot.close()
[ "def test_filename_scatter(self):\n try:\n os.remove(\"scatter_test.png\")\n except FileNotFoundError:\n pass\n expected = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.plotter_pca_BBBP.visualize_plot(kind='scatter', filename=\"scatter_test.png\")\n result = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.assertEqual(expected, result - 1)\n os.remove(\"scatter_test.png\")\n pyplot.close()", "def test_example_file_and_first_dataset_overrides(self):\n neurotic.global_config['defaults']['file'] = 'some other file'\n neurotic.global_config['defaults']['dataset'] = 'some other dataset'\n argv = ['neurotic', 'example', 'first']\n args = neurotic.parse_args(argv)\n win = neurotic.win_from_args(args)\n self.assertEqual(win.metadata_selector.file, self.example_file,\n 'file was not changed correctly')\n self.assertEqual(win.metadata_selector._selection,\n self.example_dataset,\n 'dataset was not changed correctly')", "def test__render_filenames_undefined_template():\n path = \"/srv/salt/saltines\"\n dest = \"/srv/salt/cheese\"\n saltenv = \"base\"\n template = \"biscuits\"\n ret = (path, dest)\n pytest.raises(\n CommandExecutionError, cp._render_filenames, path, dest, saltenv, template\n )", "def test_filenames_default(self):\n\n class Validator(base.Validator):\n \"\"\"Found FOO\"\"\"\n\n invalid = re.compile(r\"FOO\")\n\n class Case(NamedTuple):\n filename: str\n expected: bool\n\n for c in (\n Case(\"Foo.cls\", (\"Foo.cls:1:0: error: Found FOO\",)),\n Case(\"Foo.trigger\", (\"Foo.trigger:1:0: error: Found FOO\",)),\n Case(\"Foo\", ()),\n ):\n with self.subTest(c):\n self.assertMatchLines(\n validator=Validator,\n contents=\"FOO\",\n expected=c.expected,\n path=pathlib.Path(c.filename),\n verbose=-1,\n )", "def data_already_produced(*args):\n return np.all([os.path.exists(i) for i in args])", "def check_filename_existence(func):\n # TODO: Implement this decorator.", "def test_ParticleDataset_from_path():\n pass", "def ensure_filename (self, option):\r\n self._ensure_tested_string(option, os.path.isfile,\r\n \"filename\",\r\n \"'%s' does not exist or is not a file\")", "def test_set_file_path(self):\n self.wda.set_output_file_path(self.non_existing_valid_file_path)\n self.assertEqual(\n self.non_existing_valid_file_path,\n self.wda.get_output_file_path())", "def test_empty_parameter_file(self):\n self.assertIn(\n (\"WARNING: Parameters file is empty\"),\n self.warning)", "def test_invalid_datasets_raise_errors():\n with pytest.raises(KeyError):\n eio.path_to_example(\"Non-existent dataset\")", "def test_svl_dataset_error(svl_source):\n with pytest.raises(ValueError, match=\"name=path\"):\n svl(\n svl_source,\n datasets=[\n \"bigfoot:{}/test_datasets/bigfoot_sightings.csv\".format(\n CURRENT_DIR\n )\n ],\n )", "def test_pathurl_argument_is_skipped(self):\n f = File()\n self.assertEqual('', f.pathurl)", "def test__make_filename__float_index() -> None:\n int_filename = make_filename(index=1)\n float_filename = make_filename(index=1.1)\n\n assert int_filename != float_filename", "def test_is_data_file__no(self) -> None:\n res = util.is_data_file('a/b/c/d/foo.blah')\n self.assertFalse(res)\n res = util.is_data_file('a/b/c/d/foo.blah' + util.META_FILE_EXTENSION)\n self.assertFalse(res)", "def testFileExistence(self):\n check_file_exists(\"null\")\n check_file_exists(\"sample/null.tif\")\n check_file_exists(\"test_merger.py\")", "def test_missing_datasets_raise_errors():\n with pytest.raises(KeyError):\n eio.path_to_example(\"\")", "def 
set_points_filename(self,ofile):\n self.__ofilename = ofile;", "def test_svl_missing_file_error(svl_source):\n with pytest.raises(SvlMissingFileError, match=\"File\"):\n svl(svl_source, datasets=[\"ufos={}/test_datasets/ufo_sightings.csv\"])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
30. Test checks if the value of filename is assigned with scatter
def test_filename_scatter(self):
    try:
        os.remove("scatter_test.png")
    except FileNotFoundError:
        pass
    expected = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.plotter_pca_BBBP.visualize_plot(kind='scatter', filename="scatter_test.png")
    result = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.assertEqual(expected, result - 1)
    os.remove("scatter_test.png")
    pyplot.close()
[ "def test_default_filename_scatter(self):\n try:\n os.remove(\"scatter_test.png\")\n except FileNotFoundError:\n pass\n expected = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.plotter_pca_BBBP.visualize_plot(kind='scatter')\n result = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.assertEqual(expected, result)\n pyplot.close()", "def test_ParticleDataset_from_path():\n pass", "def data_already_produced(*args):\n return np.all([os.path.exists(i) for i in args])", "def check_filename_existence(func):\n # TODO: Implement this decorator.", "def test__make_filename__index() -> None:\n int_filename = make_filename(index=1)\n\n assert '1' in int_filename\n assert '1.0' not in int_filename", "def test_points(sample, sample_name, assert_image_equal): # pylint: disable=redefined-outer-name\n res = nf.io.load(sample(os.path.join('search', sample_name + '.hdf5')))\n nf.search.plot.points(res)\n assert_image_equal('search:' + sample_name)", "def test__make_filename__float_index() -> None:\n int_filename = make_filename(index=1)\n float_filename = make_filename(index=1.1)\n\n assert int_filename != float_filename", "def test__render_filenames_undefined_template():\n path = \"/srv/salt/saltines\"\n dest = \"/srv/salt/cheese\"\n saltenv = \"base\"\n template = \"biscuits\"\n ret = (path, dest)\n pytest.raises(\n CommandExecutionError, cp._render_filenames, path, dest, saltenv, template\n )", "def check_dimension(self, filename, dimension):\n self.dimension = dimension\n self.filename = filename\n if self.dimension in self.filename:\n return True\n else:\n return False", "def testFileExistence(self):\n check_file_exists(\"null\")\n check_file_exists(\"sample/null.tif\")\n check_file_exists(\"test_merger.py\")", "def test_extension_file(self):\n\n def test_cmp(value, expected_value):\n return value in expected_value\n\n # we named our named file: named_file.jpg\n file_filter = Filter(file_type=Constraint([\".png\"], cmp_func=test_cmp))\n files = file_filter.apply(directory=self.dir)\n self.assertEqual(len(list(files)), 0)\n\n file_filter = Filter(file_type=Constraint([\".jpg\", \".png\"], cmp_func=test_cmp))\n files = file_filter.apply(directory=self.dir)\n self.assertEqual(len(list(files)), 1)", "def test_svl_dataset_error(svl_source):\n with pytest.raises(ValueError, match=\"name=path\"):\n svl(\n svl_source,\n datasets=[\n \"bigfoot:{}/test_datasets/bigfoot_sightings.csv\".format(\n CURRENT_DIR\n )\n ],\n )", "def test_file_creation(Nfiles):\n command = ('python specFit/demo/demo_preprocess_tiff.py '\n '--processed_dir {} --raw_dir {} --Nfiles {} --Nx {} --Ny {} --spectra_type {}')\\\n .format(processed_dir(Nfiles), raw_dir(Nfiles), Nfiles, Nx, Ny, spectra_type)\n os.system(command)\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'dataCube.npy'))\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'timestamps.npy'))\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'exposures.npy'))\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'visual.npy'))", "def test_hdf5_file_input():\n catfile = os.path.join(TEST_DATA_DIR, 'point_sources.cat')\n output_hdf5 = os.path.join(TEST_DATA_DIR, 'all_spectra.hdf5')\n sed_file = os.path.join(TEST_DATA_DIR, 'sed_file_with_normalized_dataset.hdf5')\n sed_catalog = spec.make_all_spectra(catfile, input_spectra_file=sed_file,\n normalizing_mag_column='nircam_f444w_magnitude',\n output_filename=output_hdf5)\n\n comparison = hdf5.open(os.path.join(TEST_DATA_DIR, 
'output_spec_from_hdf5_input_including_normalized.hdf5'))\n constructed = hdf5.open(sed_catalog)\n for key in comparison:\n assert key in constructed.keys()\n assert all(comparison[key][\"wavelengths\"].value == constructed[key][\"wavelengths\"].value)\n assert all(comparison[key][\"fluxes\"].value == constructed[key][\"fluxes\"].value)\n assert comparison[key][\"wavelengths\"].unit == constructed[key][\"wavelengths\"].unit\n assert comparison[key][\"fluxes\"].unit == constructed[key][\"fluxes\"].unit\n\n cat_base = catfile.split('.')[0]\n outbase = cat_base + '_with_flambda.cat'\n flambda_output_catalog = os.path.join(TEST_DATA_DIR, outbase)\n os.remove(flambda_output_catalog)\n os.remove(sed_catalog)", "def test_is_data_file__no(self) -> None:\n res = util.is_data_file('a/b/c/d/foo.blah')\n self.assertFalse(res)\n res = util.is_data_file('a/b/c/d/foo.blah' + util.META_FILE_EXTENSION)\n self.assertFalse(res)", "def test_esidfilegenerator_bad_file_error():\n for boolean in (True, False):\n with pytest.raises(AttributeError):\n for record in call_object(es2json.IDFile, use_with=boolean, idfile=str(uuid.uuid4())):\n assert record", "def test_series_numbering():\n assert series.files[-1].num == 19", "def scatter_(self, dim, index, src): # real signature unknown; restored from __doc__\n pass", "def test_get_file_path(self):\n input_dir, gt_data_list = _create_sample_cityscapes_structure(\n self.tmpdir)\n output_dir = input_dir\n data_list = get_file_path(input_dir)\n for cat1, cat2 in zip(data_list.values(), gt_data_list.values()):\n for list1, list2 in zip(cat1.values(), cat2.values()):\n # Do not care about orders.\n assert set(list1) == set(list2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
31. Test checks if the default value of filename is assigned with hex
def test_default_filename_hex(self):
    try:
        os.remove("hex_test.png")
    except FileNotFoundError:
        pass
    expected = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.plotter_pca_BBBP.visualize_plot(kind='hex')
    result = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.assertEqual(expected, result)
    pyplot.close()
[ "def test_bytename_set_non_bytes():\n sfn = EightDotThree()\n for n in ['FILENAME.TXT', 1234, bytearray('FILENAME.TXT', 'ASCII')]:\n with pytest.raises(TypeError) as e:\n sfn.set_byte_name(n)\n assert e.errno == errno.EINVAL", "def test__make_filename__index() -> None:\n int_filename = make_filename(index=1)\n\n assert '1' in int_filename\n assert '1.0' not in int_filename", "def test_get_valid_filename() -> None:\n assert (\n fileup.get_valid_filename(\"john's portrait in 2004.jpg\")\n == \"johns_portrait_in_2004.jpg\"\n )", "def test_filename_nosuffix(self):\n assert bl.generate_filename(0, \"10.3.2.2789\", None) == \"Z10_10.3.2.2789_STL100-1.exe\"", "def test_filename_hex(self):\n try:\n os.remove(\"hex_test.png\")\n except FileNotFoundError:\n pass\n expected = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.plotter_pca_BBBP.visualize_plot(kind='hex', filename=\"hex_test.png\")\n result = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.assertEqual(expected, result - 1)\n os.remove(\"hex_test.png\")\n pyplot.close()", "def test_unique_filename_format(self):\n # Confirm that the format of the filename is correct\n result = uniqify_filename('hey.png')\n self.assertTrue(\n re.search(\n r'^[0-9a-f]{32}\\.png$',\n result\n )\n )", "def is_valid_file(x):\n if not os.path.exists(x):\n raise argparse.ArgumentTypeError(\"{0} does not exist\".format(x))\n return x", "def check_filename_existence(func):\n # TODO: Implement this decorator.", "def test_no_filename_extension(self):\n\n expected_filename = os.path.join(self.test_dir, 'test_file.txt')\n returned_filename = randstr_terminal._write_file('', 'test_file')\n\n self.assertEqual(expected_filename, returned_filename)", "def test_filenames_default(self):\n\n class Validator(base.Validator):\n \"\"\"Found FOO\"\"\"\n\n invalid = re.compile(r\"FOO\")\n\n class Case(NamedTuple):\n filename: str\n expected: bool\n\n for c in (\n Case(\"Foo.cls\", (\"Foo.cls:1:0: error: Found FOO\",)),\n Case(\"Foo.trigger\", (\"Foo.trigger:1:0: error: Found FOO\",)),\n Case(\"Foo\", ()),\n ):\n with self.subTest(c):\n self.assertMatchLines(\n validator=Validator,\n contents=\"FOO\",\n expected=c.expected,\n path=pathlib.Path(c.filename),\n verbose=-1,\n )", "def testFileExistence(self):\n check_file_exists(\"null\")\n check_file_exists(\"sample/null.tif\")\n check_file_exists(\"test_merger.py\")", "def test_is_filename_valid_with_invalid_inferred_fname(self):\n with self.assertRaises(UtilsFileNameValidError):\n assert_filename_valid(INVALID_FILENAME,\n os.path.dirname(self.valid_fp.name))", "def valid_file_name_linux(name:str,default_char:str=\"-\") -> str:\r\n if default_char in invalid_linux_char:\r\n default_char = \"-\"\r\n tabla = {ord(c):default_char for c in invalid_linux_char}\r\n name = name.translate(tabla).strip()\r\n return name if name else \"archivo\"", "def test_set_file_path(self):\n self.wda.set_output_file_path(self.non_existing_valid_file_path)\n self.assertEqual(\n self.non_existing_valid_file_path,\n self.wda.get_output_file_path())", "def test_check_default_values():\n iniconf.check_default_values(spec, 'key1')\n iniconf.check_default_values(spec, 'key2')\n try:\n iniconf.check_default_values(spec, 'key3')\n except ConfigError:\n spec['key3'] = 'integer(default=1)'\n else:\n raise AssertionError(\"Checking for a default value should have failed with: %s\" % spec['key3'])", "def test_crc32(self):\n self.assertEqual(\"4B8E39EF\", self.file_path.crc32)", "def check_filename(filename, Truefile = 
True):\n if filename == None:\n raise Exception(\"you must specify a file name\")\n filename = path.abspath(filename)\n if Truefile and not path.isfile(filename):\n raise Exception(\"file %s is not a real file\" % filename)\n return filename", "def test_isc_utils_isc_file_name_passing(self):\n test_data = [\n 'filenamewithoutatype',\n 'filename.type',\n 'file_expanded.type',\n 'file-hyphenated.type',\n \"'Readable a_file.type'\",\n 'a_file.type',\n 'long_file.type',\n '\"a_file with-dash.type\"',\n \"'Readable a_file.type'\",\n \"'Readable a_file;.type'\",\n \"'Readable \\\"a_file.type'\",\n ]\n result = isc_file_name.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])", "def test_is_data_file__no(self) -> None:\n res = util.is_data_file('a/b/c/d/foo.blah')\n self.assertFalse(res)\n res = util.is_data_file('a/b/c/d/foo.blah' + util.META_FILE_EXTENSION)\n self.assertFalse(res)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
32. Test checks if the value of filename is assigned with hex
def test_filename_hex(self):
    try:
        os.remove("hex_test.png")
    except FileNotFoundError:
        pass
    expected = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.plotter_pca_BBBP.visualize_plot(kind='hex', filename="hex_test.png")
    result = len([name for name in os.listdir('.') if os.path.isfile(name)])
    self.assertEqual(expected, result - 1)
    os.remove("hex_test.png")
    pyplot.close()
[ "def test_crc32(self):\n self.assertEqual(\"4B8E39EF\", self.file_path.crc32)", "def test_adler32(self):\n self.assertEqual(\"081e0256\", self.file_path.adler32)", "def test_unique_filename_format(self):\n # Confirm that the format of the filename is correct\n result = uniqify_filename('hey.png')\n self.assertTrue(\n re.search(\n r'^[0-9a-f]{32}\\.png$',\n result\n )\n )", "def test_default_filename_hex(self):\n try:\n os.remove(\"hex_test.png\")\n except FileNotFoundError:\n pass\n expected = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.plotter_pca_BBBP.visualize_plot(kind='hex')\n result = len([name for name in os.listdir('.') if os.path.isfile(name)])\n self.assertEqual(expected, result)\n pyplot.close()", "def CanReadFile(filename, magic):", "def is_elf(filename):\n\n magic = 0\n\n with open(filename, \"rb\") as f:\n magic = f.read(4)\n\n return (magic == \"\\x7fELF\")", "def test_get_valid_filename() -> None:\n assert (\n fileup.get_valid_filename(\"john's portrait in 2004.jpg\")\n == \"johns_portrait_in_2004.jpg\"\n )", "def test__make_filename__index() -> None:\n int_filename = make_filename(index=1)\n\n assert '1' in int_filename\n assert '1.0' not in int_filename", "def test_isc_utils_isc_file_name_passing(self):\n test_data = [\n 'filenamewithoutatype',\n 'filename.type',\n 'file_expanded.type',\n 'file-hyphenated.type',\n \"'Readable a_file.type'\",\n 'a_file.type',\n 'long_file.type',\n '\"a_file with-dash.type\"',\n \"'Readable a_file.type'\",\n \"'Readable a_file;.type'\",\n \"'Readable \\\"a_file.type'\",\n ]\n result = isc_file_name.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])", "def _is_valid_filename(self, filename: str) -> bool:\n return all(c in FILENAME_VALID_CHARS for c in filename)", "def check_filename(filename):\n assert FILE_RE.match(filename), \"Invalid file name \" + repr(filename)", "def check_file(filename: str) -> str:\n # p = Path(filename)\n return (p:= Path(filename)).resolve().as_uri()", "def isSane(filename):\n if filename == '':\n return False\n funnyCharRe = re.compile('[\\t/ ;,$#]')\n m = funnyCharRe.search(filename)\n if m is not None:\n return False\n if filename[0] == '-':\n return False\n return True", "def test_bytename_set_non_bytes():\n sfn = EightDotThree()\n for n in ['FILENAME.TXT', 1234, bytearray('FILENAME.TXT', 'ASCII')]:\n with pytest.raises(TypeError) as e:\n sfn.set_byte_name(n)\n assert e.errno == errno.EINVAL", "def test_invalid_hex_magick(self):\n assert not poly.isInMap(\"#EE822\", \"magick\") and not poly.isInMap(\n \"EE822\", \"magick\", )", "def is_valid_file(x):\n if not os.path.exists(x):\n raise argparse.ArgumentTypeError(\"{0} does not exist\".format(x))\n return x", "def test_upload_reference_with_unsupported_characters_in_file_name(self):\n test_file_names = [\n ('Screen Shot 2014-01-09 at 3.08.09 PM.jpg',\n 'Screen_Shot_2014-01-09_at_3_08_09_PM.jpg'),\n ('pet sise_08.jpg', 'pet_sise_08.jpg'),\n ('03-MOB\\xc4\\xb0LYA (1).jpg', '03-MOBILYA_(1).jpg'),\n ('b-376152-polis_arabas\\xc4\\xb1.jpg', 'b-376152-polis_arabasi.jpg'),\n ('kasap-d\\xc3\\xbckkan\\xc4\\xb1-b644.jpg', 'kasap-dukkani-b644.jpg'),\n ('\\xc3\\xa7\\xc4\\x9f\\xc4\\xb1\\xc3\\xb6\\xc5\\x9f\\xc3\\xbc'\n '\\xc3\\x87\\xc4\\x9e\\xc4\\xb0\\xc3\\x96\\xc3\\x9c.jpg',\n 'cgiosuCGIOU.jpg'),\n ('\\\\/:\\*\\?\"<>|.jpg', '_.jpg'),\n ('eGhsczN1MTI=_o_taklac-gvercinler.jpg',\n 'eGhsczN1MTI__o_taklac-gvercinler.jpg'),\n ('FB,8241,84,konfor-rahat-taba-erkek-ayakkabi.jpg',\n 'FB_8241_84_konfor-rahat-taba-erkek-ayakkabi.jpg'),\n 
('++_The_B_U_2013_720p.jpg', '__The_B_U_2013_720p.jpg')\n ]\n\n # create a temp file and ask to save over it\n # upload an image file as a reference\n for test_value, expected_result in test_file_names:\n with open(self.test_image_path) as f:\n link1 = self.test_media_manager.upload_reference(\n self.test_task2, f, test_value\n )\n #the file in filesystem is saved correctly\n self.assertEqual(\n os.path.basename(link1.full_path),\n expected_result\n )\n # but original filename is intact\n self.assertEqual(\n link1.original_filename,\n test_value\n )", "def test_file_creation_utf_16(self):\n process_file('./test_files/utf16_file.csv', 'test_correct')\n\n self.assertTrue(os.path.isfile('../processed_files/test_correct.csv'))", "def check_filename_existence(func):\n # TODO: Implement this decorator." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Third test with the PyTorchClassifier.
def test_ptclassifier(self):
    # Build PyTorchClassifier
    ptc = get_classifier_pt()

    # Get MNIST
    (_, _), (x_test, _) = self.mnist
    x_test = np.swapaxes(x_test, 1, 3).astype(np.float32)

    # Attack
    nf = NewtonFool(ptc, max_iter=5, batch_size=100)
    x_test_adv = nf.generate(x_test)
    self.assertFalse((x_test == x_test_adv).all())

    y_pred = ptc.predict(x_test)
    y_pred_adv = ptc.predict(x_test_adv)
    y_pred_bool = y_pred.max(axis=1, keepdims=1) == y_pred
    y_pred_max = y_pred.max(axis=1)
    y_pred_adv_max = y_pred_adv[y_pred_bool]
    self.assertTrue((y_pred_max >= .9 * y_pred_adv_max).all())
[ "def test_5_pytorch_classifier(self):\n self.x_train_mnist = np.reshape(self.x_train_mnist, (self.x_train_mnist.shape[0], 1, 28, 28)).astype(np.float32)\n\n # Build PyTorchClassifier\n victim_ptc = get_image_classifier_pt()\n\n # Create the thieved classifier\n thieved_ptc = get_image_classifier_pt(load_init=False)\n\n # Create random attack\n attack = KnockoffNets(\n classifier=victim_ptc,\n batch_size_fit=BATCH_SIZE,\n batch_size_query=BATCH_SIZE,\n nb_epochs=NB_EPOCHS,\n nb_stolen=NB_STOLEN,\n sampling_strategy=\"random\",\n )\n\n thieved_ptc = attack.extract(x=self.x_train_mnist, thieved_classifier=thieved_ptc)\n\n victim_preds = np.argmax(victim_ptc.predict(x=self.x_train_mnist), axis=1)\n thieved_preds = np.argmax(thieved_ptc.predict(x=self.x_train_mnist), axis=1)\n acc = np.sum(victim_preds == thieved_preds) / len(victim_preds)\n\n self.assertGreater(acc, 0.3)\n\n # Create adaptive attack\n attack = KnockoffNets(\n classifier=victim_ptc,\n batch_size_fit=BATCH_SIZE,\n batch_size_query=BATCH_SIZE,\n nb_epochs=NB_EPOCHS,\n nb_stolen=NB_STOLEN,\n sampling_strategy=\"adaptive\",\n reward=\"all\",\n )\n thieved_ptc = attack.extract(x=self.x_train_mnist, y=self.y_train_mnist, thieved_classifier=thieved_ptc)\n\n victim_preds = np.argmax(victim_ptc.predict(x=self.x_train_mnist), axis=1)\n thieved_preds = np.argmax(thieved_ptc.predict(x=self.x_train_mnist), axis=1)\n acc = np.sum(victim_preds == thieved_preds) / len(victim_preds)\n\n self.assertGreater(acc, 0.4)\n\n self.x_train_mnist = np.reshape(self.x_train_mnist, (self.x_train_mnist.shape[0], 28, 28, 1)).astype(np.float32)", "def exec_classifiers(self, dataset):\n f = Features()\n pt = param_tuning.ParamTuning()\n\n start_time = time.time()\n Xtrain, Xtest, ytrain, ytest = self._load_and_split_data(dataset)\n print(\"Loaded train/test datasets in {} sec.\".format(time.time() - start_time))\n\n fX_train = f.build(Xtrain)\n fX_test = f.build(Xtest)\n print(\"Build features from train/test data in {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n tot_time = time.time(); start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n # estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.clf_names[clf][0](random_state=config.seed_no)\n estimator.set_params(**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, ytrain, estimator)\n\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n res = pt.testClassifier(fX_test, ytest, estimator)\n self._print_stats(clf, res['metrics'], res['feature_imp'], start_time)\n # if not os.path.exists('output'):\n # os.makedirs('output')\n # np.savetxt(f'output/{clf}_default_stats.csv', res['metrics']['stats'], fmt=\"%u\")\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))", "def test_train():\n test_clf = train.train()\n assert isinstance(test_clf, RandomForestClassifier)\n assert 8 == test_clf.n_features_", "def test_tf_vs_cvnn():\n test_mnist()\n test_fashion_mnist()\n test_cifar10()", "def test_trial_ensemble(trial_name, classifier):\n models_dir = args.saved_models + '/{0}/best_models/'.format(trial_name)\n best_models = [m[2] for m in os.walk(models_dir)][0]\n classifiers = []\n for m in best_models:\n new_classifier = 
classifier\n new_classifier.load_checkpoint(models_dir+m)\n classifiers.append(new_classifier)\n \n total_correct = 0\n for i, x in enumerate(classifier.test_di):\n label = x[4] if classifier.classification_type == \"simple\" else x[5]\n predictions = [c.classify(x) for c in classifiers]\n avg_prediction = np.mean(predictions, 0)\n class_prediction = avg_prediction.argmax(0)\n if class_prediction == label:\n total_correct += 1\n \n return total_correct / len(classifier.test_di)", "def train_mlp(X_train,\n X_test,\n y_train,\n y_test,\n seed,\n search_hparams={},\n batch_size=50,\n n_folds=3,\n max_iter=100,\n search_n_iter=20):\n\n import torch.optim\n from skorch import NeuralNetClassifier\n from skorch.callbacks import Callback, EpochScoring\n from skorch.dataset import ValidSplit\n from pancancer_evaluation.prediction.nn_models import ThreeLayerNet\n\n # first set up the random search\n\n # default hyperparameter search options\n # will be overridden by any existing entries in search_hparams\n default_hparams = {\n 'learning_rate': [0.1, 0.01, 0.001, 5e-4, 1e-4],\n 'h1_size': [100, 200, 300, 500, 1000],\n 'dropout': [0.1, 0.5, 0.75],\n 'weight_decay': [0, 0.1, 1, 10, 100]\n }\n for k, v in default_hparams.items():\n search_hparams.setdefault(k, v)\n\n model = ThreeLayerNet(input_size=X_train.shape[1])\n\n clf_parameters = {\n 'lr': search_hparams['learning_rate'],\n 'module__input_size': [X_train.shape[1]],\n 'module__h1_size': search_hparams['h1_size'],\n 'module__dropout': search_hparams['dropout'],\n 'optimizer__weight_decay': search_hparams['weight_decay'],\n }\n\n net = NeuralNetClassifier(\n model,\n max_epochs=max_iter,\n batch_size=batch_size,\n optimizer=torch.optim.Adam,\n iterator_train__shuffle=True,\n verbose=0, # by default this prints loss for each epoch\n train_split=False,\n device='cuda'\n )\n\n if n_folds == -1:\n # for this option we just want to do a grid search for a single\n # train/test split, this is much more computationally efficient\n # but could have higher variance\n from sklearn.model_selection import train_test_split\n subtrain_ixs, valid_ixs = train_test_split(\n np.arange(X_train.shape[0]),\n test_size=0.2,\n random_state=seed,\n shuffle=True\n )\n cv_pipeline = RandomizedSearchCV(\n estimator=net,\n param_distributions=clf_parameters,\n n_iter=search_n_iter,\n cv=((subtrain_ixs, valid_ixs),),\n scoring='average_precision',\n verbose=2,\n random_state=seed\n )\n else:\n cv_pipeline = RandomizedSearchCV(\n estimator=net,\n param_distributions=clf_parameters,\n n_iter=search_n_iter,\n cv=n_folds,\n scoring='average_precision',\n verbose=2,\n random_state=seed\n )\n\n cv_pipeline.fit(X=X_train.values.astype(np.float32),\n y=y_train.status.values.astype(np.int))\n print(cv_pipeline.best_params_)\n print('Training final model...')\n\n # then retrain the model and get epoch-level performance info\n\n # define callback for scoring test set, to run each epoch\n class ScoreData(Callback):\n def __init__(self, X, y):\n self.X = X\n self.y = y\n\n def on_epoch_end(self, net, **kwargs):\n y_pred = net.predict_proba(self.X)[:, 1]\n net.history.record(\n 'test_aupr',\n average_precision_score(self.y, y_pred)\n )\n\n net = NeuralNetClassifier(\n model,\n max_epochs=max_iter,\n batch_size=batch_size,\n optimizer=torch.optim.Adam,\n iterator_train__shuffle=True,\n verbose=0,\n train_split=ValidSplit(cv=((subtrain_ixs, valid_ixs),)),\n callbacks=[\n ScoreData(\n X=X_test.values.astype(np.float32),\n y=y_test.status.values.astype(np.int)\n ),\n 
EpochScoring(scoring='average_precision', name='valid_aupr',\n lower_is_better=False),\n EpochScoring(scoring='average_precision', on_train=True,\n name='train_aupr', lower_is_better=False)\n ],\n device='cuda',\n **cv_pipeline.best_params_\n )\n\n net.fit(X_train.values.astype(np.float32),\n y_train.status.values.astype(np.int))\n\n X_subtrain, X_valid = X_train.iloc[subtrain_ixs, :], X_train.iloc[valid_ixs, :]\n y_subtrain, y_valid = y_train.iloc[subtrain_ixs, :], y_train.iloc[valid_ixs, :]\n\n # Get all performance results\n y_predict_train = net.predict_proba(\n X_subtrain.values.astype(np.float32)\n )[:, 1]\n y_predict_valid = net.predict_proba(\n X_valid.values.astype(np.float32)\n )[:, 1]\n y_predict_test = net.predict_proba(\n X_test.values.astype(np.float32)\n )[:, 1]\n\n return (net,\n cv_pipeline,\n (y_subtrain, y_valid),\n (y_predict_train, y_predict_valid, y_predict_test))", "def test_that_code_classifier_works(self):\n training_data = _load_training_data()\n self.clf.train(training_data)\n classifier_data = self.clf.to_dict()\n self.clf.validate(classifier_data)", "def setup_classifiers(self):\n # self.names = [\\\n # \"Nearest Neighbors\", \\\n # \"Decision Tree\", \\\n # \"Random Forest\", \\\n # \"AdaBoost\", \\\n # \"Naive Bayes\",\\\n # \"Neural Net\" \\\n\n # #### \"Logistic Regression\",\\\n # #### \"Linear SVM\", \"Rbf SVM\", \"Poly SVM\", \"Sigmoid SVM\"\\\n # ####, \"Gaussian Process\"\n # ]\n max_iterations = 1000\n # self.classifiers = [ \\\n # learn_all_boyou.KNeighborsClassifier(\\\n # n_neighbors=7, weights='uniform', algorithm='auto', n_jobs=-1),\\\n # learn_all_boyou.DecisionTreeClassifier(\\\n # max_depth=17, min_samples_split=12, min_samples_leaf=12,\\\n # presort=True, max_features=None,\\\n # random_state=int(round(time.time()))),\\\n # learn_all_boyou.RandomForestClassifier(max_depth=100, min_samples_split=12,\\\n # min_samples_leaf=12, \\\n # n_estimators=100, max_features=None,\\\n # random_state=int(round(time.time()))), \\\n # learn_all_boyou.AdaBoostClassifier(algorithm='SAMME.R', n_estimators=200, \\\n # random_state=int(round(time.time()))),\\\n # learn_all_boyou.GaussianNB(priors=[0.5, 0.5]),\\\n # learn_all_boyou.MLPClassifier(hidden_layer_sizes=(100,100,100,100), \\\n # alpha=100, solver='lbfgs',\\\n # max_iter=max_iterations,\\\n # activation='tanh', tol=1e-5,\\\n # warm_start='True') \\\n\n # #### LogisticRegression(penalty='l2', tol=1e-4, C=1e2,\\\n # #### fit_intercept=True, solver='lbfgs', \\\n # #### class_weight='balanced', max_iter=max_iterations), \\\n # #### SVC(kernel=\"linear\", C=1e2, tol=1e-4, max_iter=max_iterations,\\\n # #### probability= True),\\\n # #### SVC(kernel=\"rbf\", C=1e2, tol=1e-4, max_iter=max_iterations,\\\n # #### probability=True, shrinking=True),\n # #### SVC(kernel=\"poly\", C=1e2, degree=4, tol=1e-4,\\\n # #### max_iter=max_iterations, probability=True),\\\n # #### SVC(kernel=\"sigmoid\", C=1e2, gamma=1e-1, tol=1e-3, \\\n # #### max_iter=max_iterations, probability=True, \\\n # #### shrinking=True)#,\\\n # #### GaussianProcessClassifier(1.0 * RBF(1.0), n_jobs=-1, \\\n # #### copy_X_train=False, \\\n # #### max_iter_predict=100, warm_start=False )\\\n # ]\n\n self.classifiers = list(\\\n itertools.chain(\\\n [learn_all_boyou.KNeighborsClassifier(\\\n n_neighbors=parameter_i, \\\n weights='uniform', \\\n algorithm='auto', \\\n n_jobs=-1) \\\n for parameter_i in list(xrange(38, 68, 3))],\\\n [learn_all_boyou.DecisionTreeClassifier(\\\n max_depth=parameter_i, \\\n min_samples_split=12, \\\n 
min_samples_leaf=12,\\\n presort=True, max_features=None,\\\n random_state=int(round(time.time()))) \\\n for parameter_i in list(xrange(35, 45, 1))],\\\n [learn_all_boyou.RandomForestClassifier(\\\n max_depth=parameter_i, \\\n min_samples_split=12,\\\n min_samples_leaf=12, \\\n n_estimators=100, max_features=None,\\\n random_state=int(round(time.time()))) \\\n for parameter_i in list(xrange(30, 40, 1))],\\\n [learn_all_boyou.AdaBoostClassifier(\\\n algorithm='SAMME.R', \\\n n_estimators=parameter_i, \\\n random_state=int(round(time.time())))\n for parameter_i in list(xrange(300, 1300, 100))],\\\n [learn_all_boyou.GaussianNB(\\\n priors=[0.5, 0.5])],\\\n [learn_all_boyou.MLPClassifier(\\\n hidden_layer_sizes=(parameter_i,parameter_i,parameter_i), \\\n alpha = 5, \\\n solver='lbfgs',\\\n max_iter=max_iterations,\\\n activation='tanh', \n tol=1e-5,\\\n warm_start='True') \\\n for parameter_i in list(xrange(3, 53, 5))]\\\n )\\\n )\n self.names = list (\\\n itertools.chain(\\\n [\"Nearest Neighbors: \" + \\\n json.dumps(self.classifiers[parameter_i].get_params()) \\\n for parameter_i in list(xrange(0, 10))],\\\n [\"Decision Tree: \" + \\\n json.dumps(self.classifiers[parameter_i].get_params()) \\\n for parameter_i in list(xrange(10, 20))],\\\n [\"Random Forest: \" + \\\n json.dumps(self.classifiers[parameter_i].get_params()) \\\n for parameter_i in list(xrange(20, 30))],\\\n [\"AdaBoost: \" + \\\n json.dumps(self.classifiers[parameter_i].get_params()) \\\n for parameter_i in list(xrange(30, 40))],\\\n [\"Naive Bayes: \" + \\\n json.dumps(self.classifiers[40].get_params())], \\\n [\"Neural Net: \" + \\\n json.dumps(self.classifiers[parameter_i].get_params()) \\\n for parameter_i in list(xrange(41, 51))]\\\n )\\\n )", "def test_train(self):\n trace.train(10)", "def testCLAMultistepModel(self):\n\n self._printTestHeader()\n inst = OneNodeTests(self._testMethodName)\n return inst.testCLAMultistepModel(onCluster=True, maxModels=4)", "def test_classification(init_env, config_file):\n run_all_steps(init_env, config_file)", "def test__finetune(self) -> None:\n torch.manual_seed(0)\n dataset = FakeData(\n size=10, image_size=(3, 8, 8), num_classes=5, transform=ToTensor()\n )\n train_dataloader = DataLoader(dataset, batch_size=2)\n val_dataloader = DataLoader(dataset, batch_size=2)\n linear = nn.Linear(3 * 8 * 8, 4)\n model = nn.Sequential(nn.Flatten(), linear)\n initial_weights = linear.weight.clone()\n linear_classifier = LinearClassifier(\n model=model,\n batch_size_per_device=2,\n feature_dim=4,\n num_classes=5,\n freeze_model=False, # Don't freeze the model for finetuning.\n )\n initial_head_weights = linear_classifier.classification_head.weight.clone()\n trainer = Trainer(max_epochs=1, accelerator=\"cpu\", devices=1)\n trainer.fit(linear_classifier, train_dataloader, val_dataloader)\n assert trainer.callback_metrics[\"train_loss\"].item() > 0\n assert trainer.callback_metrics[\"train_top1\"].item() >= 0\n assert (\n trainer.callback_metrics[\"train_top5\"].item()\n >= trainer.callback_metrics[\"train_top1\"].item()\n )\n assert trainer.callback_metrics[\"train_top5\"].item() <= 1\n assert trainer.callback_metrics[\"val_loss\"].item() > 0\n assert trainer.callback_metrics[\"val_top1\"].item() >= 0\n assert (\n trainer.callback_metrics[\"val_top5\"].item()\n >= trainer.callback_metrics[\"val_top1\"].item()\n )\n assert trainer.callback_metrics[\"val_top5\"].item() <= 1\n\n # Verify that weights were updated.\n assert not torch.all(torch.eq(initial_weights, linear.weight))\n # Verify that head weights 
were updated.\n assert not torch.all(\n torch.eq(initial_head_weights, linear_classifier.classification_head.weight)\n )", "def test_trainable_variables(self):\n inputs = torch.zeros(32, 16, dtype=torch.int64)\n\n # case 1\n classifier = BertClassifier()\n _, _ = classifier(inputs)\n self.assertEqual(len(classifier.trainable_variables), 199 + 2)\n\n # case 2\n hparams = {\n \"clas_strategy\": \"all_time\",\n \"max_seq_length\": 8,\n }\n classifier = BertClassifier(hparams=hparams)\n _, _ = classifier(inputs)\n self.assertEqual(len(classifier.trainable_variables), 199 + 2)\n\n # case 3\n hparams = {\n \"clas_strategy\": \"time_wise\",\n }\n classifier = BertClassifier(hparams=hparams)\n _, _ = classifier(inputs)\n self.assertEqual(len(classifier.trainable_variables), 199 + 2)", "def test_run_feature_selection(self):", "def test(self, test_set, test_label):\n\n # YOUR CODE HERE\n accuracy = 0\n pred_label = np.zeros((len(test_set)))\n probs = np.zeros((len(test_set)))\n # predict every sample X by likelihood\n for X_idx, X in tqdm(enumerate(test_set), total=len(pred_label), desc='BAYES MODEL TEST'):\n # initial final log_probs by prior prob\n # log_probs = self.prior.copy()\n log_probs = np.log(self.prior)\n for y_i in range(self.num_class):\n for f_i in range(self.feature_dim):\n log_probs[y_i] += np.log(self.likelihood[f_i, X[f_i], y_i])\n this_predict_label = np.argmax(log_probs)\n pred_label[X_idx] = this_predict_label\n probs[X_idx]=max(log_probs)\n # calculate acc rate\n accuracy = np.sum(pred_label == test_label) / len(pred_label)\n\n return accuracy, pred_label, probs", "def __init__(self, classifier, samples, sample_classif_func):\n self.classifier = classifier\n self.samples = list(samples)\n self.sample_classif_func = sample_classif_func", "def elaspic_train(args):\n _train_predictor('core')\n _train_predictor('interface')", "def train_and_test():\n\ttrain_data, test_data, test_users, test_movies = get_train_data()\n\tprint \"loaded train & test data\"\n\tcf = collaborative_filtering(train_data)\n\t# evaluate the collaborative filtering model by printing the rmse value for the test data\n\tprint cf.score(test_data)", "def test_features_importances(classifiers_names, predictors_agrad, answer_agrad, predictors_seg, answer_seg, group=\"\"):\n\n\tclassifiers = load_classifiers_wodraw(group)#load_classifiers_rnr(group)#load_classifiers_3classes(group)\n\tclassifiers_agrad = [classifiers[0][0]]\n\tclassifiers_seg = [classifiers[1][0]]\n\n\tfor pair in [ [\"Pleasantness\", predictors_agrad, answer_agrad, classifiers_agrad], [\"Safety\", predictors_seg, answer_seg, classifiers_seg] ]:\n\t\tfor classifier_index in range(0, len(pair[3])):\n\t\t\tclf = pair[3][classifier_index]\n\t\t\tclf_name = classifiers_names[classifier_index]\n\n\t\t\t#Training with all data!\n\t\t\tclf.fit(pair[1], pair[2])\n\n\t\t\ttry:\n\t\t\t\timportances_dic = {}\n\t\t\t\timportances = clf.feature_importances_\n\t\t\t\tfor index in range(0, len(list_of_predictors)):\n\t\t\t\t\timportances_dic[list_of_predictors[index]] = importances[index]\n\t\t\t\t\n\t\t\t\tsorted_dic = sorted(importances_dic.items(), key=operator.itemgetter(1), reverse=True)\n\t\t\t\tprint \">>>> G \" + group + \" Q \" + pair[0] + \" C \" + clf_name\n\t\t\t\t#print str(sorted_dic)\n\t\t\t\tprint '\\n'.join([str(tuple[0]) + \" \" + str(tuple[1]) for tuple in sorted_dic])\n\t\t\t\t#print \"FEATURES \" + str(\", \".join(list_of_predictors))\n\t\t\t\t#print(clf.feature_importances_)\n\t\t\n\t\t\t\tplot_importances(clf, pair, group)\n\n\t\t\t\t# 
RECURSIVE! Create the RFE object and compute a cross-validated score.\n\t\t\t\t#svc = SVC(kernel=\"linear\")\n\t\t\t\t#if pair[0] == \"Pleasantness\":\n\t\t\t\t#\tsvc = load_classifiers_wodraw(group)[0][0]\n\t\t\t\t#else:\n\t\t\t\t#\tsvc = load_classifiers_wodraw(group)[1][0]\n\t\t\t\t# The \"accuracy\" scoring is proportional to the number of correct classifications\n\t\t\t\t#rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(pair[2], 5),\n\t\t\t\t#\t scoring='accuracy')\n\t\t\t\t#rfecv.fit(pair[1], pair[2])\n\n\t\t\t\t#print(\"Optimal number of features : %d\" % rfecv.n_features_)\n\t\t\t\t#print \"Ranking \" + str(rfecv.ranking_)\n\n\t\t\t\t#importances_dic = {}\n\t\t\t\t#importances = rfecv.ranking_\n\t\t\t\t#for index in range(0, len(list_of_predictors)):\n\t\t\t\t#\timportances_dic[list_of_predictors[index]] = importances[index]\n\t\t\t\t#\n\t\t\t\t#sorted_dic = sorted(importances_dic.items(), key=operator.itemgetter(1))\n\t\t\t\t#print \">>>> G \" + group + \" Q \" + pair[0] + \" C \" + clf_name\n\t\t\t\t##print str(sorted_dic)\n\t\t\t\t#print '\\n'.join([str(tuple[0]) + \" \" + str(tuple[1]) for tuple in sorted_dic])\n\t\t\t\t# RECURSIVE!\n\n\t\t\t\t#SELECT FROM MODEL! Quais as features?\n\t\t\t\t#print \">>>> G \" + group + \" Q \" + pair[0] + \" C \" + clf_name\n\t\t\t\t#model = SelectFromModel(clf, prefit=True)\n\t\t\t\t#X_new = model.transform(pair[1])\n\t\t\t\t#print model.inverse_transform(X_new)\n\t\t\t\t#print X_new\n\t\t\t\t#SELECT FROM MODEL!\n \n\t\t\texcept Exception as inst:\n\t\t\t\tprint \"Exception! \"\n\t\t\t\tprint type(inst) \n\t\t\t\tprint inst.args \n\t\t\texcept:\n \t\t\t\tprint \"Unexpected error:\", sys.exc_info()[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds noise to the 'data_raw'. This could be useful for testing the sensitivity to noisy measurements.
def addNoiseData(self, noise_amp):
    noise = np.random.normal(0, noise_amp, self.data_to_fit.shape)
    self.data_to_fit = self.data_to_fit + noise
[ "def add_noise(self, noise):\n self.noise_level = noise\n self.K = self.K + (noise * np.identity(self.X.size))", "def add_noise(SNR_db,audio):\r\n ex1 = puissance(audio)\r\n snr = 10**(SNR_db/10)\r\n ex2 = ex1/snr\r\n noise = np.random.normal(loc = 0, scale = ex2**(1/2), size = audio.shape)\r\n #print(\"noise power wanted: \",10*np.log10(ex2))\r\n #print(\"noise power obtained: \",10*np.log10(puissance(noise)))\r\n try : \r\n return audio.copy()+noise\r\n except :\r\n return audio + noise", "def add_noise(self):\n for i in range(self.num_neurons):\n spike_train = deepcopy(self.spike_trains[i, :])\n\n # Get indices without spikes.\n indices = [j for j, dt in enumerate(spike_train) if dt == 0]\n\n # Add spikes to indices randomly with given probability.\n p = self.noise * self.dt\n for index in indices:\n if np.random.uniform(0, 1) < p:\n spike_train[index] = 1\n\n self.spike_trains[i, :] = spike_train", "def _random_noise(self, arr):\n rnd_snr = random.randint(self.noise_range[0], self.noise_range[1])\n NOISE_FACTOR = 1 / (10 ** (rnd_snr / 10))\n\n return arr + np.random.normal(0, NOISE_FACTOR, len(arr))", "def add_noise(infile, noise_name, snr):\n fs1, x = monoWavRead(filename=infile)\n\n noise_path = './sounds/%s.wav' % noise_name\n fs2, z = monoWavRead(filename=noise_path)\n\n while z.shape[0] < x.shape[0]:\n z = np.concatenate((z, z), axis=0)\n z = z[0: x.shape[0]]\n rms_z = np.sqrt(np.mean(np.power(z, 2)))\n rms_x = np.sqrt(np.mean(np.power(x, 2)))\n snr_linear = 10 ** (snr / 20.0)\n noise_factor = rms_x / rms_z / snr_linear\n y = x + z * noise_factor\n rms_y = np.sqrt(np.mean(np.power(y, 2)))\n y = y * rms_x / rms_y\n\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_addedNoise%s.wav\" % str(snr))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = y)\n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)", "def add_noise(u, volume):\n n = u.shape[0]\n u_noisy = u.copy()\n val_range = np.amax(u) - np.amin(u)\n # u_noisy += np.random.normal(0, val_range*volume, u.shape)\n u_noisy[3:n-3, 3:n-3] += np.random.normal(0, val_range*volume, (n-6, n-6))\n return u_noisy", "def __remove_noise(self, data):\n\n denoised_data = []\n\n for feature in data.T:\n var = np.var(feature)\n n = float(len(feature))\n threshold = np.sqrt(2 * var * np.log10(n) / n)\n\n ca, cd = pywt.dwt(feature, 'db1')\n\n for i in range(len(ca)):\n if np.abs(ca[i]) >= threshold:\n ca[i] = np.sign(ca[i]) * (np.absolute(ca[i]) - threshold)\n else:\n ca[i] = 0\n\n for i in range(len(cd)):\n if np.abs(cd[i]) >= threshold:\n cd[i] = np.sign(cd[i]) * (np.absolute(cd[i]) - threshold)\n else:\n cd[i] = 0\n\n new_data = pywt.idwt(ca, cd, 'db1')\n denoised_data.append(new_data)\n\n denoised_data = np.array(denoised_data).T\n denoised_data = np.delete(denoised_data, -1, axis=0)\n\n return denoised_data", "def add_noise_flat(self, spec, wl, sn=50):\n noise = np.random.normal(loc=0, scale=spec / sn)\n noisified_spectra = spec + noise\n\n # interpolate negative values\n return self.interp_negative(noisified_spectra, wl)", "def reduce_noise(\n self, audio_data: bytes, noise_profile: Path, amount: float = 0.5\n ) -> bytes:\n return subprocess.run(\n [\n \"sox\",\n \"-r\",\n str(self.sample_rate),\n \"-e\",\n \"signed-integer\",\n \"-b\",\n str(self.sample_width * 8),\n \"-c\",\n str(self.channels),\n \"-t\",\n \"raw\",\n \"-\",\n \"-t\",\n \"raw\",\n \"-\",\n \"noisered\",\n str(noise_profile),\n str(amount),\n 
],\n check=True,\n stdout=subprocess.PIPE,\n input=audio_data,\n ).stdout", "def add_noise(curr_image, noise_amount):\n\n return random_noise(curr_image, mode='s&p', amount=noise_amount)", "def add_noise(self, noise):\n if isinstance(noise, Noise):\n self.noise.append(noise)\n else:\n raise TypeError(\"Input is not a Noise object.\")", "def add_noise(self,\n x_mean,\n x_std=None,\n x_min=None,\n noise_type='chi2'):\n if noise_type == 'chi2':\n noise = distributions.chi2(x_mean, self.chi2_df, self.shape)\n \n # Based on variance of ideal chi-squared distribution\n x_std = np.sqrt(2 * self.chi2_df) * x_mean / self.chi2_df\n elif noise_type in ['normal', 'gaussian']:\n if x_std is not None:\n if x_min is not None:\n noise = distributions.truncated_gaussian(x_mean,\n x_std,\n x_min,\n self.shape)\n else:\n noise = distributions.gaussian(x_mean,\n x_std,\n self.shape)\n else:\n sys.exit('x_std must be given')\n else:\n sys.exit('{} is not a valid noise type'.format(noise_type))\n \n self.data += noise\n\n set_to_param = (self.noise_mean == self.noise_std == 0)\n if set_to_param:\n self.noise_mean, self.noise_std = x_mean, x_std\n else:\n self._update_noise_frame_stats()\n\n return noise", "def generate_noise(noise_params: configparser.ConfigParser, signal: np.ndarray,\n data_points: int) -> np.ndarray:\n snr = float(noise_params[SIGNAL_TO_NOISE])\n if snr != 0.0:\n noise = np.random.normal(size=data_points)\n # work out the current SNR\n current_snr = np.mean(signal) / np.std(noise)\n # scale the noise by the snr ratios (smaller noise <=> larger snr)\n noise *= (current_snr / snr)\n else:\n noise = np.zeros(data_points)\n # return the new signal with noise\n return noise", "def addNoise(*args):\n return _seb.addNoise(*args)", "def noise(data, from_range=(0, 1), axis=0):\n for i in range(len(data)):\n for j in range(len(data[i][axis])):\n data[i][axis][j] += (from_range[1] - from_range[0]) * random.random() + from_range[0]", "def make_noise(self, times):\n long_times = self._calculate_lead_in_times(times)\n preprocessed = self.antenna.make_noise(long_times)\n processed = self.front_end(preprocessed)\n return processed.with_times(times)", "def train_noise(self, data, history = 4):\n \n tmp_mean = 0.0\n tmp_var = 0.0\n total = 0\n\n for vec in data:\n for i in range(history, len(vec) - 1):\n f = self.forecast(vec[0:i])\n d = f - vec[i]\n tmp_mean += d\n total += 1\n \n tmp_mean = tmp_mean / (1.0 * total)\n \n for vec in data:\n for i in range(history, len(vec) - 1):\n f = self.forecast(vec[0:i])\n d = f - vec[i]\n tmp_var += (tmp_mean - d)**2\n \n tmp_var = tmp_var / (1.0 * total)\n \n self.noise_mean = tmp_mean\n self.noise_std = math.sqrt(tmp_var)", "def resample_noise(self):\n # sqrt(N) noise applies to the number of counts, not the rate\n counts = self.rate * self.dt\n counts[counts<0] = 0\n # draw the counts in each time bin from a Poisson distribution\n # with the mean set according to the original number of counts in the bin\n rnd_counts = np.random.poisson(counts)\n rate = rnd_counts.astype(float) / self.dt\n # sqrt(N) errors again as if we're making a measurement\n error = np.sqrt(self.rate / self.dt)\n\n resample_lc = LightCurve(t=self.time, r=rate, e=error)\n resample_lc.__class__ = self.__class__\n return resample_lc", "def setNoise(self,value=0):\n self.noise = value\n if self.noise >= self.threshold:\n self.refresh()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take an entry and format it for output
def format_entry(entry):
    separator = '-' * 80
    return """
{separator}
{entry}
{separator}""".format(separator=separator, entry=describe_entry(entry))
[ "def save_entry(f, entry):\n f.write('entry(\\n')\n f.write(' index = {0:d},\\n'.format(entry.index))\n f.write(' label = \"{0}\",\\n'.format(entry.label))\n\n if isinstance(entry.item, Molecule):\n f.write(' molecule = \\n')\n f.write('\"\"\"\\n')\n f.write(entry.item.to_adjacency_list(remove_h=False))\n f.write('\"\"\",\\n')\n elif isinstance(entry.item, Group):\n f.write(' group = \\n')\n f.write('\"\"\"\\n')\n f.write(entry.item.to_adjacency_list())\n f.write('\"\"\",\\n')\n else:\n f.write(' group = \"{0}\",\\n'.format(entry.item))\n\n if isinstance(entry.data, GroupFrequencies):\n f.write(' statmech = GroupFrequencies(\\n')\n f.write(' frequencies = [\\n')\n for lower, upper, degeneracy in entry.data.frequencies:\n f.write(' ({0:g}, {1:g}, {2:d}),\\n'.format(lower, upper, degeneracy))\n f.write(' ],\\n')\n f.write(' symmetry = {0:d},\\n'.format(entry.data.symmetry))\n f.write(' ),\\n')\n else:\n f.write(' statmech = {0!r},\\n'.format(entry.data))\n\n if entry.reference is not None:\n f.write(' reference = {0!r},\\n'.format(entry.reference))\n if entry.reference_type != \"\":\n f.write(' referenceType = \"{0}\",\\n'.format(entry.reference_type))\n f.write(f' shortDesc = \"\"\"{entry.short_desc.strip()}\"\"\",\\n')\n f.write(f' longDesc = \\n\"\"\"\\n{entry.long_desc.strip()}\\n\"\"\",\\n')\n\n f.write(')\\n\\n')", "def get_entry_output(entry, out_path=''):\n\n # --- Start list ---\n out = ['\\n<li>\\n']\n\n # --- author ---\n if 'author' in entry:\n out.append('<span class=\"author\">%s</span>,' % highlight_author(entry, out_path))\n if not params['single_line']:\n out.append('<br>')\n\n out.append('\\n')\n\n # --- chapter ---\n chapter = False\n if 'chapter' in entry:\n chapter = True\n out.append('<span class=\"title\">\"%s\"</span>,' % entry['chapter'])\n if not params['single_line']:\n out.append('<br>')\n\n # --- title ---\n if not chapter:\n out.append('<span class=\"title\">\"%s\"</span>,' % entry['title'])\n if not params['single_line']:\n out.append('<br>')\n\n # -- if book chapter --\n if chapter:\n out.append('in: %s, %s' % (entry['title'], entry['publisher']))\n\n if entry['ENTRYTYPE'] == 'book':\n out.append(entry['publisher'])\n\n out.append('\\n')\n\n # --- journal or similar ---\n if 'journal' in entry:\n out.append('<span class=\"publisher\">%s</span>' % highlight_publisher(entry['journal']))\n elif 'booktitle' in entry:\n out.append('<span class=\"publisher\">')\n if entry['ENTRYTYPE'] in params['type_conference_paper']:\n out.append(highlight_publisher(entry['booktitle']))\n else:\n out.append(entry['booktitle'])\n out.append('</span>')\n elif 'eprint' in entry:\n out.append('<span class=\"publisher\">%s</span>' % highlight_publisher(entry['eprint']))\n elif entry['ENTRYTYPE'] == 'phdthesis':\n out.append('PhD thesis, %s' % entry['school'])\n elif entry['ENTRYTYPE'] == 'techreport':\n out.append('Tech. Report, %s' % entry['number'])\n\n # --- volume, pages, notes etc ---\n # print(entry)\n if 'volume' in entry:\n out.append(', vol. %s' % entry['volume'])\n if 'number' in entry and entry['ENTRYTYPE'] != 'techreport':\n out.append(', no. %s' % entry['number'])\n if 'pages' in entry:\n out.append(', pp. 
%s' % entry['pages'])\n # elif 'note' in entry:\n # if journal or chapter: out.append(', ')\n # out.append(entry['note'])\n if 'month' in entry:\n out.append(', %s' % entry['month'])\n\n # --- year ---\n out.append(', <span class=\"year\">%s</span>' % entry['year'])\n\n # final period\n out.append('.\\n')\n\n if not params['single_line']:\n out.append('<br>')\n\n # --- Links ---\n\n if not params['single_line']:\n out.append('<div class=\"publilinks\">\\n')\n\n # pdf\n pdf_link = get_pdflink_from_entry(entry)\n if pdf_link != '':\n if params['use_icon'] and params['icon_pdf']:\n icon_pdf_file = params['icon_pdf'] if len(params['author_group']) == 0 else params['author_group_icon_pdf']\n icon_pdf_file = os.path.relpath(icon_pdf_file, os.path.dirname(out_path))\n out.append('<a target=\"%s\" href=\"%s\"><img src=\"%s\" alt=\"[pdf]\" style=\"width: %s; height: %s;\"></a>' % (\n params['target_link'], pdf_link, icon_pdf_file, params['icon_size'], params['icon_size']))\n else:\n out.append('[<a target=\"%s\" href=\"%s\">pdf</a>]' % (params['target_link'], pdf_link))\n out.append('&nbsp;')\n\n # url, www, doi, hal_id\n href_link = get_wwwlink_from_entry(entry)\n if href_link != '':\n out.append('\\n')\n if not params['use_icon']:\n out.append('[')\n out.append('<a target=\"%s\" href=\"%s\">' % (params['target_link'], href_link))\n if params['use_icon'] and params['icon_www']:\n icon_www_file = params['icon_www'] if len(params['author_group']) == 0 else params['author_group_icon_www']\n icon_www_file = os.path.relpath(icon_www_file, os.path.dirname(out_path))\n out.append('<img src=\"%s\" alt=\"[www]\" style=\"width: %s; height: %s;\"></a>' % (\n icon_www_file, params['icon_size'], params['icon_size']))\n else:\n out.append('link</a>')\n if not params['use_icon']:\n out.append(']')\n out.append('&nbsp;')\n\n bibid = entry['ID']\n bibid = bibid.replace(':', u'-')\n bibid = bibid.replace('.', u'-')\n show_abstract = params['show_abstract'] and 'abstract' in entry and entry['abstract'] != ''\n show_bibtex = params['show_bibtex']\n\n # bibtex\n if show_bibtex:\n out.append('\\n')\n if params['use_bootstrap_dialog']:\n out.append('''[<a type=\"button\" data-toggle=\"modal\" data-target=\"#bib-%s\">bibtex</a>]&nbsp;''' % bibid)\n else:\n out.append('''[<a id=\"blk-%s\" href=\"javascript:toggle('bib-%s', 'blk-%s');\">bibtex</a>]&nbsp;''' % (\n bibid, bibid, bibid))\n\n # abstract\n if show_abstract:\n out.append('\\n')\n if params['use_bootstrap_dialog']:\n out.append('''[<a type=\"button\" data-toggle=\"modal\" data-target=\"#abs-%s\">abstract</a>]&nbsp;''' % bibid)\n else:\n out.append('''[<a id=\"alk-%s\" href=\"javascript:toggle('abs-%s', 'alk-%s');\">abstract</a>]&nbsp;''' % (\n bibid, bibid, bibid))\n\n # download fields\n for i_str in params['bibtex_fields_download']:\n if i_str in entry and entry[i_str] != '':\n out.append('\\n')\n out.append('''[<a target=\"%s\" href=\"%s\">%s</a>]&nbsp;''' % (\n params['target_link'], entry[i_str] if i_str != 'arxiv' else get_arxivlink_from_entry(entry), i_str))\n\n # citation\n if entry['ENTRYTYPE'] in params['show_citation_types'] and int(entry['year']) <= params['show_citation_year']:\n if params['show_citation'] == 'no':\n pass\n elif params['show_citation'] == 'scholar.js':\n out.append('\\n[citations: <span class=\"scholar\" name=\"%s\" with-link=\"true\" target=\"%s\"></span>]&nbsp;' % (\n entry['title'], params['target_link_citation']))\n elif params['show_citation'] == 'bs':\n if entry['title'].lower() in params['dict_title']:\n citations_url = 
params['dict_title'][entry['title'].lower()]\n out.append('\\n[citations: <a target=\"%s\" href=\"%s\">%s</a>]&nbsp;' % (\n params['target_link_citation'], citations_url[1], citations_url[0]))\n else:\n raise ValueError('wrong show_citation')\n\n # note\n for i_str in params['bibtex_fields_note']:\n if i_str in entry and entry[i_str] != '':\n out.append('\\n(<span class=\"%s\">%s</span>)&nbsp;' % (i_str if i_str != 'note' else 'hlnote0', entry[i_str]))\n\n out.append('\\n')\n if not params['single_line']:\n out.append('</div>')\n\n if show_bibtex:\n out.append('\\n')\n bibstr = get_bibtex_from_entry(entry, comma_to_and=True)\n if params['use_bootstrap_dialog']:\n out.append(\n '''<div class=\"modal fade\" id=\"bib-%s\" role=\"dialog\"><div class=\"modal-dialog\"><div class=\"modal-content\"><div class=\"modal-header\"><button type=\"button\" class=\"close\" data-dismiss=\"modal\">&times;</button><h4 class=\"modal-title\">Bibtex</h4></div><div class=\"modal-body\"> \\n<pre>%s</pre> </div><div class=\"modal-footer\"><button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Close</button></div></div></div></div>''' % (\n bibid, bibstr))\n else:\n out.append(\n '''<div class=\"bibtex\" id=\"bib-%s\" style=\"display: none;\">\\n<pre>%s</pre></div>''' % (bibid, bibstr))\n\n # abstract\n if show_abstract:\n out.append('\\n')\n if params['use_bootstrap_dialog']:\n out.append(\n '''<div class=\"modal fade\" id=\"abs-%s\" role=\"dialog\"><div class=\"modal-dialog\"><div class=\"modal-content\"><div class=\"modal-header\"><button type=\"button\" class=\"close\" data-dismiss=\"modal\">&times;</button><h4 class=\"modal-title\">Abstract</h4></div><div class=\"modal-body\"> \\n<pre>%s</pre> </div><div class=\"modal-footer\"><button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Close</button></div></div></div></div>''' % (\n bibid, \"\\n\".join(textwrap.wrap(entry['abstract'], 68))))\n else:\n out.append(\n '''<div class=\"abstract\" id=\"abs-%s\" style=\"display: none;\">%s</div>''' % (bibid, entry['abstract']))\n\n # Terminate the list entry\n out.append('\\n</li>')\n\n if params['add_blank_line_after_item']:\n out.append('<br>')\n\n out.append('\\n')\n\n return ''.join(out)", "def format_log(cls, entry):\n course = entry.get(\"course\", {}).get(\"code\", \"n/a\")\n str_list = [\n \"{:<21}{:>7}{:>10}{:>10}\".format(entry.get(\"dateString\"), format_time(entry.get(\"elapsed\", 0)), course, entry.get(\"id\")),\n \"{}\".format(entry.get(\"notes\")),\n # \"id: {} duration: {} course: {}\".format(entry.get(\"id\"), format_time(entry.get(\"elapsed\", 0)), course),\n # \"date: {}\".format(entry.get(\"dateString\")),\n # \"duration: {}\".format(format_time(entry.get(\"elapsed\", 0))),\n # \"course: {}\".format(course),\n # \"notes: {}\".format(entry.get(\"notes\")),\n \"\"\n ]\n return '\\n'.join(str_list)", "def display_entry(row):\n print(\"\\n\" + blue_row(\"Task name: \" + row['name']))\n print(blue_row(\"Task date: \" + row['date'][:-9]))\n print(blue_row(\"Task minutes: \" + row['time']))\n print(blue_row(\"Task notes: \" + row['note']) + \"\\n\")", "def bibentry_to_style(bibentry, style='default'):\n s = ''\n if style == 'default':\n s += '%s ' % bibentry['author']\n s += '(%s). ' % bibentry['year']\n s += '*%s*' % bibentry['title']\n\n if 'journal' in bibentry:\n s += '. 
%s, ' % bibentry['journal']\n\n if 'volume' in bibentry:\n s += '%s' % bibentry['volume']\n\n if 'number' in bibentry:\n s += '(%s)' % bibentry['number']\n\n if 'pages' in bibentry:\n s += ', %s' % bibentry['pages'].replace('--', '-')\n\n s += '.'\n return s", "def _create_entry(val, norm=1.0, total=None, perctag=\"%\"):\n if val is not None and norm is not None:\n if total is not None:\n if total <= 0.0:\n entry = \"--\"\n else:\n perc = float(val) / float(total) * 100.0\n entry = \"%2.1f %s\" % (perc, perctag)\n else:\n value = float(val) / norm\n if value < 1.0:\n entry = \"%2.2f\" % (value)\n else:\n entry = \"%2.1f\" % (value)\n\n else:\n entry = \"--\"\n\n return entry", "def get_entry_info(entry):\n\n summary = get_entry_summary(entry)[0]\n plan = get_entry_plan(entry)[0]\n tasks = get_entry_tasks(entry)[0]\n completed_tasks = get_entry_completed_tasks(entry)[0]\n knowledges = get_entry_knowledge(entry)[0]\n failure_points = get_entry_failure_points(entry)[0]\n\n return EntryContent(summary, plan, tasks, completed_tasks, knowledges, failure_points, entry.time_created)", "def get_entry(self, colwidth=18):\n kk = list(self.__dict__.keys())\n kk.sort()\n # build the format specifier\n fmt = '{{:<{:d}.8e}}'.format(colwidth)\n out = ''\n for key in kk:\n out += fmt.format(self.__dict__[key])\n return out", "def format_menu_item(entry):\n if len(entry) > 58:\n entry = entry[:56]\n while len(entry) < 56:\n entry += ' '\n entry += ' #'\n return entry", "def create_and_tag_entry(self, entry):\n return self._make_post_request(\"v3/entries\", data=entry)", "def parse_tex_entry(entry, section):\n _type = entry[0]\n content = entry[1]\n content_lines = [x.strip() for x in content.split(\"\\n\")]\n name = content_lines.pop(0)[1:-1]\n if content_lines[0][0] == \"%\":\n tags = content_lines.pop(0)[8:]\n else:\n tags = \"\"\n\n content = \"\\n\".join(content_lines)\n return {\n \"type\": _type,\n \"name\": name,\n \"tags\": tags,\n \"category\": section,\n \"content\": tex2html(content),\n }", "def preprocess_entry(self, entry):\n raise NotImplementedError('BaseDataSource::preprocess_entry not implemented.')", "def write_latex_bibentry(self, fd=sys.stdout):\n\n print(\"{}{{{},\".format(self.type, self.handle), file=fd)\n print(\" author = {{{}}},\".format(self.author), file=fd)\n print(' title = \"{{{}}}\",'.format(self.title), file=fd)\n print(\" year = {},\".format(self.year), file=fd)\n print(\" month = {},\".format(self.month), file=fd)\n print(\" handle = {{{}}},\".format(self.handle), file=fd)\n print(\" note = {{{}}},\".format(self.note), file=fd)\n print(\" url = {{{}}} }}\".format(self.url), file=fd)", "def clean_entries(entry):\n \n # convert None and NaN to an empty string. 
This allows simple string concatenation\n if pd.isnull(entry):\n entry = ''\n \n # convert to string, lowercase, and strip leading and trailing whitespace\n entry = str(entry).lower().strip()\n \n # cut down (internal) consecutive whitespaces to one white space\n entry = re.sub(r'\\s+', ' ', entry)\n \n return entry", "def entry(request, entry_name):\n\n if entry_name not in util.list_entries():\n return render(request, \"encyclopedia/entry_error.html\")\n\n return render(request, \"encyclopedia/entry.html\",{\n \"entry_name\": entry_name,\n \"entry_text\": markdown(util.get_entry(entry_name))\n })", "def create_uniq_output_entry(entry, entry_list):\n if entry not in entry_list:\n return entry\n else:\n new_entry = '{}*'.format(entry)\n return create_uniq_output_entry(new_entry, entry_list)", "def line_format_contribution(node: dict) -> str:\n title = node['title']\n author = node['author'].get('name')\n link = node['permalink']\n merged = format_github_time_to_date(node['mergedAt'])\n return f'[{title}]({link}) - {author} (merged {merged})'", "def format_issue(issue: Issue)-> str:\n histories = issue.changelog.histories\n formatted_issue = \"\"\n flagged = False\n donetime = datetime.now()\n history = { \"created\": donetime }\n for history in histories:\n timestamp = iso8601.parse_date(history.created)\n flags = filter(filters.is_flag, history.items)\n for flag in flags:\n if flag.toString not in (\"\", None):\n formatted_issue += f\"{issue.key}\"\n formatted_issue += f\",{issue.id}\"\n formatted_issue += f\",\\\"{issue.fields.summary}\\\"\"\n formatted_issue += f\",{issue.fields.customfield_10008}\"\n formatted_issue += f\",\\\"{timestamp.strftime('%d/%m/%Y %H:%M')}\\\"\"\n flagged = True\n else:\n formatted_issue += f\",\\\"{timestamp.strftime('%d/%m/%Y %H:%M')}\\\"\\n\" if flagged else \"\"\n flagged = False\n if flagged:\n donetime = iso8601.parse_date(history.created)\n formatted_issue += f\",\\\"{donetime.strftime('%d/%m/%Y %H:%M')}\\\"\\n\"\n return formatted_issue", "def entry_from_human_readable(line):\n matches = LINE_RE.findall(line)\n\n if len(matches) != 1:\n raise ValueError(\"Not a valid entry line: {}\".format(line))\n\n rd_uid, name, car_nick = matches[0]\n car_nick = car_nick.strip().lower()\n\n if car_nick == \"tbd\":\n car = None\n else:\n car = get_car_from_nickname(car_nick)\n if not car:\n raise ValueError(\"Unknown car nickname: {}\".format(car_nick))\n\n return Entry(name=name, rd_uid=rd_uid, car=car, skin=None, steam_uid=None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of (id, num_of_entries) of the users whose entries in maillog surpass the threshold.
def detected_from_entries(self):
    entrygroup = [list(e) for k, e in groupby(sorted(self.entries), lambda x: x.id)]
    result = [(x[0].id, len(x)) for x in entrygroup if len(x) >= self.threshold]
    return result
[ "def get_log_entries_by_user(session, user, limit=20):\n return AuditLog.get_entries(session, involve_user_id=user.id, limit=limit)", "def get_num_emails(self, number=10, folder=\"inbox\"):\n try:\n emails = []\n count = 0\n for item in self._victim_account.inbox.all():\n if count >= number:\n break\n else:\n emails.append(item)\n count += 1\n\n return emails\n except:\n print(\"Error\")", "def __getUniqueUserIDs(self, conditions):\n total_rows_query = \"select count(distinct user_id) as count from connection_log where \" + conditions\n return db_main.getHandle().selectQuery(total_rows_query)[0][\"count\"]", "def get_recent_entries(num_entries):\n filehashes = []\n filehash_set = set()\n with DBConn() as conn:\n cursor = conn.cursor()\n cursor.execute(\"\"\"\n SELECT * FROM recents order by rowid DESC\n \"\"\")\n row = cursor.fetchone()\n while len(filehashes) < num_entries and row:\n filehash = row['filehash']\n if filehash not in filehash_set:\n filehashes.append(filehash)\n filehash_set.add(filehash)\n row = cursor.fetchone()\n return filehashes", "async def find_matched_users(self) -> list[int]:\n users = []\n my_coord = (self.longitude, self.latitude)\n queryset = await self._get_queryset_of_related_users()\n for user in await queryset:\n coord_distance = await self._calculate_distance(my_coord, user)\n if coord_distance <= float(self.search_distance):\n users.append(user.user_id)\n return users", "def count_emailusers(self, source):\n return ccnet_threaded_rpc.count_emailusers(source)", "def get_performed_users(self):\n search = Search(using=self.es, index=self.index)\n for query in self.searchfilter.values():\n search = search.query(query)\n\n search.aggs.bucket('user_names', 'terms', field=self.get_field_name('userIdentity.userName'), size=5000)\n response = search.execute()\n\n user_names = {}\n for user in response.aggregations.user_names.buckets:\n if user.key == 'HIDDEN_DUE_TO_SECURITY_REASONS':\n # This happens when a user logs in with the wrong username\n continue\n user_names[user.key] = True\n return user_names", "def get_log_entries_by_query(self, log_entry_query):\n return # osid.logging.LogEntryList", "def detect_write_ins(results, threshold):\n question_answers = {}\n write_ins = set()\n for row in results:\n for question in row:\n if question in question_answers:\n if row[question] in question_answers[question]:\n pass\n else:\n question_answers[question].add(row[question])\n if len(question_answers[question]) >= threshold:\n write_ins.add(question)\n else:\n question_answers[question] = set()\n return list(write_ins)", "def _get_users(zip, radius):\n radius = float(radius)\n logger.debug('finding users within %s miles of zip code %s' % (radius, zip))\n\n zipcode = ZipGeocode.objects.get(pk=zip)\n geom = GEOSGeometry('POINT(%s %s)' % (zipcode.longitude, zipcode.latitude))\n buffered_geom = geom.buffer(radius)\n\n extended_users = ExtendedUserData.objects.filter(location__intersects=buffered_geom)\n\n return extended_users", "def get_users(subs, num_posts=1000):\n users = Counter()\n for sub in subs:\n print(\"Scraping %s\" % sub)\n user_list = [post.author for post in reddit.subreddit(sub).hot(limit=num_posts)]\n users.update([user.name for user in user_list if user is not None])\n\n return users", "def get_log_entries_by_date(self, start, end):\n return # osid.logging.LogEntryList", "def num_entries(self):\n return len(self._log_entries)", "def _count_users(self, txn: LoggingTransaction, time_from: int) -> int:\n sql = \"\"\"\n SELECT COUNT(*) FROM (\n SELECT 
user_id FROM user_ips\n WHERE last_seen > ?\n GROUP BY user_id\n ) u\n \"\"\"\n txn.execute(sql, (time_from,))\n # Mypy knows that fetchone() might return None if there are no rows.\n # We know better: \"SELECT COUNT(...) FROM ...\" without any GROUP BY always\n # returns exactly one row.\n (count,) = cast(Tuple[int], txn.fetchone())\n return count", "def find_entries(self, users, start, *args, **kwargs):\r\n forever = kwargs.get('all', False)\r\n for user in users:\r\n if forever:\r\n entries = Entry.objects.filter(user=user).order_by('start_time')\r\n else:\r\n entries = Entry.objects.filter(\r\n user=user, start_time__gte=start).order_by(\r\n 'start_time')\r\n yield entries", "def get_log_entries_by_ids(self, log_entry_ids):\n return # osid.logging.LogEntryList", "def _unique_participants(thread):\n users = set([thread['user_id']])\n n_replies = 1 + len(thread['children'])\n for reply in thread['children']:\n r_users, r_replies = _unique_participants(reply)\n n_replies += r_replies\n users = users | r_users\n return users, n_replies", "def get_dedicated_users(user_dic, main_sub, dedicated=5):\n dedicated_users = {}\n for username, userobject in user_dic.items():\n count = 0\n for sub in userobject.subreddits:\n if sub == main_sub:\n count += 1\n if count < dedicated:\n continue\n else:\n dedicated_users[username] = userobject\n return dedicated_users", "def _get_new_entries(self):\n lines = self.logfile.read().splitlines()\n new_entries = [self._entry_from_line(line)\n for line in lines\n if self._filter_line(line)]\n return new_entries" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of new entries read from logfile.
def _get_new_entries(self):
    lines = self.logfile.read().splitlines()
    new_entries = [self._entry_from_line(line)
                   for line in lines
                   if self._filter_line(line)]
    return new_entries
[ "def update(self):\n p = Popen([\"journalctl\", \"-n\", \"1000\", \"-o\", \"json\"], stdout=PIPE)\n\n logs = []\n for i, line in enumerate(reversed(p.stdout.readlines())):\n obj = json.loads(line.decode(\"utf-8\").strip())\n if os.path.basename(obj.get(\"_EXE\", \"\")) != \"hawck-macrod\":\n continue\n obj = LogRetriever.mklog(obj)\n ## Log has been read, stop\n if obj[\"UTIME\"] <= self.last_time or len(logs) > self.max_logs:\n break\n logs.append(obj)\n\n p.kill()\n\n if not logs:\n return\n\n log = None\n for log in reversed(logs):\n msg = log[\"MESSAGE\"]\n if msg not in self.logs:\n log[\"DUP\"] = 1\n self.logs[msg] = log\n else:\n self.logs[msg][\"DUP\"] += 1\n self.last_time = log[\"UTIME\"]\n\n return logs", "def _ReadChangeLogEntries(self, file_object):\n self.entries = []\n\n file_offset = file_object.tell()\n while file_offset < self._file_size:\n change_log_entry = self._ReadChangeLogEntry(file_object)\n file_offset = file_object.tell()\n\n self.entries.append(change_log_entry)", "def read_log():\n formated_log_data = list()\n log_data = sys.stdin.readlines()\n # log information\n logger.debug(\"The log file contains {} lines in total\"\n .format(len(log_data)))\n for line in log_data:\n line_parts = line.rstrip(\"\\n\").strip().split()\n # make sure that all the four parameters are there\n if (len(line_parts) == 4):\n (path, status_code, size, time) = line_parts\n new_status_code = int(status_code)\n new_size = int(size)\n new_time = int(time)\n formated_log_data.append((path.strip(), new_status_code,\n new_size, new_time))\n # log information\n logger.debug(\"The log file contains {} real log information \"\n .format(len(formated_log_data)))\n return formated_log_data", "def load_log():\n\n try:\n with open(COMMENTS_LOG_FILE, \"r\", encoding=\"utf-8\") as temp_file:\n return [x for x in temp_file.read().split(\"\\n\")]\n\n except:\n with open(COMMENTS_LOG_FILE, \"w\", encoding=\"utf-8\") as temp_file:\n return list()", "def logfile_timeline(self, container):\n interesting_lines = [\n line.strip()\n for line in open(container.logfile)\n if self.interesting_re.search(line)]\n return [(container.name,) + split_timestamp(line) for line in interesting_lines]", "def get_log_entries_by_date(self, start, end):\n return # osid.logging.LogEntryList", "def _read_log(self):\n path = self.path_log()\n log_file = open(path, \"r\", encoding='latin-1')\n return log_file.readlines()", "def list_entries(client, args):\n # [START list]\n logger = client.logger(args.logger_name)\n print('Listing all log entries for logger {}'.format(logger.name))\n entries = []\n token = None\n while True:\n new_entries, token = client.list_entries(\n filter_='logName=\"{}\"'.format(logger.full_name),\n page_token=token)\n entries += new_entries\n if token is None:\n break\n\n for entry in entries:\n timestamp = entry.timestamp.isoformat()\n print('{}: {}'.format\n (timestamp, entry.payload))\n # [END list]\n return entries", "def log_entries(self):\n return iter(self._log_entries)", "def get_log_entries_by_log(self, log_id):\n return # osid.logging.LogEntryList", "def __log_file(self):\n while True:\n line = self.fd.readline()\n if not line: break\n syslog.syslog(self.p, line)", "def get_events( self):\n\n\t\t# Moves at the end of the log file\n\t\tif self.flush:\n\t\t\tself.file.seek( 0, 2)\n\t\t\tself.flush = False\n\n\t\t# TODO: investigate select-like approach?\n\t\tidle_cnt = 0\n\t\tiaa_cnt = 0\n\t\tevents = ''\n\t\twhile iaa_cnt != IAA_INTERVAL and not shutdown:\n\t\t\t# Collect lines\n\t\t\tevents = 
self.read_log_line()\n\t\t\t#####\tprint type(events) ##DEBUG\n\t\t\tif len(events) != 0: break\n\n\t\t\t# No more events, wait\n\t\t\ttime.sleep( TAIL_RECHECK)\n\n\t\t\t# Log rename check\n\t\t\tidle_cnt += 1\n\t\t\tif idle_cnt == NAME_CHECK:\n\t\t\t\tif self.log_rename():\n\t\t\t\t\tself.open_log()\n\t\t\t\t\tiaa_cnt = 0\n\t\t\t\telse:\n\t\t\t\t\t# Recover from external file modification\n\t\t\t\t\tposition = self.file.tell()\n\t\t\t\t\tself.file.seek( 0, 2)\n\t\t\t\t\tfile_size = self.file.tell()\n\t\t\t\t\tif file_size < position:\n\t\t\t\t\t\t# File has been externaly modified\n\t\t\t\t\t\tposition = file_size\n\t\t\t\t\tself.file.seek( position)\n\t\t\t\tidle_cnt = 0\n\t\t\telse:\n\t\t\t\t# To reset end-of-line error\n\t\t\t\tself.file.seek( self.file.tell())\n\t\t\tiaa_cnt += 1\n\n\t\t# Send IAA packet if required\n\t\tif iaa_cnt == IAA_INTERVAL:\n\t\t\treturn None\n\n\t\treturn events", "def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]:\n if self.is_error:\n return []\n else:\n return sorted(itertools.chain(\n self._log_entries,\n *(child._get_log_entries() for child in self.children)\n ))", "def read_file(self, filename):\n with open(filename, \"r\") as reader:\n loglines = reader.readlines()\n \n return loglines", "def iter_history():\n logname = '/var/log/dpkg.log'\n if not os.path.exists(logname):\n raise FileNotFoundError('File does not exist: {}'.format(logname))\n try:\n with open(logname, 'r') as f:\n # Going to read these backwards, latest first.\n for line in reversed(f.readlines()):\n historyline = HistoryLine.from_dpkg_line(line)\n if historyline is not None:\n yield historyline\n except EnvironmentError as exenv:\n errfmt = 'Failed to read history: {}\\n{}'\n raise EnvironmentError(errfmt.format(logname, exenv))", "def feedLog():\n config = getConfig()\n feedLogDict = {}\n entryDict = {}\n if os.path.exists( os.path.sep.join( (config.options[\"homedir\"], \"feed.log\") )):\n log = open( os.path.sep.join( (config.options[\"homedir\"], \"feed.log\") ), \"r\" )\n logger.debug(\"Reading logfile: \" + log.name)\n for line in log.readlines():\n entryDict = {}\n parts = line.split( \"||\" )\n entryDict[\"e-tag\"] = string.strip( parts[1] )\n entryDict[\"modified\"] = string.strip( parts[2] )\n feedLogDict[parts[0]] = entryDict\n log.close()\n #now clear out the file\n log = open( os.path.sep.join( (config.options[\"homedir\"], \"feed.log\") ), \"w\" )\n log.close()\n return feedLogDict", "def get_log_entries_for_resource(self, resource_id):\n return # osid.logging.LogEntryList", "def _get_log_files(self):\n logs = glob.glob(os.path.join(self.md_dir, 'log*.lammps*'))\n logs.sort()\n if self.temper:\n logs.remove(os.path.join(self.md_dir, 'log.lammps'))\n logs = sorted(logs, key=lambda x: int(re.findall(r'\\d+', os.path.basename(x))[0]))\n log_data_store = []\n if len(logs) > 1:\n logs = sorted(logs, key=lambda x: int(re.findall(r'\\d+', os.path.basename(x))[0]))\n for log in logs:\n with open(os.path.join(self.md_dir, log), 'r') as log_file:\n lines = log_file.readlines()\n for i, line in enumerate(lines):\n if \"WARNING\" in line:\n del lines[i]\n log_data_store.append(lines)\n return log_data_store", "def _get_log_lines(self):\n return [\n log_line\n for log_line in self.captured_output.getvalue().split(\"\\n\")\n if log_line\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a logfile line into an entry.
def _entry_from_line(self, line):
    raise NotImplemented()
[ "def parse_log_line(line: str) -> LogEntry:\n match = LOGPAT.match(line)\n if not match:\n # we could catch that error and skip the line\n raise ValueError(f'incorrect log format: {line}')\n\n entry = match.groups()\n parsed_time = parse(entry[3][:11] + ' ' + entry[3][12:])\n size = int(entry[8]) if entry[8] != '-' else 0\n return LogEntry(\n entry[0], entry[1], entry[2], parsed_time, entry[4], entry[5],\n entry[6], int(entry[7]), size\n )", "def ParseLine(self, path, line):\n del path # We don't use the path of the log file.\n return datatypes.Event(json.loads(line.rstrip()))", "def parse_line(cls, line, log):\n m = cls._LOG_LINE_RE.match(line)\n if m is None:\n return None\n entry_type = m.group('type')\n y, mo, d, h, mi, s = map(int, m.group('year', 'month', 'day',\n 'hour', 'min', 'sec'))\n timestamp = datetime.datetime(y, mo, d, h, mi, s, tzinfo = UTC())\n data = m.group('data')\n if entry_type == 'combat':\n return CombatLogEntry(timestamp, data, log)\n else:\n if entry_type == 'info':\n t = LogEntry.INFO\n elif entry_type == 'notify':\n t = LogEntry.NOTIFY\n elif entry_type == 'warning':\n t = LogEntry.WARNING\n elif entry_type == 'question':\n t = LogEntry.QUESTION\n elif entry_type == 'hint':\n t = LogEntry.HINT\n elif entry_type == 'None':\n t = LogEntry.NONE\n else:\n raise ValueError('Unknown log entry type \"%s\".' % entry_type)\n return LogEntry(timestamp, t, data)", "def RecordFromLine(line):\n try:\n created, level, unused_source_location, message = (\n _StrictParseLogEntry(line, clean_message=False))\n\n\n message = Stripnl(message)\n return LoggingRecord(level, created, message, None)\n except ValueError:\n return StderrRecord(line)", "def convert_by_line(self, line):\n return self.conversion(line)", "def process_line(self, line):\n if not line:\n return\n msg = self.line_to_message(line)\n self.handle_message(msg)", "def _prepare_line(self, line):\r\n return line.rstrip('\\r\\n').strip()", "async def process_log_entry(self, entry: Event, sender: str) -> bool:\n if not isinstance(entry, LogEvent):\n return False\n\n # date\n time = Time(entry.time, format=\"unix\")\n\n # define new row and emit\n row = [\n time.iso.split()[1],\n str(sender),\n entry.level,\n \"%s:%d\" % (os.path.basename(entry.filename), entry.line),\n entry.message,\n ]\n self.add_log.emit(row)\n return True", "def create_log_entry(self, log_entry_form):\n return # osid.logging.LogEntry", "def handleE(self, line):\n self.events.append({\"UTC\": self._parseTime(line[1:7]),\n \"TLC\": line[7:10], \"STR\": line[10:].strip()})", "def mapLogRecord(self, record):\n record_modified = HTTPHandler.mapLogRecord(self, record)\n if self.formatter is not None:\n record_modified['t'] = self.formatter.format(record)\n else:\n record_modified['t'] = record_modified['msg'].encode('utf-8')\n\n record_modified['no_date'] = self.no_date\n return record_modified", "def _ProcessLogLine(self,\n log_line: str,\n query: str,\n project_name: str) -> str:\n log_record = json.loads(log_line)\n\n # Metadata about how the record was obtained.\n timesketch_record = {'query': query, 'project_name': project_name,\n 'data_type': self.DATA_TYPE}\n\n # Timestamp related fields.\n timestamp = log_record.get('timestamp', None)\n if timestamp:\n timesketch_record['datetime'] = timestamp\n timesketch_record['timestamp_desc'] = 'Event Recorded'\n\n # General resource information.\n resource = log_record.get('resource', None)\n if resource:\n labels = resource.get('labels', None)\n if labels:\n for attribute, value in labels.items():\n 
timesketch_attribute = 'resource_label_{0:s}'.format(attribute)\n timesketch_record[timesketch_attribute] = value\n\n # Some Cloud logs pass through Severity from the underlying log source\n severity = log_record.get('severity', None)\n if severity:\n timesketch_record['severity'] = severity\n\n # The log entry will have either a jsonPayload, a protoPayload or a\n # textPayload.\n json_payload = log_record.get('jsonPayload', None)\n if json_payload:\n self._ParseJSONPayload(json_payload, timesketch_record)\n\n proto_payload = log_record.get('protoPayload', None)\n if proto_payload:\n self._parse_proto_payload(proto_payload, timesketch_record)\n\n text_payload = log_record.get('textPayload', None)\n if text_payload:\n timesketch_record['textPayload'] = text_payload\n\n self._BuildMessageString(timesketch_record)\n\n return json.dumps(timesketch_record)", "def _ReadChangeLogEntry(self, file_object):\n file_offset = file_object.tell()\n data_type_map = self._GetDataTypeMap('rp_change_log_entry')\n\n change_log_entry_record, data_size = self._ReadStructureFromFileObject(\n file_object, file_offset, data_type_map, 'change log entry record')\n\n if self._debug:\n self._DebugPrintChangeLogEntryRecord(change_log_entry_record)\n\n if change_log_entry_record.record_type != 1:\n raise errors.ParseError(\n f'Unsupported record type: {change_log_entry_record.record_type:d}')\n\n signature = change_log_entry_record.signature\n if signature != self._RECORD_SIGNATURE:\n raise errors.ParseError('Unsupported change.log file signature')\n\n # TODO: refactor to use size hints\n record_size = (\n change_log_entry_record.record_size - data_size)\n record_data = file_object.read(record_size)\n file_offset += data_size\n\n if self._debug:\n self._DebugPrintData('Record data', record_data)\n\n context = dtfabric_data_maps.DataTypeMapContext(values={\n 'rp_change_log_entry': change_log_entry_record})\n\n data_type_map = self._GetDataTypeMap('rp_change_log_entry2')\n\n try:\n change_log_entry_record2 = self._ReadStructureFromByteStream(\n record_data, file_offset, data_type_map, 'change log entry record',\n context=context)\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n f'Unable to parse change log entry record with error: {exception!s}')\n\n if self._debug:\n self._DebugPrintValue(\n 'Process name', change_log_entry_record2.process_name[:-1])\n\n self._DebugPrintText('\\n')\n\n change_log_entry = ChangeLogEntry()\n change_log_entry.entry_type = change_log_entry_record.entry_type\n change_log_entry.entry_flags = change_log_entry_record.entry_flags\n change_log_entry.file_attribute_flags = (\n change_log_entry_record.file_attribute_flags)\n change_log_entry.sequence_number = change_log_entry_record.sequence_number\n change_log_entry.process_name = change_log_entry_record2.process_name[:-1]\n\n sub_record_data_offset = context.byte_size\n sub_record_data_size = record_size - 4\n if self._debug:\n self._DebugPrintValue(\n 'Sub record data offset', f'{sub_record_data_offset:d}')\n\n value_size = sub_record_data_size - sub_record_data_offset\n self._DebugPrintValue('Sub record data size', f'{value_size:d}')\n\n if sub_record_data_offset < sub_record_data_size:\n self._DebugPrintText('\\n')\n\n while sub_record_data_offset < sub_record_data_size:\n read_size = self._ReadRecord(record_data, sub_record_data_offset)\n if read_size == 0:\n break\n sub_record_data_offset += read_size\n\n data_type_map = self._GetDataTypeMap('uint32le')\n\n try:\n copy_of_record_size = 
self._ReadStructureFromByteStream(\n record_data[-4:], sub_record_data_offset, data_type_map,\n 'copy of record size')\n except (ValueError, errors.ParseError) as exception:\n raise errors.ParseError(\n f'Unable to parse copy of record size with error: {exception!s}')\n\n if change_log_entry_record.record_size != copy_of_record_size:\n raise errors.ParseError((\n f'Record size mismatch ({change_log_entry_record.record_size:d} != '\n f'{copy_of_record_size:d})'))\n\n if self._debug:\n self._DebugPrintValue('Copy of record size', f'{copy_of_record_size:d}')\n\n self._DebugPrintText('\\n')\n\n return change_log_entry", "def parse_log(self, log_entry: str) -> Optional[dict]:\n match = self.log_grok.match(log_entry)\n\n if match is None:\n return None\n\n if \"timestamp\" in match:\n match[\"timestamp\"] = datetime.strptime(\n match[\"timestamp\"], self.strptime_pattern\n ).isoformat()\n\n # Rename for elasticsearch\n match[\"@timestamp\"] = match.pop(\"timestamp\")\n\n match[\"type\"] = self.type\n\n return match", "def mapLogRecord(self, record):\n record_modified = HTTPHandler.mapLogRecord(self, record)\n record_modified['logPath'] = self.logPath\n record_modified['msg'] = record_modified['msg'].encode('utf-8')\n return record_modified", "def ParseLogEntry(entry):\n try:\n return _StrictParseLogEntry(entry)\n except ValueError:\n\n return _CurrentTimeMicro(), _DEFAULT_LEVEL, _Clean(entry), None", "def _parse_line(self, line):\n return {'raw_message': line}", "def get_log_entry(self, log_entry_id):\n return # osid.logging.LogEntry", "def _StrictParseLogEntry(entry, clean_message=True):\n magic, level, timestamp, message = entry.split(' ', 3)\n if magic != 'LOG':\n raise ValueError()\n\n timestamp, level = int(timestamp), int(level)\n if level not in LOG_LEVELS:\n raise ValueError()\n\n return timestamp, level, _Clean(message), None if clean_message else message" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Purge entries older than the TTL
def _purge_old_entries(self, now=None): if now is None: now = time() self.entries = [x for x in self.entries if x.expire > now]
[ "def purge(self):\n for key, (expiry, _) in list(self._items.items()):\n if expiry < time():\n self._log.debug('Purging expired item %s', key)\n self._items.pop(key, None)", "def clean_expired(self):\n\t\tl_time = datetime.datetime.now() - datetime.timedelta(seconds = 600)\n\t\tself.get_query_set().filter(last_update__lt=l_time).delete()", "def purge(self):\r\n t = time.time()\r\n expired = []\r\n for address,worker in self.queue.items():\r\n if t > worker.expiry: # Worker expired\r\n expired.append(address)\r\n for address in expired:\r\n print (\"expired worker: %s\" % address)\r\n self.queue.pop(address, None)", "def purge_expired_sessions(self):\n logger.debug(\"Redis handler looking for sessions without ttl.\")\n self.engine.session_callback.stop()\n logging.debug(\"Session periodic callback stopped by the redis \"\n \"handler.\")\n keys = self.data_source.get_connection().keys(self.__get_key(\"*\"))\n purge_count = 0\n purge_hiccup = False\n for key in keys:\n ttl = self.data_source.get_connection().ttl(key)\n if ttl == -1:\n logger.warning(\"Session %s without ttl. Setting expiration \"\n \"now.\", key)\n self.data_source.get_connection().expire(key, self.life_time)\n purge_count += 1\n if purge_count == firenado.conf.session['purge_limit']:\n purge_hiccup = True\n logger.warning(\"Set ttl to 500 sessions. Exiting the call\"\n \" and waiting for purge hiccup.\")\n break\n if purge_hiccup:\n self.engine.set_purge_hiccup()\n else:\n self.engine.set_purge_normal()\n self.engine.session_callback.start()\n logging.debug(\"Session periodic callback resumed by the redis \"\n \"handler.\")", "def purge_expired_tokens():\n\n from keydom.models import user\n\n query = (user.Token\n .delete()\n .where(user.Token.expire_time <= datetime.now()))\n query.execute()", "def purge():", "def expire_routes(self):\n # TODO: fill this in!\n hosts_to_delete = []\n\n for host,entry in self.table.items():\n if entry.expire_time <= api.current_time(): #delete if equal to expiry time as well.\n hosts_to_delete.append(host)\n\n for host in hosts_to_delete:\n if self.POISON_EXPIRED: # added during poison expired update (stage 9)\n self.table[host] = TableEntry(dst=self.table[host].dst, port=self.table[host].port, latency=INFINITY,\n expire_time=self.table[host].expire_time)\n else:\n del self.table[host]\n self.s_log(\"Removed route to {} has expire time {}, time is {}\".format(host, entry.expire_time, api.current_time()))", "def clear_expired_metering_data(self, ttl):\n LOG.debug(\"Clearing expired metering data is based on native \"\n \"MongoDB time to live feature and going in background.\")", "def remove_expired(self):\n exp_time = timezone.now() - timezone.timedelta(30)\n self.filter(accessed__lt=exp_time).delete()", "def purge(self):\n # t = time.time()\n t = current_seconds_time()\n # print(\"Killing expired workers at time: %s\" % t)\n expired = []\n for address,worker in self.queue.items():\n # print(address, worker.last_alive)\n if t > worker.last_alive: # Worker expired\n expired.append(address)\n for address in expired:\n print(\"W: Idle worker expired: %s\" % address)\n self.queue.pop(address, None)", "def delete_expired(self):\n expired_date = timezone.now() - timezone.timedelta(\n seconds=settings.LIVE_PAIRING_EXPIRATION_SECONDS\n )\n return self.filter(created_on__lt=expired_date).delete()", "def _clean_cache(self):\n query = _AppEngineUtilities_Cache.all()\n query.filter('timeout < ', datetime.datetime.now())\n results = query.fetch(self.max_hits_to_clean)\n db.delete(results)\n #for result 
in results:\n # result.delete()", "def remove_expired(self):\n while len(self.insertedTimes) != 0 and self.is_expired(self.insertedTimes[0]):\n key = self.insertedTimes[0].key\n if self.insertedTimesDict[key] == self.insertedTimes[0].insertion_time:\n del self.data[key]\n del self.insertedTimesDict[key]\n self.insertedTimes.popleft()", "def purge(self):\n if not self.enabled: return\n model_list = [x.model_class() for x in self.tables.all()]\n d = timezone.now() - timedelta(days=self.age_in_days)\n datetime_filter = {self.datetime_field + '__lt': d}\n date_filter = {self.datetime_field + '__lt': d.date()}\n if self.delete_by_age:\n for m in model_list:\n try:\n m.objects.filter(**datetime_filter).delete()\n except TypeError: # field is datefield, not datetimefield\n m.objects.filter(**date_filter).delete()\n if self.delete_by_quantity:\n for m in model_list:\n x = m.objects.order_by('-' + self.datetime_field)[self.max_records:]\n m.objects.filter(pk__in=x).delete()", "def clear_expired_entries(self) -> None:\n with self._lock:\n self._clear_expired_entries()", "def purge(self, directory=None):\n if not self.enabled: return\n d = timezone.now() - timedelta(days=self.age_in_days)\n self.purge_recursive(d.replace(tzinfo=None))", "def clean_realtime_data():\n logger.info('BEGIN -- running task: clean_realtime_data')\n date = datetime.datetime.now() - datetime.timedelta(days=7)\n Data.objects.filter(datetime__lte=date).delete()\n logger.info('delete realtime data older than 1 week successfull')\n logger.info('END -- running task: clean_realtime_data')", "def garbageCollector(self):\n tcutoff = self.latest_event - TimeSpan(self.expirationtime)\n for evID in self.event_dict.keys():\n evt = self.cache.get(seiscomp3.DataModel.Event, evID)\n if self.event_dict[evID]['timestamp'] < tcutoff:\n self.event_dict.pop(evID)", "def timer_dead(self):\n self.stop_timer_dead()\n logger.debug(\"[%s] - Remove dead entries in cache\", self.__class__.__name__)\n try:\n now = datetime.datetime.now()\n dead_time = now - datetime.timedelta(seconds=self._cache_dead_ttl)\n for key in list(self._cache.keys()):\n self._lock.acquire()\n if 'last_update' not in self._cache[key]:\n self._cache[key]['last_update'] = now\n try:\n if key in self._cache and self._cache[key]['last_update'] < dead_time:\n logger.debug(\"[%s] - Remove dead entries in cache : %s\", self.__class__.__name__, key)\n self.remove_rrd_from_list(key)\n del self._cache[key]\n except Exception:\n logger.exception(\"[%s] - Exception when removing dead entry %s in cache\", self.__class__.__name__, key)\n finally:\n self._lock.release()\n except Exception:\n logger.exception(\"[%s] - Exception when removing dead entries\", self.__class__.__name__)\n self.start_timer_dead()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a dict of counts for items in iterable.
def counter(iterable): counts = defaultdict(int) for item in iterable: counts[item] += 1 return counts
[ "def count(iterable, x):\n contagem = {}\n for x in iterable:\n if x in iterable:\n contagem[x] = contagem.get(x, 0) + 1\n else:\n contagem[x] = 1\n\n return contagem", "def count(list_counted: list[str]) -> dict[str, int]:\n counter: dict[str, int] = dict()\n for item in list_counted:\n if item in counter:\n counter[item] += 1\n else:\n counter[item] = 1\n return counter", "def count(list_val: list[str]) -> dict[str, int]:\n result: dict[str, int] = {}\n \n for i in list_val:\n if i in result:\n result[i] += 1\n else:\n result[i] = 1\n\n return result", "def _element_count_map(elem_list: List[Any]) -> Dict[Any, int]:\n count_map = dict()\n for elem in elem_list:\n if elem in count_map:\n # If the element has been already added to the dict, increase the count.\n count_map[elem] += 1\n else:\n # The element wasn't yet in the dict, add it there.\n count_map[elem] = 1\n return count_map", "def list_itemcnt(a_list):\n return list(Counter(a_list).items())", "def CountFrequency(my_list):\n\n count = {}\n for i in my_list:\n count[i] = count.get(i, 0) + 1\n return count", "def count_by_name(self,iterable):\n\tdef _count(group, item):\n handler_key = str(item['handler_name'])\n ip_key = str(item['ip'])\n pid_key = str(item['pid'])\n if group.has_key(handler_key):\n if group[handler_key].has_key(ip_key):\n if group[handler_key][ip_key].has_key(pid_key):\n group[handler_key][ip_key][pid_key] +=1\n else:\n group[handler_key][ip_key][pid_key] = 1\n else:\n group[handler_key][ip_key] = {}\n group[handler_key][ip_key][pid_key] = 1\n else:\n group[handler_key]={}\n group[handler_key][ip_key] = {}\n group[handler_key][ip_key][pid_key] = 1\n return group\n\n return reduce(_count, iterable, {})", "def count_items(data):\n # Create a counter object\n counts = Counter(data)\n # Sort by highest count first and place in ordered dictionary\n counts = sorted(counts.items(), key=lambda x: x[1], reverse=True)\n counts = OrderedDict(counts)\n return counts", "def count_by(arr, fn=lambda x: x):\n\n key = {}\n for el in map(fn, arr):\n key[el] = 0 if el not in key else key[el]\n key[el] += 1\n return key", "def allele_counts_dictionary(self):\n return {\n allele_name: len(allele_dataset)\n for allele_name, allele_dataset\n in self.groupby_allele()\n }", "def _count_frequencies(self, tokens: list) -> dict:\n frequencies = defaultdict(lambda: 0)\n\n for token in tokens:\n frequencies[token] += 1\n\n return frequencies", "def getCount(self, combs: list):\n counts_dict = defaultdict(int)\n for itemSet in combs:\n itemSet = tuple(sorted(itemSet))\n for group in self.data:\n if set(itemSet) <= group:\n counts_dict[itemSet] += 1\n\n return counts_dict", "def count_repetition(l):\n counts = {}\n for s in l:\n counts[s] = counts.get(s,0) + 1\n return counts", "def createCount(self,arr):\n \n counter = {}\n\n # create counter of items in array\n for item in arr:\n counter[item] = counter.get(item,0) + 1\n\n return counter", "def neighbor_counts(living):\n n = collections.Counter()\n for x in map(neighbors, living):\n n.update(x)\n return dict(n)", "def iteratorCount(*args, **kwargs):\n \n pass", "def frequencies(seq): # real signature unknown; restored from __doc__\n return {}", "def count_objects(self):\r\n count = {}\r\n for obj in self.trackableObjects.values():\r\n cat = obj.category.split(':')[0]\r\n if cat in count:\r\n count[cat] += 1\r\n else:\r\n count[cat] = 1\r\n return count", "def groupby(iterable: Iterable,\n keyfunc: Callable) -> dict[Hashable, list[Any]]:\n grouped = defaultdict(list)\n for item in iterable:\n 
key = keyfunc(item)\n grouped[key].append(item)\n\n return grouped" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes integer size_of_game; returns the filename of the best available checkpoint for this size
def get_checkpoint_filename(size_of_game): path = "neat-checkpoints" filenames = os.listdir(path) filenames = [name.split("-") for name in filenames] check_size = lambda x: x[2] == str(size_of_game) filenames = list(filter(check_size, filenames)) filenames = [int(name[3]) for name in filenames] name = str(max(filenames)) name = "neat-checkpoint-" + str(size_of_game) + "-" + name return path + "/" + name
[ "def get_latest_checkpoint(ckpt_dir):\n\n listfiles = os.listdir(ckpt_dir)\n\n if len(listfiles) == 0:\n return None\n else:\n file_split = listfiles[0].split('_')\n extension = file_split[-1].split('.')[-1]\n\n basename = ''\n for i in range(len(file_split) - 1):\n basename = basename + file_split[i] + '_'\n\n listfiles_step = [int(file.split('_')[-1].split('.')[0]) for file in listfiles]\n listfiles_step = np.array(listfiles_step)\n global_step = listfiles_step.max()\n\n return basename + str(global_step) + '.' + extension", "def get_checkpoint_file(self, model_name):\n assert isinstance(model_name, str)\n return os.path.join(\n f\"{self.data_save_dir}/saves/iter_{self.iteration}\",\n model_name\n )", "def get_most_played(filename):\n games = read_data_from_file(filename)\n\n list_of_games = [game.rstrip().split(\"\\t\") for game in games]\n sold_copies = [(float(shooter[1])) for shooter in list_of_games]\n index_top_sold_game = sold_copies.index(max(sold_copies))\n\n return list_of_games[index_top_sold_game][0]", "def findLatestMetaFile(name): \n\n directory = \"./savedModels/\"+name\n if not(os.path.isdir(directory)):\n print(\"Meta file not found (directory not found)\")\n return -1, \"\"\n\n onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]\n biggest_step=-1\n file_with_biggest_step=\"\"\n for file in onlyfiles:\n filename, file_extension = os.path.splitext(file)\n beginning = \"state_at_step-\"\n if file_extension==\".meta\" and filename.startswith(beginning):\n rest=filename[len(beginning):]\n try:\n int_value = int(rest)\n if int_value > biggest_step:\n biggest_step=int_value\n file_with_biggest_step=filename+file_extension\n except ValueError:\n pass\n if biggest_step!=-1:\n print(\"Biggest step found is \", biggest_step)\n print(\"Meta file is \" + file_with_biggest_step)\n else:\n print(\"Meta file not found\")\n return biggest_step, file_with_biggest_step", "def find_latest_checkpoint(self) -> str:\n files = glob.glob(os.path.join(self.storage_dir, \"*_state.pth\"))\n latest_file_path = max(files, key=os.path.getctime)\n latest_file_path = os.path.join(self.storage_dir, latest_file_path)\n return latest_file_path", "def find_best_checkpoint(\n ckpt_dir, start_from=None, end_on=None, metric_name='fid5k_full'\n):\n # based on stylegan training-runs outputs\n metric_file = os.path.join(ckpt_dir, f'metric-{metric_name}.jsonl')\n fids = []\n with open(metric_file) as f:\n for line in f:\n fids.append((json.loads(line.strip())))\n metric = []\n for item in fids:\n metric.append((item['results'][metric_name], item['snapshot_pkl']))\n if start_from is not None:\n metric = metric[start_from:]\n if end_on is not None:\n metric = metric[:end_on]\n ckpt_metric = min(metric)\n print('best checkpoint:')\n print(ckpt_metric)\n ckpt_path = os.path.join(ckpt_dir, ckpt_metric[1])\n print(ckpt_path)\n print('final checkpoint: %s' % metric[-1][1])\n print('final checkpoint idx: %s' % len(metric))\n return ckpt_path", "def get_index_of_highest_checkpoint():\n cp_files_highest = glob.glob(os.path.join(HIGHEST_DIR,\n CHECK_POINT_FILE.format('*')))\n if len(cp_files_highest) == 0:\n return 'latest'\n\n index = int(cp_files_highest[0].split('-')[-1].split('.')[0])\n\n # Check if checkpoint files exists in the CHECK_POINT_DIR directory\n check_points = [os.path.basename(f) for f in cp_files_highest]\n cp_files_cpdir = [os.path.join(CHECK_POINT_DIR, f) for f in check_points]\n exists = all([os.path.isfile(f) for f in cp_files_cpdir])\n\n # If it doesn't already exists, copy 
from HIGHEST_DIR\n if not exists:\n for f in cp_files_highest:\n copy(f, CHECK_POINT_DIR)\n\n return index", "def _get_last_ckpt(arg):\n file_dict = {}\n ckpt_dir = os.path.join(arg.train_url, 'output')\n lists = os.listdir(ckpt_dir)\n for i in lists:\n ctime = os.stat(os.path.join(ckpt_dir, i)).st_ctime\n file_dict[ctime] = i\n max_ctime = max(file_dict.keys())\n ckpt_dir = os.path.join(ckpt_dir, file_dict[max_ctime])\n ckpt_files = [ckpt_file for ckpt_file in os.listdir(ckpt_dir)\n if ckpt_file.endswith('.ckpt')]\n if not ckpt_files:\n print(\"No ckpt file found.\")\n return None\n\n return os.path.join(ckpt_dir, sorted(ckpt_files)[-1])", "def _checkpoint(self, step_number):\n if not os.path.exists(self.arg_parser.args.output_folder):\n os.makedirs(self.arg_parser.args.output_folder)\n model_file = os.path.join(self.arg_parser.args.output_folder,\n \"model_{}_{}.bin\".format(step_number, self.arg_parser.args.my_rank))\n bak_file1 = os.path.join(self.arg_parser.args.output_folder,\n \"file1_{}_{}.bin\".format(step_number, self.arg_parser.args.my_rank))\n bak_file2 = os.path.join(self.arg_parser.args.output_folder,\n \"file2_{}_{}.bin\".format(step_number, self.arg_parser.args.my_rank))\n meta_file = os.path.join(self.arg_parser.args.output_folder,\n \"meta_{}_{}.bin\".format(step_number, self.arg_parser.args.my_rank))\n f = open(model_file, \"w\")\n string_val = \"x\" * (1024 * 1024 * 4)\n f.write(string_val)\n f.close()\n f = open(bak_file1, \"w\")\n string_val = \"x\" * (1024 * 64)\n f.write(string_val)\n f.close()\n f = open(bak_file2, \"w\")\n string_val = \"x\" * (1024 * 4)\n f.write(string_val)\n f.close()\n f = open(meta_file, \"w\")\n string_val = \"x\" * (1024)\n f.write(string_val)\n f.close()\n pass", "def best_or_last_checkpoint_file(path: str) -> str:\n if not os.path.exists(path):\n raise Exception(\"Path or file {} does not exist\".format(path))\n\n config = Config(folder=path, load_default=False)\n checkpoint_file = config.checkpoint_file(\"best\")\n if os.path.isfile(checkpoint_file):\n return checkpoint_file\n cpt_epoch = config.last_checkpoint_number()\n if cpt_epoch:\n return config.checkpoint_file(cpt_epoch)\n else:\n raise Exception(\"Could not find checkpoint in {}\".format(path))", "def save_bestk(current_best,k):\n## if os.access('%s_best%d.blif'%(f_name,k),os.R_OK):\n## res = get_bestk_value(k)\n## else: \n res = current_best\n if n_nodes() < res:\n res = n_nodes()\n abc('write_blif %s_best%d.blif'%(f_name,k))\n print '\\n*** best%d for %s *** = %d\\n'%(k,f_name,res)\n assert check_blif(),'inequivalence'\n return res", "def get_filename(self, batch_name):\n\t\tmax_number = -1\n\t\tfor filename in os.listdir(self.storage_folder):\n\t\t\tif filename.endswith(\".csv\"):\n\t\t\t\tfilename = filename[0:-4]\n\t\t\t\tnumber = int(filename.split(\"_\")[0])\n\t\t\t\tif number > max_number:\n\t\t\t\t\tmax_number = number\n\t\tmax_number += 1\n\t\treturn str(max_number) + \"_\" + batch_name + \".csv\"", "def extract_newest_ckpt_name(ckpt_folder_path, output_data_file_path):\n files_list = [f for f in listdir(ckpt_folder_path) if isfile(join(ckpt_folder_path, f))]\n print (str(files_list))\n matching = [s for s in files_list if \"model.ckpt-\" in s]\n print (str(matching))\n meta_files_list = [s for s in matching if \".meta\" in s]\n print (str(meta_files_list))\n \n # Get the highest number file model.ckpt-XXX.meta\n MAX_index_file_version = 0\n for meta_file in meta_files_list:\n # filename = model.ckpt-XXX\n filename, file_extension = path.splitext(meta_file)\n # aux1 = 
XXX.meta\n index_file_version = int(filename.split(\"-\")[1])\n if index_file_version > MAX_index_file_version:\n MAX_index_file_version = index_file_version\n \n \n print (\"MAX_INDEX=\"+str(MAX_index_file_version))\n \n print (\"Opening file==\"+str(output_data_file_path))\n file = open(output_data_file_path,'w')\n print (\"Start create_label_contents...\")\n contents = str(MAX_index_file_version)\n print (\"Done create_label_contents...\")\n file.write(contents)\n file.close() \n print (\"Pbtxt Generated...\"+str(output_data_file_path))\n \n return None", "def save_checkpoint(state, is_best, filename=os.path.join(os.environ.get('USER_PATH'),'/data/checkpoints/checkpoint.pt')):\n\t if is_best:\n\t\t print (\"=> Saving a new best model\")\n\t\t print(f'SAVING TO: {filename}')\n\t\t torch.save(state, filename) # save checkpoint\n\t else:\n\t\t print (\"=> Loss did not improve\")", "def __find_one_file():\n\n batch_number = 140010\n\n training_example_file_name = find_training_file(\n top_training_dir_name=TOP_TRAINING_DIR_NAME, batch_number=batch_number,\n raise_error_if_missing=True)\n print training_example_file_name", "def get_exp_weight(exp_path):\n exp_path = Path(exp_path)\n list_checkpoint = list(exp_path.glob(\"*.pth\"))\n # list_checkpoint = sorted(list_checkpoint)\n if len(list_checkpoint) == 0:\n logger.info(f\"No checkpoint found in experiment folder {str(exp_path)}\")\n found_checkpoint = None\n cp_name = None\n return found_checkpoint, cp_name\n elif len(list_checkpoint) == 1:\n logger.info(f\"Single checkpoint exist in folder {str(exp_path)}\")\n cp_name = list_checkpoint[0].name\n else:\n logger.warning(f\"Multiple checkpoint found in experiment folder {str(exp_path)}\")\n logger.warning(\"Available option is : \")\n for cp in list_checkpoint:\n print(\"===> \", str(cp.name))\n print(f\"Please enter the checkpoint name that you want to load :\", end=\" \")\n cp_name = input()\n\n # extract checkpoint of largest iteration\n if not cp_name:\n list_index = list()\n for cp in list_checkpoint:\n cp_index = ''.join([n for n in str(cp.name) if n.isdigit()])\n if cp_index.isdigit():\n list_index.append(int(cp_index))\n else:\n list_index.append(0)\n id_get = list_index.index(max(list_index))\n cp_name = list_checkpoint[id_get].name\n logger.warning(f\"No checkpoint chosen. Get the checkpoint : \" + cp_name)\n\n found_checkpoint = str(exp_path.joinpath(cp_name))\n return found_checkpoint, cp_name", "def save_checkpoint(state, is_best, filename=\"/output/checkpoint.pkl\"):\n if is_best:\n print(\"=> Saving a new best model.\")\n torch.save(state, filename) # save checkpoint\n else:\n print(\"=> Validation loss did not improve.\")", "def _get_checkpoint_filename(ckpt_dir_or_file):\n if isinstance(ckpt_dir_or_file, os.PathLike):\n ckpt_dir_or_file = os.fspath(ckpt_dir_or_file)\n if gfile.IsDirectory(ckpt_dir_or_file):\n return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)\n return ckpt_dir_or_file", "def load_game(game_name):\n file_name = game_name + \".pickle\"\n for item in pathlib.Path(\".\").iterdir():\n if file_name == item.name and item.is_file():\n with open(file_name, \"rb\") as file:\n game_info = pickle.load(file)\n return game_info\n else:\n print(\"Invalid file name. Please try again.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`omnibus` context processor provides the correct api endpoint and an auth token, if possible (user needs to be logged in).
def omnibus(request): auth_token = '' if hasattr(request, 'user') and request.user.is_authenticated(): auth_token = '{0}:{1}'.format( request.user.pk, UserAuthenticator.get_auth_token(request.user.pk)) return { 'OMNIBUS_ENDPOINT': u'{0}://{1}:{2}{3}'.format( ENDPOINT_SCHEME, SERVER_HOST or split_domain_port(request.get_host())[0], SERVER_PORT, SERVER_BASE_URL ), 'OMNIBUS_AUTH_TOKEN': auth_token }
[ "def user_endpoint(self):\n pass", "def __init__(self, token_introspect_endpoint, client_authentication=None):\n super(IntrospectionClient, self).__init__(client_authentication)\n self._token_introspect_endpoint = token_introspect_endpoint", "def run(self) -> AsyncContextManager[\"EndpointAPI\"]:\n ...", "def token_endpoint(self):\n pass", "def configure_rest_api():\n from collective.civicrm.config import SITE_KEY_RECORD\n from collective.civicrm.config import URL_RECORD\n api.portal.set_registry_record(URL_RECORD, u'localhost')\n api.portal.set_registry_record(SITE_KEY_RECORD, u'secret')\n user = api.user.get_current()\n user.setMemberProperties(mapping={'api_key': '123456'})", "def test_user_get_oauth2_application(self):\n pass", "def test_user_get_o_auth2_application(self):\n pass", "def tokenAuth(self):\n self.basicAuth()\n token_url = reverse('api-token')\n response = self.client.get(token_url, format='json', data={})\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('token', response.data)\n\n token = response.data['token']\n self.token = token", "def _required_auth_info(cls):", "def setup(self):\n if self.token:\n if self.api_key:\n raise CredentialsError(\"Can't use API Key when defining the token.\")\n \n self.private = False\n self.header = {\n \"Content-Type\": \"application/json\",\n \"X-Shopify-Access-Token\": self.token\n }\n\n elif self.api_key:\n if not self.api_password:\n raise CredentialsError(\"No password set for private app.\")\n self.header = {\"Content-Type\": \"application/json\"}", "def django_otp_context(request):\n\n def callback():\n return default_device(request.user)\n\n return {'default_device': SimpleLazyObject(callback)}", "def test_auth_against_real_deployment_with_api_key():\n host = os.getenv(DOMINO_HOST_KEY_NAME)\n api_key = os.getenv(DOMINO_USER_API_KEY_KEY_NAME)\n\n d = Domino(host=host, project=\"anyuser/quick-start\", api_key=api_key)\n assert isinstance(d.request_manager.auth, requests.auth.HTTPBasicAuth)\n\n # Raises a requests.exceptions.HTTPError if authentication failed\n d.environments_list()", "def __init__(self, uri, username, password,\n client_id, client_secret,\n api_dir='api', auth_dir='oauth2/token', **kwargs):\n super(WideskyHaystackSession, self).__init__(\n uri, api_dir, **kwargs)\n self._auth_dir = auth_dir\n self._username = username\n self._password = password\n self._client_id = client_id\n self._client_secret = client_secret\n self._auth_result = None", "def includeme(config):\n config.add_route('info', '/api/v1/')\n config.add_route('register', '/api/v1/accounts')\n config.add_route('profile_detail', '/api/v1/accounts/{username}')\n config.add_route('login', '/api/v1/accounts/login')\n config.add_route('logout', '/api/v1/accounts/logout')\n config.add_route('tasks', '/api/v1/accounts/{username}/tasks')\n config.add_route('task_detail', '/api/v1/accounts/{username}/tasks/{id}')", "def fetch_token(self, **kwargs):\n\n return super().fetch_token( # pragma: no cover\n self.openid_config[\"token_endpoint\"],\n client_secret=self.config.MICROSOFT_AUTH_CLIENT_SECRET,\n **kwargs,\n )", "def distantAuthCall ( api_request=None, query={}, payload={}, func_name='user_login') :\n\n print (\". 
\"*50)\n log.debug(\"distantAuthCall/ payload : \\n%s\", pformat(payload) )\n log.debug(\"distantAuthCall/ log_type : %s\", func_name )\n\n ### retrieve distant auth url root\n auth_url_root = getDistantAuthUrl()\n log.debug(\"distantAuthCall/ auth_url_root : %s\", auth_url_root )\n\n ### retrieve distant auth endpoint config\n endpoint_config = getDistantEndpointconfig(func_name)\n log.debug(\"distantAuthCall/ endpoint_config : \\n%s\", pformat(endpoint_config) )\n \n url = endpoint_config[\"url\"]\n method = endpoint_config[\"method\"]\n url_args = endpoint_config[\"url_args\"]\n post_args = endpoint_config[\"post_args\"]\n url_append = endpoint_config[\"url_append\"]\n resp_path = endpoint_config[\"resp_path\"]\n\n\n ### build url base for specific auth\n base_url = auth_url_root + url \n log.debug(\"distantAuthCall/ base_url : %s\", base_url )\n\n\n\n\n ### TO DO : append url_append value\n # get param from request\n log.debug(\"distantAuthCall / url_append : %s\", url_append )\n if url_append : \n # log.debug(\"distantAuthCall / api_request : \\n%s\", pformat(api_request.__dict__) )\n url_append_string = \"\"\n url_append_list = []\n view_args = api_request.view_args\n log.debug(\"distantAuthCall / view_args : \\n%s\", pformat(view_args) )\n for append_arg in url_append : \n append_val = view_args[append_arg]\n url_append_list.append(append_val)\n url_append_string = \"/\".join(url_append_list)\n base_url += url_append_string\n\n\n\n\n\n \n\n ### append distant auth request headers\n headers = app.config[\"AUTH_URL_HEADERS\"]\n if payload :\n headers = app.config[\"AUTH_URL_HEADERS_PAYLOAD\"]\n\n ### TO DO : add token to requests in headers or query_string\n token = getTokenFromRequest(api_request)\n log.debug(\"token : %s\", token )\n\n token_query_string = \"\"\n\n if token :\n token_locations = app.config[\"AUTH_URL_TOKEN_LOCATION\"]\n \n if \"query_string\" in token_locations and \"headers\" not in token_locations : \n token_query_string_name = app.config[\"AUTH_URL_TOKEN_QUERY_STRING_NAME\"]\n token_query_string = \"{}={}\".format(token_query_string_name,token)\n\n if \"headers\" in token_locations : \n token_header_name = app.config[\"AUTH_URL_TOKEN_HEADER_NAME\"]\n token_header_type = app.config[\"AUTH_URL_TOKEN_HEADER_TYPE\"]\n headers[token_header_name] = token\n\n log.debug(\"distantAuthCall / headers : \\n%s\", pformat(headers) )\n\n\n\n\n ### TO DO : append url_args\n url_args_string = \"\"\n if url_args :\n url_args_string = \"?\"\n for arg_k, arg_v in url_args.items() : \n url_args_string += \"&{}={}\".format( arg_k, query[arg_v] )\n query_url = base_url + url_args_string + token_query_string\n log.debug(\"distantAuthCall / query_url : %s\", query_url)\n\n\n\n ### send request to service and read response\n if method == 'GET' : \n response = requests.get(query_url, headers=headers)\n\n elif method == 'DELETE' : \n response = requests.delete(query_url, headers=headers)\n\n elif method in ['POST', 'PUT'] :\n\n ### TO DO : rebuild payload given \n\n # remap payload given endpoint connfig \n payload_type = type(payload)\n log.debug(\"distantAuthCall / payload_type : %s\", payload_type )\n \n if post_args : \n if payload_type == dict : \n payload_remapped = {\n post_args[k] : v for k,v in payload.items() if k in post_args.keys()\n }\n elif payload_type == list : \n payload_remapped = []\n for p in payload : \n p_remapped = {\n post_args[k] : v for k,v in p.items() if k in post_args.keys()\n }\n payload_remapped.append(p_remapped)\n else : \n payload_remapped = payload\n 
log.debug(\"distantAuthCall / payload_remapped : \\n%s\", pformat(payload_remapped) )\n\n # then payload as json\n payload_json = json.dumps(payload_remapped)\n log.debug(\"distantAuthCall / payload_json : %s\", payload_json )\n\n if method == 'POST' : \n response = requests.post(query_url, data=payload_json, headers=headers)\n\n elif method == 'PUT' : \n response = requests.put(query_url, data=payload_json, headers=headers)\n\n\n log.debug(\"distantAuthCall / response.status_code : %s\", response.status_code )\n response_json = response.json()\n # log.debug(\"distantAuthCall / response_json : \\n%s\", pformat(response_json) )\n \n if resp_path : \n ### remap response_json given resp_path if specific \n response_json = { arg_k : response_json[arg_v] for arg_k, arg_v in resp_path.items() if arg_v in response_json.keys() }\n\n return response_json", "def auth(request):\n import warnings\n warnings.warn(\n \"The context processor at `django.core.context_processors.auth` is \" \\\n \"deprecated; use the path `django.contrib.auth.context_processors.auth` \" \\\n \"instead.\",\n DeprecationWarning\n )\n #from django.contrib.auth.context_processors import auth as auth_context_processor\n return {}#auth_context_processor(request)", "def run(self) -> AsyncContextManager[\"RemoteEndpointAPI\"]:\n ...", "def api_jwt_token_page():\n return render_template('/api/token.html')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new piece of PLAIN TEXT message. str message; bool newLine: whether to prepend the message with a new line; returns MessageBuilder
def add_text(self, message, newLine=True): if newLine and len(self.txt) > 0: self.txt += "\r\n" self.txt += message return self
[ "def add_html(self, message, newLine=True):\n if newLine and len(self.html) > 0:\n self.html += \"<br />\"\n self.html += message\n return self", "def create_plaintext_message(self, text):\n plain_text_maxcols = 72\n textout = cStringIO.StringIO()\n formtext = formatter.AbstractFormatter(\n formatter.DumbWriter(textout, plain_text_maxcols))\n parser = HTMLParser(formtext)\n parser.feed(text)\n parser.close()\n\n # append the anchorlist at the bottom of a message\n # to keep the message readable.\n anchorlist = \"\\n\\n\" + (\"-\" * plain_text_maxcols) + \"\\n\\n\"\n for counter, item in enumerate(parser.anchorlist):\n anchorlist += \"[%d] %s\\n\" % (counter, item)\n\n text = textout.getvalue() + anchorlist\n del textout, formtext, parser, anchorlist\n return text", "def add_text(self, text, newline=False):\r\n style = \"clear: left;\" if newline else ''\r\n self.html += '<div class=\"text\" style=\"%s\">%s</div>\\n' %\\\r\n (style, text)", "def _append_plain_text(self, text, before_prompt=False):\n self._append_custom(self._insert_plain_text, text, before_prompt)", "def create_notification_line(msg):\n local_time = util.format_date(msg[\"time\"])\n message_line = click.style(\"{} : {} from {}\\n\".format(local_time, msg[\"type\"],\n msg[\"from\"]),\n fg=\"cyan\")\n message_line += \"{}\\n\".format(msg[\"content\"])\n return message_line", "def __manage_lines(message: str, color: str, first_line_start: str, new_line_start: str):\n index = 0\n output = str()\n for line in message.split(\"\\n\"):\n if not index:\n # First line.\n output += f\"{first_line_start}{color}{line}\\n\"\n else:\n # Any other line.\n output += f\"{new_line_start}{color}{line}\\n\"\n index += 1\n return output", "def new_paragraph(self):\n if self.chainMode == ChainMode.CHARS:\n return \"\\n\\n\"\n elif self.chainMode == ChainMode.WORDS:\n return [\"\\n\\n\"]", "def message(new_msg, color=libtcod.white): \n global game_msgs\n\n new_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH)\n for line in new_msg_lines:\n #if the buffer is full, remove the first line to make room for the new one\n if len(game_msgs) == MSG_HEIGHT:\n del game_msgs[0]\n game_msgs.append( (line, color) )", "def __updateMsg(self, text = \"\"):\n\n # Ensure there is a new line\n text = \"\\n\" + text\n\n # Append the new text to the previous message text\n self.messageText = self.messageText + text\n self.msgIO.canvas.f.iomb.msg.configure(text = self.messageText)\n self.msgIO.canvas.f.update_idletasks()\n\n # Reconfigure the canvas size/scrollregion based upon the message box\n mbHgt = self.msgIO.canvas.f.iomb.msg.winfo_height()\n scrollHgt = mbHgt + self.minHgt\n self.msgIO.canvas.itemconfigure(1, height = scrollHgt)\n self.msgIO.canvas.configure(scrollregion = (0, 0, 0, scrollHgt))\n self.msgIO.canvas.yview_moveto(1)", "def add_line(self, txt, indent=0):\n self.add(txt+'\\n', indent)", "def add_message(\n self,\n text: str,\n fg: Tuple[ int, int, int ] = color.white,\n *, # What does this mean?\n stack: bool = True,\n ) -> None:\n if stack and self.messages and text == self.messages[ -1 ].plain_text:\n # Reminder, -1 means the last entry\n self.messages[ -1 ].count += 1\n else:\n self.messages.append( Message( text, fg ) )", "def create_new_message():\n return questionary.confirm(\"Create widgets for another message?\").ask()", "def new_line(self, text='', above=False):\n if above:\n target_line = self.line_number()\n else:\n target_line = self.line_number() + 1\n self._lines.insert(self._line_index(target_line), text)\n self.goto_line(target_line)", 
"def introduce(self):\n return Message(text='Hello, my name is {}'.format(self.name))", "def appendMessage(self, from_, message):\n if from_ == \"\" or message == \"\":\n return\n \n self.chatEdit.append(\n QDateTime.currentDateTime().toString(Qt.SystemLocaleLongDate) +\n \" <\" + from_ + \">:\")\n self.chatEdit.append(message + \"\\n\")\n bar = self.chatEdit.verticalScrollBar()\n bar.setValue(bar.maximum())\n \n if not self.isVisible():\n self.__ui.showNotification(\n UI.PixmapCache.getPixmap(\"cooperation48.png\"),\n self.tr(\"Message from <{0}>\").format(from_), message)", "def make_hl7_message(self, now):\n # http://python-hl7.readthedocs.org/en/latest/index.html\n\n msh_segment = make_msh_segment(\n message_datetime=now,\n message_control_id=unicode(self.msg_id)\n )\n pid_segment = self.task.get_patient_hl7_pid_segment(self.recipient_def)\n other_segments = self.task.get_hl7_data_segments(self.recipient_def)\n\n # ---------------------------------------------------------------------\n # Whole message\n # ---------------------------------------------------------------------\n segments = [msh_segment, pid_segment] + other_segments\n self.msg = hl7.Message(SEGMENT_SEPARATOR, segments)\n if self.recipient_def.keep_message:\n self.message = unicode(self.msg)", "def create_message(self):\n request = self.create_request()\n headers = self.create_header_str()\n data = self.body\n return \"%s%s\\r\\n%s\" % (request, headers, data)", "def add(self, txt, indent=0):\n self.text.append(u'{0}{1}'.format(' '*indent, txt))", "def _create_message_simple(self, sender, to, subject, message_text):\n self.log.info(\"Creating a simple message...\")\n\n message = MIMEText(message_text)\n message[\"to\"] = to\n message[\"from\"] = sender\n message[\"subject\"] = subject\n\n return message" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new piece of HTML message. str message; bool newLine: whether to prepend the message with a new line; returns MessageBuilder
def add_html(self, message, newLine=True): if newLine and len(self.html) > 0: self.html += "<br />" self.html += message return self
[ "def add_text(self, message, newLine=True):\n if newLine and len(self.txt) > 0:\n self.txt += \"\\r\\n\"\n self.txt += message\n return self", "def create_notification_line(msg):\n local_time = util.format_date(msg[\"time\"])\n message_line = click.style(\"{} : {} from {}\\n\".format(local_time, msg[\"type\"],\n msg[\"from\"]),\n fg=\"cyan\")\n message_line += \"{}\\n\".format(msg[\"content\"])\n return message_line", "def __manage_lines(message: str, color: str, first_line_start: str, new_line_start: str):\n index = 0\n output = str()\n for line in message.split(\"\\n\"):\n if not index:\n # First line.\n output += f\"{first_line_start}{color}{line}\\n\"\n else:\n # Any other line.\n output += f\"{new_line_start}{color}{line}\\n\"\n index += 1\n return output", "def add_text(self, text, newline=False):\r\n style = \"clear: left;\" if newline else ''\r\n self.html += '<div class=\"text\" style=\"%s\">%s</div>\\n' %\\\r\n (style, text)", "def message(new_msg, color=libtcod.white): \n global game_msgs\n\n new_msg_lines = textwrap.wrap(new_msg, MSG_WIDTH)\n for line in new_msg_lines:\n #if the buffer is full, remove the first line to make room for the new one\n if len(game_msgs) == MSG_HEIGHT:\n del game_msgs[0]\n game_msgs.append( (line, color) )", "def appendMessage(self, from_, message):\n if from_ == \"\" or message == \"\":\n return\n \n self.chatEdit.append(\n QDateTime.currentDateTime().toString(Qt.SystemLocaleLongDate) +\n \" <\" + from_ + \">:\")\n self.chatEdit.append(message + \"\\n\")\n bar = self.chatEdit.verticalScrollBar()\n bar.setValue(bar.maximum())\n \n if not self.isVisible():\n self.__ui.showNotification(\n UI.PixmapCache.getPixmap(\"cooperation48.png\"),\n self.tr(\"Message from <{0}>\").format(from_), message)", "def create_new_message():\n return questionary.confirm(\"Create widgets for another message?\").ask()", "def add_line_break(self) -> \"Span\":\n return self.add(LineBreakChunk())", "def _addline(self, s='', comment=None):\n self.line_number += 1\n linebuf = ''\n if self.show_line_numbers and s:\n linebuf += 'N%d' % self.line_number\n if s:\n linebuf += ' '\n linebuf += s\n if self.show_comments and comment:\n if s:\n linebuf += ' '\n linebuf += '(' + comment + ')'\n linebuf += '\\n'\n self._add(linebuf)", "def add_message(self):\r\n\r\n if self._succesful_login == False:\r\n return\r\n\r\n if self._lastselectedfriend == None:\r\n return\r\n \r\n UI = self._input_ui.get()\r\n friendname = self._lastselectedfriend\r\n participants = [UI, friendname]\r\n message = self._texttosend.get()\r\n if len(message) ==0:\r\n return\r\n message2 =self._msgs.appendmessages(UI, message)\r\n\r\n msg=['update chat history', participants, message2] \r\n encoded = json.dumps(msg) \r\n self._client._s.send(encoded)\r\n\r\n encoded_chat = self._client._s.recv(4096)\r\n unencoded = json.loads(encoded_chat)\r\n self._current_chat_history = unencoded\r\n self.show_chat()\r\n self._texttosend.delete(0, 'end')\r\n self._chatdisplay.see(tk.END)", "def build_entry_field_string(p_date: str, p_source: str, p_type: str, p_message: str) -> str:\n new_line = \"[\" + p_date + \"]\"\n new_line += \"[\" + p_source + \"]\"\n new_line += \"[\" + p_type + \"]\"\n new_line += \" \" + p_message\n return new_line", "def messageLine(name, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", numberOfPopupMenus=bool, useTemplate=\"string\", width=int, highlightColor=float, popupMenuArray=bool, annotation=\"string\", dropCallback=\"string\", exists=bool, enable=bool, 
enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, fullPathName=bool, preventOverride=bool, dragCallback=\"string\", backgroundColor=float, noBackground=bool, manage=bool, isObscured=bool):\n pass", "def create_message(self):\n request = self.create_request()\n headers = self.create_header_str()\n data = self.body\n return \"%s%s\\r\\n%s\" % (request, headers, data)", "def append(self, element):\n \n# Don't add non-string things to the list by mistake!\n assert type(element) == type(\"\")\n# Don't add strings which are too long!\n if (len(element) > config.MESSAGES_DIMENSIONS[0]):\n raise ValueError(\"String added to message list too long.\\n%s\" \n % element)\n\n self.message_list.append(element)", "def make_hl7_message(self, now):\n # http://python-hl7.readthedocs.org/en/latest/index.html\n\n msh_segment = make_msh_segment(\n message_datetime=now,\n message_control_id=unicode(self.msg_id)\n )\n pid_segment = self.task.get_patient_hl7_pid_segment(self.recipient_def)\n other_segments = self.task.get_hl7_data_segments(self.recipient_def)\n\n # ---------------------------------------------------------------------\n # Whole message\n # ---------------------------------------------------------------------\n segments = [msh_segment, pid_segment] + other_segments\n self.msg = hl7.Message(SEGMENT_SEPARATOR, segments)\n if self.recipient_def.keep_message:\n self.message = unicode(self.msg)", "def _create_message_html(self, sender, to, subject, message_text, message_html):\n self.log.info(\"Creating an html message...\")\n\n message = MIMEMultipart(\"alternative\")\n message[\"subject\"] = subject\n message[\"from\"] = sender\n message[\"to\"] = to\n if message_text:\n message.attach(MIMEText(message_text, \"plain\"))\n message.attach(MIMEText(message_html, \"html\"))\n\n return message", "def create_plaintext_message(self, text):\n plain_text_maxcols = 72\n textout = cStringIO.StringIO()\n formtext = formatter.AbstractFormatter(\n formatter.DumbWriter(textout, plain_text_maxcols))\n parser = HTMLParser(formtext)\n parser.feed(text)\n parser.close()\n\n # append the anchorlist at the bottom of a message\n # to keep the message readable.\n anchorlist = \"\\n\\n\" + (\"-\" * plain_text_maxcols) + \"\\n\\n\"\n for counter, item in enumerate(parser.anchorlist):\n anchorlist += \"[%d] %s\\n\" % (counter, item)\n\n text = textout.getvalue() + anchorlist\n del textout, formtext, parser, anchorlist\n return text", "def send_line_separator(self, newlines=True):\n if newlines:\n self.send_data('display_message', '\\n' + '=' * 80 + '\\n')\n else:\n self.send_data('display_message', '=' * 80)", "def makeNewLineAdd(oldLine, myLen, distance):\n\n oldLine[1] = int(oldLine[1])\n oldLine[2] = int(oldLine[2])\n oldLine[6] = int(oldLine[6])\n oldLine[7] = int(oldLine[7])\n\n if oldLine[1] <= int(distance):\n oldLine[1] = 0\n oldLine[6] = 0\n else:\n oldLine[1] -= distance\n oldLine[6] -= distance\n\n if oldLine[2]+distance >= myLen:\n oldLine[2] = myLen-1\n oldLine[7] = myLen-1\n else:\n oldLine[2] += distance\n oldLine[7] += distance\n\n oldLine[9] = '1'\n oldLine[10] = str(oldLine[2]-oldLine[1])+','\n oldLine[11] = '0,'\n return(joiner(oldLine))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the message. Caches the built message for further retrieval. bool includeImage: whether to include the image in the response or not; returns str
def build(self, includeImage=False): if self.response is None: self.response = self._build(includeImage) return self.response
[ "def create_message(self):\n request = self.create_request()\n headers = self.create_header_str()\n data = self.body\n return \"%s%s\\r\\n%s\" % (request, headers, data)", "def build_warm_email_body(self):\n self.html_body = render_to_string('email/warm_temp_mail.html',\n {'curr_temp': self.curr_temp,\n 'curr_state': self.temp_desc,\n 'location': self.location})\n self.text_body = strip_tags(self.html_body)\n try:\n filehandle = open(os.path.join(settings.BASE_DIR, 'weather-app-server', 'weatherapp', 'images', 'good_weather.jpg'), 'rb')\n except IOError:\n logger.error(\"good_weather.jpg file does not exist in images folder\")\n return\n self.image = MIMEImage(filehandle.read())\n self.image.add_header('Content-ID', '<image1>')\n filehandle.close()", "def profile_image_message(self):\n self.wait_for_field('image')\n self.wait_for_ajax()\n return self.q(css='.message-banner p').text[0]", "def build_cold_email_body(self):\n self.html_body = render_to_string('email/cold_temp_mail.html',\n {'curr_temp': self.curr_temp,\n 'curr_state': self.temp_desc,\n 'location': self.location})\n self.text_body = strip_tags(self.html_body)\n try:\n filehandle = open(os.path.join(settings.BASE_DIR, 'weather-app-server', 'weatherapp', 'images', 'cold.gif'), 'rb')\n except IOError:\n logger.error(\"cold.gif file does not exist in images folder\")\n return\n self.image = MIMEImage(filehandle.read())\n self.image.add_header('Content-ID', '<image1>')\n filehandle.close()", "def build_reply(self, module):\n\n\t\t# Log\n\t\tdebug_print(\"build_reply\")\n\t\tdebug_print(\" WebCommunicator\")\n\n\t\t# Front matter\n\t\treply = \"--- Random \"\n\t\treply += module.type + \" ---\\n\"\n\n\t\t# Module description\n\t\treply += module.get_long_description()\n\n\t\t# Error checking\n\t\tif is_error_string(reply):\n\t\t\treturn self.get_error_message(reply)\n\n\t\t# Log\n\t\tdebug_print(\"Web Reply: Success\")\n\n\t\t# Return\n\t\treturn reply", "def get_request_msg(self) -> str:", "def build_normal_email_body(self):\n self.html_body = render_to_string('email/normal_temp_mail.html',\n {'curr_temp': self.curr_temp,\n 'curr_state': self.temp_desc,\n 'location': self.location})\n self.text_body = strip_tags(self.html_body)\n try:\n filehandle = open(os.path.join(settings.BASE_DIR, 'weather-app-server', 'weatherapp', 'images', 'normal.gif'), 'rb')\n except IOError:\n logger.error(\"normal.gif file does not exist in images folder\")\n return\n self.image = MIMEImage(filehandle.read())\n self.image.add_header('Content-ID', '<image1>')\n filehandle.close()", "def _prepare_message(msg):\n msg_mime = MIMEText(msg, 'text', 'utf-8')\n msg_mime['From'] = Header(infomail.fromaddr, charset='utf-8')\n msg_mime['To'] = Header(', '.join(infomail.toaddrs),\n charset='utf-8')\n msg_mime['Subject'] = Header(\"VirtualBox images built\",\n charset='utf-8')\n return msg_mime", "def build_message(self, message_dictionary):\n try:\n return self._renderer.render(self._parsed_template, message_dictionary)\n except pystache_context.KeyNotFoundError as e:\n logger.error('Failed to find {Key} when generating message from {TemplateFile} . 
{ErrorMessage}',\n fparams={'Key': e.key, 'TemplateFile': self.template_file, 'ErrorMessage': e})\n raise MessageGenerationError(f'Failed to find key:{e.key} when generating message from'\n f' template file:{self.template_file}') from e", "async def msg_handler(self, msg: Message) -> dict:\n data_param = {}\n if msg.type() == MessageType.MESSAGE_TYPE_IMAGE:\n img = await msg.to_file_box()\n data_param.update(\n img_name=img.name,\n data=img.base64\n )\n elif msg.type() == MessageType.MESSAGE_TYPE_EMOTICON:\n import xml.etree.ElementTree as Etree\n content = msg.payload.text # xml content, need xml parser to extract msg.emoji(cdnurl)\n msgtree = Etree.fromstring(content)\n cdn_url = msgtree.find('emoji').attrib['cdnurl']\n ret = self.s.get(cdn_url)\n b64_str = base64.b64encode(ret.content)\n data_param.update(img_name=str(uuid.uuid4()) + MemeBot.content_type_mapping[ret.headers['Content-Type']],\n data=b64_str)\n ret_json = self.s.post(url=config.backend_upload_url, data=data_param).json() # ret keys: img_name, md5, log\n return ret_json", "def pii_message_body(self):\n http_content = \"\"\n headers = self.headers_dict\n content_len = int(headers.get(\"content-length\", 0))\n content_type = headers.get(\"content-type\", \"\")\n # Retrieve content from HTTP request message body\n if (content_len > 0 and content_type in VALID_CONTENT_TYPES):\n http_content = self.http_request.rfile.read(content_len)\n return http_content", "def pii_message_body(self):\n headers = self.headers_dict\n content_type = headers.get(\"content-type\", \"\")\n # Retrieve content from HTTP request message body\n if (content_type in VALID_CONTENT_TYPES):\n http_content = self.message_body\n return http_content", "def CreateMessage(sender, to, subject, message_text):\n #message = MIMEText(message_text)\n message = MIMEText(message_text,'html')\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}", "def get_message_body(self):\n body = loader.render_to_string(self.body_template, self.get_rendering_context())\n return body", "def __compose_can_msg(json_msg: Dict[str, str]) -> can.Message:\n ins = format(json_msg[\"c\"], '05b')\n sid = format(json_msg[\"s\"], '012b')\n did = format(json_msg[\"d\"], '012b')\n can_id = int(ins + sid + did, 2)\n\n data = json_msg[\"b\"]\n data_decoded = base64.b64decode(data)\n data_decoded_in_bytes = bytearray(data_decoded)\n\n can_msg = can.Message(\n arbitration_id=can_id,\n data=data_decoded_in_bytes,\n dlc=json_msg[\"l\"],\n extended_id=True,\n )\n return can_msg", "def get_response_msg(self) -> str:\n return self.response_msg", "def make_compressed_msg(frame):\n\n # Make a compressed image\n msg = CompressedImage()\n msg.header.stamp = rospy.Time.now()\n msg.format = \"jpeg\"\n msg.data = np.array(cv2.imencode('.jpg', frame)[1]).tostring()\n\n # Return the compressed image\n return msg", "def image_response(self, raw_url, accessibility_text):\r\n \r\n return{\r\n \"payload\":\r\n {\r\n \"richContent\":\r\n [\r\n [\r\n {\r\n \"type\": \"image\",\r\n \"rawUrl\": raw_url,\r\n \"accessibilityText\": accessibility_text\r\n }\r\n ]\r\n ]\r\n\r\n }\r\n }", "def respond(sender_id, message_text, attachment_type, attachment_url, postback, quick_reply, context):\n\n new_context = dict(project_id=postback)\n conversation = dict(name='update_project_status', stage='add_image')\n response = dict(message_text=\"Great! 
Take or upload a image to update your progress\")\n return response, new_context, conversation" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Figure out which reactions in our set have no proteins associated with them.
def reactions_with_no_proteins(reactions, verbose=False): nopegs = set() for r in reactions: if reactions[r].number_of_enzymes() == 0: nopegs.add(r) if verbose: sys.stderr.write("REACTIONS WITH NO PROTEINS: {} reactions have no pegs associated ".format(len(nopegs)) + "with them (out of {} reactions)\n".format(len(reactions))) return nopegs
[ "def reactions_with_proteins(reactions, verbose=False):\n\n pegs = set()\n for r in reactions:\n if reactions[r].number_of_enzymes() != 0:\n pegs.add(r)\n\n if verbose:\n sys.stderr.write(\"REACTIONS WITH PROTEINS: {} reactions have pegs associated \".format(len(pegs)) +\n \"with them (out of {} reactions)\\n\".format(len(reactions)))\n\n return pegs", "def Exclude_reactions(self):\n try:\n return self._Exclude_reactions\n except AttributeError:\n self._Exclude_reactions = list(\n set(self.Exclude_list + self.problematic_rxns)\n )\n return self._Exclude_reactions", "def AllReactions(self):\n rxns = []\n hashes = set()\n for r in self.reactions.all():\n if r.GetHash() not in hashes:\n rxns.append(r)\n hashes.add(r.GetHash())\n return rxns", "def _FilterProtonsAndElectrons(self):\n self.reactants = list(filter(lambda c: c.compound.kegg_id not in\n ['C00080', 'C05359'], self.reactants))", "def unsubmitted_participants(self):\n return [x for x in self.turn_order if not x in self.actions]", "def cal_problematic_rxns(self):\n\n problematic_rxns = []\n for met in self.metabolites:\n if met.is_exclude:\n problematic_rxns.append(met.reactions)\n\n if len(problematic_rxns) > 0:\n problematic_rxns = frozenset.union(*problematic_rxns)\n problems = [i.id for i in problematic_rxns]\n return problems\n else:\n return []", "def rewards_all_paid(self):\n return (self.participants.filter(\n date_rewarded__isnull=True).count() == 0)", "def setOfEmptySeats(self):\n emptySeats = set()\n for seatNumber, player in self.seats.items():\n if player is None:\n emptySeats.add(seatNumber)\n return emptySeats # as a list of seat numbers, e.g. [2, 3, 5, 8, 9]", "def remove_uptake_and_secretion_reactions(reactions):\n\n toremove = set()\n for r in reactions:\n if r.startswith('upsr_'):\n toremove.add(r)\n\n for r in toremove:\n reactions.pop(r)\n return reactions", "def get_discard_possibilities(self):\n result = []\n self.get_discard_possibilities_rec(self.hand, [], self.number_point, result)\n\n return result", "def get_unassigned_wishers(self, package_number, wish_id):\r\n # get Package object\r\n package = self.packages[package_number]\r\n # get all students having this package as their wish (according to wish_id) that aren't assigned\r\n return [wisher for wisher in package.wishers[wish_id] if wisher not in self.assigned_students.keys()]", "def unanswered(self):\n return self.filter_by(answer=None)", "def ordered_not_acknowledged(self):\n qs = self.get_qs()\n return qs.filter(~Q(orderitem__po_num=\"\") & Q(orderitem__ack_num=\"\")).distinct()", "def no_predecessors_iter(self):\n for n in self.nodes:\n if not len(list(self.predecessors(n))):\n yield n", "def getUnprofitableCount(self):\n\t\treturn len(self.__losses)", "def reactions(self):\n return self.__reactions.list()", "def met(self, pdgIDs_to_ignore=[12, 14, 16]):\n status_filter = functools.partial(_filter_by_status, status=1)\n return sum([p.p4 for p in self.particles(status_filter) if p.pdgID not in pdgIDs_to_ignore],\n FourMomentum())", "def allCertain(self):\n for ci in self.creatures:\n if not ci.certain:\n return False\n return True", "def notfinal(self):\n return {k: v for k, v in self.current.items() if v is not Unset and v != self.final[k]}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Figure out which reactions in our set have proteins associated with them.
def reactions_with_proteins(reactions, verbose=False):

    pegs = set()
    for r in reactions:
        if reactions[r].number_of_enzymes() != 0:
            pegs.add(r)

    if verbose:
        sys.stderr.write("REACTIONS WITH PROTEINS: {} reactions have pegs associated ".format(len(pegs)) +
                         "with them (out of {} reactions)\n".format(len(reactions)))

    return pegs
[ "def reactions_with_no_proteins(reactions, verbose=False):\n\n nopegs = set()\n for r in reactions:\n if reactions[r].number_of_enzymes() == 0:\n nopegs.add(r)\n\n if verbose:\n sys.stderr.write(\"REACTIONS WITH NO PROTEINS: {} reactions have no pegs associated \".format(len(nopegs)) +\n \"with them (out of {} reactions)\\n\".format(len(reactions)))\n\n return nopegs", "def reactions(self):\n return self.__reactions.list()", "def proteins(self):\n return self._regions.keys()", "def incoming_peers(self):\n registrations = {a for a in self.actions.filter(include={Receive}) if a.replicative}\n return {peer: registrations.intersection(signals) for peer, signals in self.peers.items()\n if registrations.intersection(signals)}", "def get_all_profiles_to_invite(self, sender):\n profiles = Profile.objects.all().exclude(user=sender)\n profile = Profile.objects.get(user=sender)\n qs = Relationship.objects.filter(Q(sender=profile) | Q(receiver=profile))\n\n accepted = []\n for relationship in qs:\n if relationship.status == 'accepted':\n accepted.append(relationship.receiver)\n accepted.append(relationship.sender)\n\n # All the profiles which are not in accepted list\n available = [profile for profile in profiles if profile not in accepted]\n return available", "def get_reactions_as_product(net_index: int, node_index: int) -> Set[int]:\n return _controller.get_reactions_as_product(net_index, node_index)", "def AllReactions(self):\n rxns = []\n hashes = set()\n for r in self.reactions.all():\n if r.GetHash() not in hashes:\n rxns.append(r)\n hashes.add(r.GetHash())\n return rxns", "def condensed_reactions(self):\n return list(self._condensed_reactions | self._spurious_condensed_reactions)", "def _FilterProtonsAndElectrons(self):\n self.reactants = list(filter(lambda c: c.compound.kegg_id not in\n ['C00080', 'C05359'], self.reactants))", "def predecessors_as_reactions(self, node):\n predecessors = self.predecessors(node)\n reactions = []\n for pred in predecessors:\n if self.isand(pred):\n reactions.append(pred)\n elif self[pred][node]['link'] == '+':\n reactions.append(pred + \"=\" +node)\n else:\n reactions.append(\"!\" + pred + \"=\" +node)\n return reactions", "def is_reaction_in(self, reaction):\n reaction = Reaction(reaction)\n if reaction in self.reactions:\n return True\n else:\n return False", "def confirmed_per_agent(\n self,\n ) -> Dict[Address, Dict[datetime.datetime, Transaction]]:\n return self._confirmed_per_agent", "def get_reactions(self, reactants = [], products = [], arity = 2, unproductive = None, spurious = None):\n if spurious == True:\n rxns = list(self._spurious_condensed_reactions)\n elif spurious == False:\n rxns = list(self._condensed_reactions)\n else:\n rxns = list(self._spurious_condensed_reactions | self._condensed_reactions)\n\n if unproductive == True:\n rxns = filter(lambda x: x.has_reactants(x.products) and x.has_products(x.reactants), rxns)\n elif unproductive == False:\n rxns = filter(lambda x: not(x.has_reactants(x.products) and x.has_products(x.reactants)), rxns)\n\n if arity is not None:\n rxns = filter(lambda x: len(x.reactants)==arity, rxns)\n\n return filter(lambda x: x.has_reactants(reactants) and x.has_products(products), rxns)", "def setProteins(self, proteinSets=None):\n # A consolidated master set containing all Protein objects\n self.proteins = Proteins.ProteinSet()\n # A list of sets of proteins, each set a potential agent of the reaction\n self.proteinSets = []\n if proteinSets is not None:\n # Make sure all the Protein objects are represented in the 
master set,\n # and that proteins mentioned multiple times are represented by the same Protein object.\n for subSet in proteinSets:\n self.proteinSets.append(self.proteins.recastSet(subSet))", "def get_penalties(self):\n from nablapps.events.models import ( # Moved down to avoid loop error when FysmatClass was imported to mixins in events\n EventRegistration,\n )\n\n # Penalties are valid for six months\n six_months_ago = timezone.now() - timezone.timedelta(\n days=182\n ) # about six months\n\n penalties = (\n EventRegistration.objects.filter(user=self, date__gte=six_months_ago)\n .exclude(penalty=0)\n .exclude(penalty=None)\n )\n return penalties", "def get_slots_being_confirmed(self):\n pass", "def transport_reactions(model: Model) -> List[str]:\n compartment_name = [\"_\" + id for id in model.compartments.keys()]\n res = []\n for rec in model.reactions:\n for i, c1 in enumerate(compartment_name):\n for c2 in compartment_name[i + 1 :]:\n if c1 in rec.reaction and c2 in rec.reaction:\n res.append(rec.id)\n return res", "def draw_reactions(self):\n reactions = {}\n\n for node in self.nodes:\n reaction = self.draw_reaction(node, self.reaction_scale, self.reaction_color)\n if reaction:\n reactions[node] = reaction\n\n return reactions", "def interset(genotypes):\n\tsnplist = map(lambda x: getsnps(x), genotypes)\n\tprint len(snplist)\n\tineverything = reduce(lambda x,y: set(x) & set(y), snplist)\n\treturn ineverything" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting org_id.
def org_id(self, value):
    if isinstance(value, str):
        self._org_id = value
    else:
        raise ValueError("org_id must be a string")
[ "def set_organization_id(self, organization_id):\n raise NotImplementedError", "def external_org_id(self, external_org_id):\n\n self._external_org_id = external_org_id", "def test_organization_id_put(self):\n pass", "def org_name(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"Org_name must be a string\")\n self._org_name = value", "def external_organization_id(self, external_organization_id):\n \n self._external_organization_id = external_organization_id", "def pid_organization_id(self) -> str:\n return pulumi.get(self, \"pid_organization_id\")", "def test_organization_id_get(self):\n pass", "def make_org_id(organisation_name: str) -> str:\n\n return organisation_name.strip().replace(\" \", \"_\").lower()", "def test_organization_id_resource_put(self):\n pass", "def editOrg(org_id):\n\n dbSession = current_app.config['DBSESSION'] # get the db session\n # get the organization from the database\n org = dbSession.query(Organization).filter(Organization.id == org_id).first()\n orgTypes = dbSession.query(OrganizationType).all()\n organizations = dbSession.query(Organization).all()\n if org is None:\n abort(404)\n\n # save the organization\n error = OrganizationHelperFunctions.saveOrganization(org, request, dbSession)\n\n if error == '':\n return redirect(url_for('org.orgList'))\n\n return render_template('org/edit_organization.html',\n organization=org,\n orgTypes=orgTypes,\n organizations=organizations)", "def update_organization(oid):\n kwargs = request.form.to_dict()\n\n org = None\n try:\n org = Organization.objects.get(id=oid)\n except DoesNotExist:\n raise APIError(ORG_NOT_FOUND, status_code=404)\n\n org.modify(**kwargs)\n return org", "def external_organization_id(self):\n return self._external_organization_id", "def set_jobid(jobid):\n global _set_jobid\n _set_jobid=str(jobid)", "def on_put(self, req, resp, orgid):\n mapper = self.meta.get('mapper')\n o = mapper.organization.Organization.get_by_uid(orgid)\n if o is None:\n raise falcon.HTTPInvalidParam('Organization not found', 'orgid')\n \n body = req.context['body']\n # look for changes to name, description, status, parameters, and data\n if 'name' in body:\n o.set_name(body['name'].strip())\n if 'status' in body:\n o.set_status(body['status'].strip())\n if 'custom_data' in body and isinstance(body['custom_data'], dict):\n o.set_custom_data(body['custom_data'])\n if 'data' in body and isinstance(body['data'], list):\n # body['data'] = [{'key': 'spam', 'value': 'eggs'}, ...]\n o.set_data(body['data'])\n \n o = mapper.organization.Organization.update_from_object(o)\n resp.body = {\"organization\": o.to_dict()}\n return True", "def edit_org(org_id):\n settings = Organisation.query.filter_by(id=org_id).first_or_404()\n form = OrganisationForm(obj=settings)\n \n if request.method == 'POST':\n form.populate_obj(settings)\n db.session.add(settings)\n db.session.commit()\n flash('Settings successfully edited', 'success')\n return redirect(url_for('admin.frontend_dashboard'))\n return render_template('admin/organisations/edit_org.html', form=form)", "def test_change_organization(self):\n pass", "def csr_org_name(self, csr_org_name):\n\n self._csr_org_name = csr_org_name", "def room_id(self, value: str):\r\n self._room_id = value", "def test_organization_id_alfresco_sync_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting org_name.
def org_name(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("Org_name must be a string")
        self._org_name = value
[ "def csr_org_name(self, csr_org_name):\n\n self._csr_org_name = csr_org_name", "def org_id(self, value):\n if isinstance(value, str):\n self._org_id = value\n else:\n raise ValueError(\"org_id must be a string\")", "def make_org_id(organisation_name: str) -> str:\n\n return organisation_name.strip().replace(\" \", \"_\").lower()", "def pt_organization_uri(name):\n\n\treturn 'organization/' + alphaNumeric(name.strip().lower(), '')", "def autoname(self):\n\t\tif not self.email_account_name:\n\t\t\tself.email_account_name = (\n\t\t\t\tself.email_id.split(\"@\", 1)[0].replace(\"_\", \" \").replace(\".\", \" \").replace(\"-\", \" \").title()\n\t\t\t)\n\n\t\tself.name = self.email_account_name", "def test_change_organization(self):\n pass", "def include_morgan_in_name(self):\n\t\tself.name=self.old_name+str(self.morgan)", "def fetch_organization_name(org_id):\n response = fetch_url(AFFINITY_BASE + 'organizations/' + str(org_id))\n\n return response['name']", "def external_org_id(self, external_org_id):\n\n self._external_org_id = external_org_id", "def set_name(self, room_name):\n self.name = room_name", "def set_jobname(jobname):\n global _set_jobname\n _set_jobname=str(jobname)", "def legal_name(self, value: str):\n self._legal_name = value\n self._dao.legal_name = value", "def set_project_name(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_PROJECT_NAME).\n send_keys(value))", "def license_name(self, value):\n self.logger.warn(\n \"Setting values on license_name will NOT update the remote Canvas instance.\"\n )\n self._license_name = value", "def set_organization_id(self, organization_id):\n raise NotImplementedError", "def set_nick_name(self, val):\n self.nick = val", "def setLastName(self, name=\"\"):\n\t\tself.lastName = name", "def project_name(self, value):\n self._project_name = value\n self.CP2K_INPUT.GLOBAL.Project_name = value", "def organization_role_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"organization_role_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting train_memory_quota.
def train_memory_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("train_memory_quota must be a string")
        unit = value[-1:]
        float_value = value[:-1]
        if unit not in constant.CLOUDML_MEMORY_UNITS:
            raise ValueError("train_memory_quota unit must be one of %s!" %
                             constant.CLOUDML_MEMORY_UNITS)
        if not float_value.replace(".", "", 1).isdigit():
            raise ValueError("train_memory_quota must be a number!")
        self._train_memory_quota = value
[ "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def set_memlimit(self, value):\n value = value * 1024 * 1024\n self.set_int(\"memory.limit_in_bytes\", value)", "def limit_mem():\n K.get_session().close()\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.allow_growth = True\n K.set_session(K.tf.Session(config=cfg))", "def set_GPU_Memory_Limit():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)", "def dev_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"dev_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_memory_quota must be a number!\")\n self._dev_memory_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def train_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def setQuotaRoot(request, maxsize):", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def set_memory_growth():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices(\n 'GPU')\n logging.info(\n \"Detect {} Physical GPUs, {} Logical GPUs.\".format(\n len(gpus), len(logical_gpus)))\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n logging.info(e)", "def set_max_mem(max_mem):\n\n JobServer._set_max_mem(max_mem)", "def total_memory_quota(self, value):\n if 
value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"total_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_memory_quota must be a number!\")\n self._total_memory_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def tensorboard_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"tensorboard_quota must be a postive integer!\")\n self._tensorboard_quota = value", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting train_cpu_quota.
def train_cpu_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("train_cpu_quota must be a string!")
        if not value.replace(".", "", 1).isdigit():
            raise ValueError("train_cpu_quota must be a number!")
        self._train_cpu_quota = value
[ "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def train_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def total_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_cpu_quota must be a number!\")\n self._total_cpu_quota = value", "def set_number_used_cores(job):\n\n pilot_user = os.environ.get('PILOT_USER', 'generic').lower()\n cpu = __import__('pilot.user.%s.cpu' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3\n cpu.set_core_counts(job)", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def set_cpu_request(self, cpu_request):\n self.cpu_request = cpu_request", "def set_cpus(self, num_cpus):\n if self.batch:\n self.batch_settings.batch_args[\"cpus-per-task\"] = num_cpus\n for db in self:\n db.run_settings.set_cpus_per_task(num_cpus)", "def limit_cpu(self) -> Optional[str]:\n return pulumi.get(self, \"limit_cpu\")", "def configure_gpu_cpu(RUN_GPU, GPU_ALLOCATION):\n # Extra imports to set GPU options\n import tensorflow as tf\n from keras import backend as k\n import os\n # To force code to run on cpu\n if RUN_GPU==False:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if 
RUN_GPU and GPU_ALLOCATION !=100:\n # TensorFlow congif\n config = tf.ConfigProto()\n\n # Allocate memory as-needed\n config.gpu_options.allow_growth = True\n\n # Allocate GPU memory based on user input USE_GPU\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ALLOCATION/100\n\n # Create a session with the above specified options\n k.tensorflow_backend.set_session(tf.Session(config=config))", "def _apply_cpu_count(self, args, thisTask, cmd_args, payload, setup):\n if thisTask.ncores is not None:\n cmd_args.append(\"-l\")\n cmd_args.append(\"nodes=1:ppn={:d}\".format(thisTask.ncores))\n return True", "def _set_cpuunits(self, instance, units=None):\n if not units:\n LOG.debug(\"Reported cpuunits %s\" % self.utility['UNITS'])\n LOG.debug(\"Reported percent of resource: %s\" %\n self._percent_of_resource(instance))\n units = int(self.utility['UNITS'] *\n self._percent_of_resource(instance))\n # TODO(imsplitbit): This needs to be adjusted to not allow\n # subscription of more than available cpuunits. For now we\n # won't let the obvious case of a container getting more than\n # the maximum cpuunits for the host.\n if units > self.utility['UNITS']:\n units = self.utility['UNITS']\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--cpuunits', units)\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Cannot set cpuunits for %s' %\n (instance['id'],))", "def _validate_cores(self, client: 'KubernetesClient'):\n request = getattr(self.spec.scheduling.default.resources.requests, \"cpu\", None)\n lim = getattr(self.spec.scheduling.default.resources.limits, \"cpu\", None)\n\n # Lim and request could be empty strings if they're being deleted from the settings\n if lim and request and type(lim) is KubeQuantity and type(request) is KubeQuantity:\n if lim < request:\n raise ValueError(\n \"Cores request of %s cannot exceed cores limit of %s\" % (request.quantity, lim.quantity))", "def set_task_cpu(\n self,\n data: Dict[str, Any],\n container_data: List[Dict[str, Any]],\n source: Dict[str, Any] = None\n ) -> None:\n if not source:\n source = self.data\n cpu_required = self._get_container_cpu_usage(container_data)\n if self.is_fargate():\n cpu = self._set_fargate_task_cpu(cpu_required, source=source)\n else:\n cpu = self._set_ec2_task_cpu(source=source)\n if cpu is not None:\n if cpu_required > cpu:\n raise SchemaException(\n f'You set task cpu to {cpu} but your container cpu sums to {cpu_required}.'\n 'Task cpu must be greater than the sum of container cpu.'\n )\n # we calculate cpu as an int, but register_task_definition wants a str\n data['cpu'] = str(cpu)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting train_gpu_quota.
def train_gpu_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("train_gpu_quota must be a postive integer!")
        self._train_gpu_quota = value
[ "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def set_GPU_Memory_Limit():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)", "def limit_gpu(config: Dict):\n if config['limit_gpu'] is not False:\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def configure_gpu_cpu(RUN_GPU, GPU_ALLOCATION):\n # Extra imports to set GPU options\n import tensorflow as tf\n from keras import backend as k\n import os\n # To force code to run on cpu\n if RUN_GPU==False:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if RUN_GPU and GPU_ALLOCATION !=100:\n # TensorFlow congif\n config = tf.ConfigProto()\n\n # Allocate memory as-needed\n config.gpu_options.allow_growth = True\n\n # Allocate GPU memory based on user input USE_GPU\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ALLOCATION/100\n\n # Create a session with the above specified options\n k.tensorflow_backend.set_session(tf.Session(config=config))", "def limit_mem():\n K.get_session().close()\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.allow_growth = True\n K.set_session(K.tf.Session(config=cfg))", "def assign_gpu_and_run(self):\n if not self.gpu_free.empty():\n\n # Retrieve the job from the queue\n job_to_run = self.check_enough_gpu()\n\n if job_to_run is None:\n return\n\n # Floor division to get lower bound of num_gpus\n num_gpus = int(job_to_run.width)//1000 + 1\n\n #if (int(job_to_run.width) % 1000)/1000 >= 0.4:\n # num_gpus += 2\n # This is okay because we already know that from check_enough_gpu that\n # gpu_free's size is greater than int(job_to_run.width)/1000\n\n for _ in range(num_gpus):\n job_to_run.gpu.append(self.gpu_free.get())\n\n # Create a copy of the environemnt\n new_env = os.environ.copy()\n\n # Create the CUDA GPU string\n gpu_string = \"\"\n\n i = 0\n while (i < len(job_to_run.gpu)):\n if i == 0:\n gpu_string = gpu_string + str(job_to_run.gpu[i])\n else:\n gpu_string = gpu_string + \",\" + str(job_to_run.gpu[i])\n i += 1\n\n new_env['CUDA_VISIBLE_DEVICES'] = gpu_string\n\n params = ['python',\n '/app/neural-style/neural_style.py',\n '--content', '%s' % job_to_run.path1,\n '--styles', '%s' % job_to_run.path2,\n '--output','%s' % job_to_run.output_path,\n '--content-weight', str(job_to_run.content_weight),\n '--content-weight-blend', str(job_to_run.content_blend),\n '--style-weight', str(job_to_run.style_weight),\n '--style-layer-weight-exp', 
str(job_to_run.style_layer_weight_exp),\n '--style-scales', str(job_to_run.style_scale),\n '--style-blend-weights', str(job_to_run.style_blend),\n '--iterations', str(job_to_run.iterations),\n '--width', str(job_to_run.width),\n '--network', VGG_LOCATION ]\n\n # set preserve colors if indicated\n # assuming that preserve_colors will be of type boolean\n if job_to_run.preserve_color:\n params.append('--preserve-colors')\n\n # Run the subprocess\n try:\n job_to_run.proc = Popen(params, env=new_env)\n self.logger.log.info(\"Popen worked! Job %d assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.running_jobs.append(job_to_run)\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))\n\n except Exception as e:\n self.logger.log.error(\"Job %d could not be assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.logger.log.exception(e)\n\n #c = self.db.cursor()\n #c.execute(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", (job_to_run.job_id,))\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", True, (job_to_run.job_id,))\n\n for free in job_to_run.gpu:\n self.gpu_free.put(free)\n\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))", "def set_gpu():\n if Config.gpu_count == 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1\n elif Config.gpu_count == 2:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2\n elif Config.gpu_count == 3:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3\n elif Config.gpu_count == 4:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3 + ', ' + Config.gpu4", "def set_gpu_memory_target(frac):\n import keras\n if keras.backend.backend() != 'tensorflow':\n print(\"Return without doing anything\")\n return\n # Do the import here, not at the top, in case Tensorflow is not\n # installed at all.\n import tensorflow as tf\n #from keras.backend.tensorflow_backend import set_session\n if tf_version_comp(tf.__version__):\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.compat.v1.Session(config=config))\n session = tf.compat.v1.Session(config=config)\n # tf.compat.v1.keras.backend.set_session(session)\n\n else:\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.Session(config=config))\n session = tf.Session(config=config)", "def set_gpu(gpu, enable_benchmark=True):\n if len(str(gpu)) > 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n parallel = True\n device = torch.device(\"cuda:{}\".format(','.join([str(a) for a in range(len(gpu.split(',')))])))\n print(\"Devices being used:\", device)\n else:\n parallel = False\n device = torch.device(\"cuda:{}\".format(gpu))\n print(\"Device being used:\", device)\n torch.backends.cudnn.benchmark = enable_benchmark\n return device, parallel", "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) 
+\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def set_memory_growth():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices(\n 'GPU')\n logging.info(\n \"Detect {} Physical GPUs, {} Logical GPUs.\".format(\n len(gpus), len(logical_gpus)))\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n logging.info(e)", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def test_assign_gpu(self):\n gt_bboxes = torch.randn(200, 4)\n bboxes = torch.randn(20000, 4)\n assign_result = self.assigner.assign(bboxes, gt_bboxes)\n assert assign_result.gt_inds.shape == torch.Size([20000])\n assert assign_result.max_overlaps.shape == torch.Size([20000])", "def train_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting train_count_quota.
def train_count_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("train_count_quota must be a postive integer!")
        self._train_count_quota = value
[ "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def model_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_count_quota must be a postive integer!\")\n self._model_count_quota = value", "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def dev_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_count_quota must be a postive integer!\")\n self._dev_count_quota = value", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def _request_quota(self) -> int:", "def tensorboard_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"tensorboard_quota must be a postive integer!\")\n self._tensorboard_quota = value", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def quota(self):\n raise NotImplementedError", "def setQuotaRoot(request, maxsize):", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def total_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise 
ValueError(\"total_cpu_quota must be a number!\")\n self._total_cpu_quota = value", "def _setMaxCount(self, value):\n self.__maxcount = value", "def create_quota(self, values):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting model_memory_quota.
def model_memory_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("model_memory_quota must be a string")
        unit = value[-1:]
        float_value = value[:-1]
        if unit not in constant.CLOUDML_MEMORY_UNITS:
            raise ValueError("model_memory_quota unit must be one of %s!" %
                             constant.CLOUDML_MEMORY_UNITS)
        if not float_value.replace(".", "", 1).isdigit():
            raise ValueError("model_memory_quota must be a number!")
        self._model_memory_quota = value
[ "def set_memlimit(self, value):\n value = value * 1024 * 1024\n self.set_int(\"memory.limit_in_bytes\", value)", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def dev_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"dev_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_memory_quota must be a number!\")\n self._dev_memory_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def setQuotaRoot(request, maxsize):", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def total_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"total_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_memory_quota must be a number!\")\n self._total_memory_quota = value", "def quota_set(self, path, quota_type, size):\n quota_json = self.quota_get(path, quota_type)\n if quota_json is None:\n self.quota_create(path, quota_type, size)\n else:\n # quota already exists, modify it's size\n quota_id = quota_json['id']\n self.quota_modify_size(quota_id, size)", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def model_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_count_quota must be a postive integer!\")\n self._model_count_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def tensorboard_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"tensorboard_quota must be a postive integer!\")\n self._tensorboard_quota = value", "def memory_gb(self, 
memory_gb):\n\n self._memory_gb = memory_gb", "def set_quota(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"printers/quota/set invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tprinter = request.options.get('printer','')\n\t\tuser = request.options.get('user','')\n\t\tsoft = request.options.get('soft',0)\n\t\thard = request.options.get('hard',0)\n\n\t\tif printer=='' or user=='':\n\t\t\tresult = \"Required parameter missing\"\n\t\telse:\n\t\t\tresult = self._set_quota(printer,user,soft,hard)\n\n\t\t# ---------- DEBUG --------------\n\t\tMODULE.info(\"printers/quota/set returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id, result)", "def set_quota(self, value=None, override_rules=False):\n old_quota = self.quota\n if value is None:\n if self.use_domain_quota:\n self.quota = self.domain.default_mailbox_quota\n else:\n self.quota = 0\n else:\n self.quota = value\n if self.quota == 0:\n if self.domain.quota and not override_rules:\n raise lib_exceptions.BadRequest(_(\"A quota is required\"))\n elif self.domain.quota:\n quota_usage = self.domain.allocated_quota\n if old_quota:\n quota_usage -= old_quota\n if quota_usage + self.quota > self.domain.quota:\n raise lib_exceptions.BadRequest(\n _(\"{}: domain quota exceeded\").format(self.domain.name)\n )", "def quota(self):\n raise NotImplementedError", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting model_cpu_quota.
def model_cpu_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("model_cpu_quota must be a string!")
        if not value.replace(".", "", 1).isdigit():
            raise ValueError("model_cpu_quota must be a number!")
        self._model_cpu_quota = value
[ "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def total_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_cpu_quota must be a number!\")\n self._total_cpu_quota = value", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def set_cpu_request(self, cpu_request):\n self.cpu_request = cpu_request", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def model_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_count_quota must be a postive integer!\")\n self._model_count_quota = value", "def set_number_used_cores(job):\n\n pilot_user = os.environ.get('PILOT_USER', 'generic').lower()\n cpu = __import__('pilot.user.%s.cpu' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3\n cpu.set_core_counts(job)", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def _set_cpuunits(self, instance, units=None):\n if not units:\n 
LOG.debug(\"Reported cpuunits %s\" % self.utility['UNITS'])\n LOG.debug(\"Reported percent of resource: %s\" %\n self._percent_of_resource(instance))\n units = int(self.utility['UNITS'] *\n self._percent_of_resource(instance))\n # TODO(imsplitbit): This needs to be adjusted to not allow\n # subscription of more than available cpuunits. For now we\n # won't let the obvious case of a container getting more than\n # the maximum cpuunits for the host.\n if units > self.utility['UNITS']:\n units = self.utility['UNITS']\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--cpuunits', units)\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Cannot set cpuunits for %s' %\n (instance['id'],))", "def train_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def limit_cpu(self) -> Optional[str]:\n return pulumi.get(self, \"limit_cpu\")", "def ex_set_vm_cpu(self, vapp_or_vm_id, vm_cpu):\r\n self._validate_vm_cpu(vm_cpu)\r\n self._change_vm_cpu(vapp_or_vm_id, vm_cpu)", "def set_cpus(self, num_cpus):\n if self.batch:\n self.batch_settings.batch_args[\"cpus-per-task\"] = num_cpus\n for db in self:\n db.run_settings.set_cpus_per_task(num_cpus)", "def setQuotaRoot(request, maxsize):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting model_gpu_quota.
def model_gpu_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("model_gpu_quota must be a postive integer!")
        self._model_gpu_quota = value
[ "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def set_GPU_Memory_Limit():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)", "def limit_gpu(config: Dict):\n if config['limit_gpu'] is not False:\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)", "def assign_gpu_and_run(self):\n if not self.gpu_free.empty():\n\n # Retrieve the job from the queue\n job_to_run = self.check_enough_gpu()\n\n if job_to_run is None:\n return\n\n # Floor division to get lower bound of num_gpus\n num_gpus = int(job_to_run.width)//1000 + 1\n\n #if (int(job_to_run.width) % 1000)/1000 >= 0.4:\n # num_gpus += 2\n # This is okay because we already know that from check_enough_gpu that\n # gpu_free's size is greater than int(job_to_run.width)/1000\n\n for _ in range(num_gpus):\n job_to_run.gpu.append(self.gpu_free.get())\n\n # Create a copy of the environemnt\n new_env = os.environ.copy()\n\n # Create the CUDA GPU string\n gpu_string = \"\"\n\n i = 0\n while (i < len(job_to_run.gpu)):\n if i == 0:\n gpu_string = gpu_string + str(job_to_run.gpu[i])\n else:\n gpu_string = gpu_string + \",\" + str(job_to_run.gpu[i])\n i += 1\n\n new_env['CUDA_VISIBLE_DEVICES'] = gpu_string\n\n params = ['python',\n '/app/neural-style/neural_style.py',\n '--content', '%s' % job_to_run.path1,\n '--styles', '%s' % job_to_run.path2,\n '--output','%s' % job_to_run.output_path,\n '--content-weight', str(job_to_run.content_weight),\n '--content-weight-blend', str(job_to_run.content_blend),\n '--style-weight', str(job_to_run.style_weight),\n '--style-layer-weight-exp', str(job_to_run.style_layer_weight_exp),\n '--style-scales', str(job_to_run.style_scale),\n '--style-blend-weights', str(job_to_run.style_blend),\n '--iterations', str(job_to_run.iterations),\n '--width', str(job_to_run.width),\n '--network', VGG_LOCATION ]\n\n # set preserve colors if indicated\n # assuming that preserve_colors will be of type boolean\n if job_to_run.preserve_color:\n params.append('--preserve-colors')\n\n # Run the subprocess\n try:\n job_to_run.proc = Popen(params, env=new_env)\n self.logger.log.info(\"Popen worked! 
Job %d assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.running_jobs.append(job_to_run)\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))\n\n except Exception as e:\n self.logger.log.error(\"Job %d could not be assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.logger.log.exception(e)\n\n #c = self.db.cursor()\n #c.execute(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", (job_to_run.job_id,))\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", True, (job_to_run.job_id,))\n\n for free in job_to_run.gpu:\n self.gpu_free.put(free)\n\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))", "def limit_mem():\n K.get_session().close()\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.allow_growth = True\n K.set_session(K.tf.Session(config=cfg))", "def set_gpu_memory_target(frac):\n import keras\n if keras.backend.backend() != 'tensorflow':\n print(\"Return without doing anything\")\n return\n # Do the import here, not at the top, in case Tensorflow is not\n # installed at all.\n import tensorflow as tf\n #from keras.backend.tensorflow_backend import set_session\n if tf_version_comp(tf.__version__):\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.compat.v1.Session(config=config))\n session = tf.compat.v1.Session(config=config)\n # tf.compat.v1.keras.backend.set_session(session)\n\n else:\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.Session(config=config))\n session = tf.Session(config=config)", "def set_gpu():\n if Config.gpu_count == 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1\n elif Config.gpu_count == 2:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2\n elif Config.gpu_count == 3:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3\n elif Config.gpu_count == 4:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3 + ', ' + Config.gpu4", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a 
number!\")\n self._dev_cpu_quota = value", "def configure_gpu_cpu(RUN_GPU, GPU_ALLOCATION):\n # Extra imports to set GPU options\n import tensorflow as tf\n from keras import backend as k\n import os\n # To force code to run on cpu\n if RUN_GPU==False:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if RUN_GPU and GPU_ALLOCATION !=100:\n # TensorFlow congif\n config = tf.ConfigProto()\n\n # Allocate memory as-needed\n config.gpu_options.allow_growth = True\n\n # Allocate GPU memory based on user input USE_GPU\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ALLOCATION/100\n\n # Create a session with the above specified options\n k.tensorflow_backend.set_session(tf.Session(config=config))", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def setQuotaRoot(request, maxsize):", "def dev_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"dev_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_memory_quota must be a number!\")\n self._dev_memory_quota = value", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting model_count_quota.
def model_count_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("model_count_quota must be a positive integer!")
    self._model_count_quota = value
[ "def train_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def dev_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_count_quota must be a postive integer!\")\n self._dev_count_quota = value", "def _request_quota(self) -> int:", "def setQuotaRoot(request, maxsize):", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def quota(self):\n raise NotImplementedError", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def create_quota(self, values):", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def run(self, max_quota=1024):\n\n self._update_quotas(\"nova\", self.context[\"tenant\"][\"id\"],\n max_quota)", "def run(self, max_quota=1024):\n\n self._update_quotas(\"nova\", self.context[\"tenant\"][\"id\"],\n max_quota)\n self._delete_quotas(\"nova\", self.context[\"tenant\"][\"id\"])", "def set_limit(context, site, limit, value):\n _set_limits(context, site, ((limit, value),))", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def set_quota(self, value=None, override_rules=False):\n old_quota = self.quota\n if value is None:\n if self.use_domain_quota:\n self.quota = self.domain.default_mailbox_quota\n else:\n self.quota = 0\n else:\n self.quota = value\n if self.quota == 0:\n if self.domain.quota and not override_rules:\n raise lib_exceptions.BadRequest(_(\"A quota is required\"))\n elif self.domain.quota:\n quota_usage = self.domain.allocated_quota\n if old_quota:\n quota_usage -= old_quota\n if quota_usage + self.quota > self.domain.quota:\n raise lib_exceptions.BadRequest(\n _(\"{}: domain quota exceeded\").format(self.domain.name)\n )", "def set_quota(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"printers/quota/set invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tprinter = request.options.get('printer','')\n\t\tuser = request.options.get('user','')\n\t\tsoft = request.options.get('soft',0)\n\t\thard = request.options.get('hard',0)\n\n\t\tif printer=='' or user=='':\n\t\t\tresult = \"Required parameter missing\"\n\t\telse:\n\t\t\tresult = self._set_quota(printer,user,soft,hard)\n\n\t\t# ---------- DEBUG 
--------------\n\t\tMODULE.info(\"printers/quota/set returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id, result)", "def tensorboard_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"tensorboard_quota must be a postive integer!\")\n self._tensorboard_quota = value", "def _setMaxCount(self, value):\n self.__maxcount = value", "def set_limit(self, limit):\n self.limits[self.api_key] = limit" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting dev_memory_quota.
def dev_memory_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("dev_memory_quota must be a string")
        unit = value[-1:]
        float_value = value[:-1]
        if unit not in constant.CLOUDML_MEMORY_UNITS:
            raise ValueError("dev_memory_quota unit must be one of %s!" %
                             constant.CLOUDML_MEMORY_UNITS)
        if not float_value.replace(".", "", 1).isdigit():
            raise ValueError("dev_memory_quota must be a number!")
    self._dev_memory_quota = value
[ "def set_memlimit(self, value):\n value = value * 1024 * 1024\n self.set_int(\"memory.limit_in_bytes\", value)", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def setQuotaRoot(request, maxsize):", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def total_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"total_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_memory_quota must be a number!\")\n self._total_memory_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def set_GPU_Memory_Limit():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def set_quota(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"printers/quota/set invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor 
s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tprinter = request.options.get('printer','')\n\t\tuser = request.options.get('user','')\n\t\tsoft = request.options.get('soft',0)\n\t\thard = request.options.get('hard',0)\n\n\t\tif printer=='' or user=='':\n\t\t\tresult = \"Required parameter missing\"\n\t\telse:\n\t\t\tresult = self._set_quota(printer,user,soft,hard)\n\n\t\t# ---------- DEBUG --------------\n\t\tMODULE.info(\"printers/quota/set returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id, result)", "def _set_diskspace(self, instance, soft=None, hard=None):\n instance_type = instance_types.get_instance_type(\n instance['instance_type_id'])\n\n if not soft:\n soft = int(instance_type['local_gb'])\n\n if not hard:\n hard = int(instance_type['local_gb'] *\n FLAGS.ovz_disk_space_oversub_percent)\n\n # Now set the increment of the limit. I do this here so that I don't\n # have to do this in every line above.\n soft = '%s%s' % (soft, FLAGS.ovz_disk_space_increment)\n hard = '%s%s' % (hard, FLAGS.ovz_disk_space_increment)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--diskspace',\n '%s:%s' % (soft, hard))\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Error setting diskspace quota for %s' %\n (instance['id'],))", "def dev_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_count_quota must be a postive integer!\")\n self._dev_count_quota = value", "def set_disk_quota(self, disk_quota: int):\n data = self._props.copy()\n data[\"quota\"] = int(disk_quota) if disk_quota else self.QUOTA_INFINITE\n resp = self._http.post(\"/api/users/update\", json=[{\"action\": \"update\", \"user\": data}], csrf=True).json()\n\n if resp.get(\"status\") == \"ok\":\n self._props.update(data)\n else:\n raise StackException(\"Unable to set user password '{}', expected status 'ok' and got response: {}\".format(self.username, resp))", "def quota_set(self, username, filesystem, bqs, bqh, iqs, iqh):\n\n self.cmd(\"%s -r %s %d %d %d %d %s\" %\n (rbconfig.command_setquota, self.shquote(str(username)), bqs,\n bqh, iqs, iqh, filesystem))", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting dev_cpu_quota.
def dev_cpu_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("dev_cpu_quota must be a string!")
        if not value.replace(".", "", 1).isdigit():
            raise ValueError("dev_cpu_quota must be a number!")
    self._dev_cpu_quota = value
[ "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def total_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_cpu_quota must be a number!\")\n self._total_cpu_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def dev_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"dev_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_memory_quota must be a number!\")\n self._dev_memory_quota = value", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def dev_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_count_quota must be a postive integer!\")\n self._dev_count_quota = value", "def set_cpu_request(self, cpu_request):\n self.cpu_request = cpu_request", "def set_disk_quota(self, disk_quota: int):\n data = self._props.copy()\n data[\"quota\"] = int(disk_quota) if disk_quota else self.QUOTA_INFINITE\n resp = self._http.post(\"/api/users/update\", json=[{\"action\": \"update\", \"user\": data}], csrf=True).json()\n\n if resp.get(\"status\") == \"ok\":\n 
self._props.update(data)\n else:\n raise StackException(\"Unable to set user password '{}', expected status 'ok' and got response: {}\".format(self.username, resp))", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def _request_quota(self) -> int:", "def set_quota(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"printers/quota/set invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tprinter = request.options.get('printer','')\n\t\tuser = request.options.get('user','')\n\t\tsoft = request.options.get('soft',0)\n\t\thard = request.options.get('hard',0)\n\n\t\tif printer=='' or user=='':\n\t\t\tresult = \"Required parameter missing\"\n\t\telse:\n\t\t\tresult = self._set_quota(printer,user,soft,hard)\n\n\t\t# ---------- DEBUG --------------\n\t\tMODULE.info(\"printers/quota/set returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id, result)", "def _set_cpuunits(self, instance, units=None):\n if not units:\n LOG.debug(\"Reported cpuunits %s\" % self.utility['UNITS'])\n LOG.debug(\"Reported percent of resource: %s\" %\n self._percent_of_resource(instance))\n units = int(self.utility['UNITS'] *\n self._percent_of_resource(instance))\n # TODO(imsplitbit): This needs to be adjusted to not allow\n # subscription of more than available cpuunits. For now we\n # won't let the obvious case of a container getting more than\n # the maximum cpuunits for the host.\n if units > self.utility['UNITS']:\n units = self.utility['UNITS']\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--cpuunits', units)\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Cannot set cpuunits for %s' %\n (instance['id'],))", "def quota_set(self, username, filesystem, bqs, bqh, iqs, iqh):\n\n self.cmd(\"%s -r %s %d %d %d %d %s\" %\n (rbconfig.command_setquota, self.shquote(str(username)), bqs,\n bqh, iqs, iqh, filesystem))", "def setQuotaRoot(request, maxsize):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting dev_gpu_quota.
def dev_gpu_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("dev_gpu_quota must be a positive integer!")
    self._dev_gpu_quota = value
[ "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def set_GPU_Memory_Limit():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)", "def limit_gpu(config: Dict):\n if config['limit_gpu'] is not False:\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def set_gpu():\n if Config.gpu_count == 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1\n elif Config.gpu_count == 2:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2\n elif Config.gpu_count == 3:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3\n elif Config.gpu_count == 4:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3 + ', ' + Config.gpu4", "def dev_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"dev_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_memory_quota must be a number!\")\n self._dev_memory_quota = value", "def assign_gpu_and_run(self):\n if not self.gpu_free.empty():\n\n # Retrieve the job from the queue\n job_to_run = self.check_enough_gpu()\n\n if job_to_run is None:\n return\n\n # Floor division to get lower bound of num_gpus\n num_gpus = int(job_to_run.width)//1000 + 1\n\n #if (int(job_to_run.width) % 1000)/1000 >= 0.4:\n # num_gpus += 2\n # This is okay because we already know that from check_enough_gpu that\n # gpu_free's size is greater than int(job_to_run.width)/1000\n\n for _ in range(num_gpus):\n job_to_run.gpu.append(self.gpu_free.get())\n\n # Create a copy of the environemnt\n new_env = os.environ.copy()\n\n # Create the CUDA GPU 
string\n gpu_string = \"\"\n\n i = 0\n while (i < len(job_to_run.gpu)):\n if i == 0:\n gpu_string = gpu_string + str(job_to_run.gpu[i])\n else:\n gpu_string = gpu_string + \",\" + str(job_to_run.gpu[i])\n i += 1\n\n new_env['CUDA_VISIBLE_DEVICES'] = gpu_string\n\n params = ['python',\n '/app/neural-style/neural_style.py',\n '--content', '%s' % job_to_run.path1,\n '--styles', '%s' % job_to_run.path2,\n '--output','%s' % job_to_run.output_path,\n '--content-weight', str(job_to_run.content_weight),\n '--content-weight-blend', str(job_to_run.content_blend),\n '--style-weight', str(job_to_run.style_weight),\n '--style-layer-weight-exp', str(job_to_run.style_layer_weight_exp),\n '--style-scales', str(job_to_run.style_scale),\n '--style-blend-weights', str(job_to_run.style_blend),\n '--iterations', str(job_to_run.iterations),\n '--width', str(job_to_run.width),\n '--network', VGG_LOCATION ]\n\n # set preserve colors if indicated\n # assuming that preserve_colors will be of type boolean\n if job_to_run.preserve_color:\n params.append('--preserve-colors')\n\n # Run the subprocess\n try:\n job_to_run.proc = Popen(params, env=new_env)\n self.logger.log.info(\"Popen worked! Job %d assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.running_jobs.append(job_to_run)\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))\n\n except Exception as e:\n self.logger.log.error(\"Job %d could not be assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.logger.log.exception(e)\n\n #c = self.db.cursor()\n #c.execute(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", (job_to_run.job_id,))\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", True, (job_to_run.job_id,))\n\n for free in job_to_run.gpu:\n self.gpu_free.put(free)\n\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))", "def set_gpu_memory_target(frac):\n import keras\n if keras.backend.backend() != 'tensorflow':\n print(\"Return without doing anything\")\n return\n # Do the import here, not at the top, in case Tensorflow is not\n # installed at all.\n import tensorflow as tf\n #from keras.backend.tensorflow_backend import set_session\n if tf_version_comp(tf.__version__):\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.compat.v1.Session(config=config))\n session = tf.compat.v1.Session(config=config)\n # tf.compat.v1.keras.backend.set_session(session)\n\n else:\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.Session(config=config))\n session = tf.Session(config=config)", "def configure_gpu_cpu(RUN_GPU, GPU_ALLOCATION):\n # Extra imports to set GPU options\n import tensorflow as tf\n from keras import backend as k\n import os\n # To force code to run on cpu\n if RUN_GPU==False:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if RUN_GPU and GPU_ALLOCATION !=100:\n # TensorFlow congif\n config = tf.ConfigProto()\n\n # Allocate memory as-needed\n config.gpu_options.allow_growth = True\n\n # Allocate GPU memory based on user input USE_GPU\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ALLOCATION/100\n\n # Create a session with the above specified options\n k.tensorflow_backend.set_session(tf.Session(config=config))", "def limit_mem():\n 
K.get_session().close()\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.allow_growth = True\n K.set_session(K.tf.Session(config=cfg))", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def set_disk_quota(self, disk_quota: int):\n data = self._props.copy()\n data[\"quota\"] = int(disk_quota) if disk_quota else self.QUOTA_INFINITE\n resp = self._http.post(\"/api/users/update\", json=[{\"action\": \"update\", \"user\": data}], csrf=True).json()\n\n if resp.get(\"status\") == \"ok\":\n self._props.update(data)\n else:\n raise StackException(\"Unable to set user password '{}', expected status 'ok' and got response: {}\".format(self.username, resp))", "def quota_set(self, username, filesystem, bqs, bqh, iqs, iqh):\n\n self.cmd(\"%s -r %s %d %d %d %d %s\" %\n (rbconfig.command_setquota, self.shquote(str(username)), bqs,\n bqh, iqs, iqh, filesystem))", "def set_gpu(gpu, enable_benchmark=True):\n if len(str(gpu)) > 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n parallel = True\n device = torch.device(\"cuda:{}\".format(','.join([str(a) for a in range(len(gpu.split(',')))])))\n print(\"Devices being used:\", device)\n else:\n parallel = False\n device = torch.device(\"cuda:{}\".format(gpu))\n print(\"Device being used:\", device)\n torch.backends.cudnn.benchmark = enable_benchmark\n return device, parallel", "def set_gpu_limit(\n self,\n gpu: Union[str, _pipeline_param.PipelineParam],\n vendor: Union[str, _pipeline_param.PipelineParam] = 'nvidia'\n ) -> 'Container':\n\n if not isinstance(gpu, _pipeline_param.PipelineParam):\n self._validate_positive_number(gpu, 'gpu')\n\n if self._container_spec:\n # For backforward compatibiliy, allow `gpu` to be a string.\n self._container_spec.resources.accelerator.count = int(gpu)\n\n if vendor != 'nvidia' and vendor != 'amd':\n raise ValueError('vendor can only be nvidia or amd.')\n\n return self.add_resource_limit('%s.com/gpu' % vendor, gpu)\n\n return self.add_resource_limit(vendor, gpu)", "def dev_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_count_quota must be a postive integer!\")\n self._dev_count_quota = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting dev_count_quota.
def dev_count_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("dev_count_quota must be a positive integer!")
    self._dev_count_quota = value
[ "def model_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_count_quota must be a postive integer!\")\n self._model_count_quota = value", "def _request_quota(self) -> int:", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def train_count_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def setQuotaRoot(request, maxsize):", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def quota(self):\n raise NotImplementedError", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def set_quota(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"printers/quota/set invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tprinter = request.options.get('printer','')\n\t\tuser = request.options.get('user','')\n\t\tsoft = request.options.get('soft',0)\n\t\thard = request.options.get('hard',0)\n\n\t\tif printer=='' or user=='':\n\t\t\tresult = \"Required parameter missing\"\n\t\telse:\n\t\t\tresult = self._set_quota(printer,user,soft,hard)\n\n\t\t# ---------- DEBUG --------------\n\t\tMODULE.info(\"printers/quota/set returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id, result)", "def create_quota(self, values):", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def quotaUsedBytes(): # @NoSelf", "def set_quota(self, value=None, override_rules=False):\n old_quota = self.quota\n if value is None:\n if self.use_domain_quota:\n self.quota = self.domain.default_mailbox_quota\n else:\n self.quota = 0\n else:\n self.quota = value\n if self.quota == 0:\n if self.domain.quota and not override_rules:\n raise lib_exceptions.BadRequest(_(\"A quota is required\"))\n elif self.domain.quota:\n quota_usage = self.domain.allocated_quota\n if old_quota:\n quota_usage -= old_quota\n if quota_usage + self.quota > self.domain.quota:\n raise lib_exceptions.BadRequest(\n _(\"{}: domain quota exceeded\").format(self.domain.name)\n )", "def quota_set(self, username, filesystem, bqs, bqh, iqs, iqh):\n\n self.cmd(\"%s -r %s %d %d %d %d %s\" %\n (rbconfig.command_setquota, self.shquote(str(username)), bqs,\n bqh, iqs, iqh, 
filesystem))", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def run(self, max_quota=1024):\n\n self._update_quotas(\"nova\", self.context[\"tenant\"][\"id\"],\n max_quota)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting total_memory_quota.
def total_memory_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("total_memory_quota must be a string")
        unit = value[-1:]
        float_value = value[:-1]
        if unit not in constant.CLOUDML_MEMORY_UNITS:
            raise ValueError("total_memory_quota unit must be one of %s!" %
                             constant.CLOUDML_MEMORY_UNITS)
        if not float_value.replace(".", "", 1).isdigit():
            raise ValueError("total_memory_quota must be a number!")
    self._total_memory_quota = value
[ "def dev_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"dev_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_memory_quota must be a number!\")\n self._dev_memory_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def set_memlimit(self, value):\n value = value * 1024 * 1024\n self.set_int(\"memory.limit_in_bytes\", value)", "def total_used_space(self, total_used_space):\n\n self._total_used_space = total_used_space", "def total_disk_space_gb(self, total_disk_space_gb):\n\n self._total_disk_space_gb = total_disk_space_gb", "def total_free_space(self, total_free_space):\n\n self._total_free_space = total_free_space", "def setQuotaRoot(request, maxsize):", "def total_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_cpu_quota must be a number!\")\n self._total_cpu_quota = value", "def quotaUsedBytes(): # @NoSelf", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def adjustQuotaUsedBytes(delta): # @NoSelf", "def available_memory_mb(self) -> int:\n return pulumi.get(self, \"available_memory_mb\")", "def total_free_space(self):\n return self.free_space", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def _memory():\n\n free_lines = subprocess.check_output([\"free\", \"-b\", \"-w\"],\n universal_newlines=True).split('\\n')\n free_grid = [x.split() for x in free_lines]\n # Identify columns for \"total\" and \"available\"\n total_idx = 
free_grid[0].index(\"total\")\n available_idx = free_grid[0].index(\"available\")\n total = int(free_grid[1][1 + total_idx])\n available = int(free_grid[1][1 + available_idx])\n used = total - available\n total_gb = total / (1024.0 * 1024.0 * 1024.0)\n used_gb = used / (1024.0 * 1024.0 * 1024.0)\n return (total_gb, used_gb)", "def min_total_memory():\n return CONSTANTS[\"MIN_TOTAL_MEMORY\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting total_cpu_quota.
def total_cpu_quota(self, value):
    if value != None:
        if not isinstance(value, str):
            raise ValueError("total_cpu_quota must be a string!")
        if not value.replace(".", "", 1).isdigit():
            raise ValueError("total_cpu_quota must be a number!")
    self._total_cpu_quota = value
[ "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def train_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_cpu_quota must be a number!\")\n self._train_cpu_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def model_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_cpu_quota must be a number!\")\n self._model_cpu_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def total_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"total_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_memory_quota must be a number!\")\n self._total_memory_quota = value", "def add_cpu_usage(self, cpu_usage):\n self.cpu_metrics = {\"number\": cpu_usage}", "def _set_cpuunits(self, instance, units=None):\n if not units:\n LOG.debug(\"Reported cpuunits %s\" % self.utility['UNITS'])\n LOG.debug(\"Reported percent of resource: %s\" %\n self._percent_of_resource(instance))\n units = int(self.utility['UNITS'] *\n self._percent_of_resource(instance))\n # TODO(imsplitbit): This needs to be adjusted to not allow\n # subscription of more than available cpuunits. 
For now we\n # won't let the obvious case of a container getting more than\n # the maximum cpuunits for the host.\n if units > self.utility['UNITS']:\n units = self.utility['UNITS']\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--cpuunits', units)\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Cannot set cpuunits for %s' %\n (instance['id'],))", "def total_cpus():\n return mp.cpu_count()", "def set_number_used_cores(job):\n\n pilot_user = os.environ.get('PILOT_USER', 'generic').lower()\n cpu = __import__('pilot.user.%s.cpu' % pilot_user, globals(), locals(), [pilot_user], 0) # Python 2/3\n cpu.set_core_counts(job)", "def cpu_usage(self, cpu_usage):\n if self.local_vars_configuration.client_side_validation and cpu_usage is None: # noqa: E501\n raise ValueError(\"Invalid value for `cpu_usage`, must not be `None`\") # noqa: E501\n\n self._cpu_usage = cpu_usage", "def CpuUsageTimer(self):\n (new_used, new_total) = self._ParseProcStat()\n total = new_total - self.cpu_total\n used = new_used - self.cpu_used\n if total == 0:\n self.cpu_usage = 0.0\n else:\n self.cpu_usage = (used / total) * 100.0\n self.cpu_total = new_total\n self.cpu_used = new_used", "def get_cpu_usage():\n return psutil.cpu_percent()", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def quotaUsedBytes(): # @NoSelf", "def _apply_cpu_count(self, args, thisTask, cmd_args, payload, setup):\n if thisTask.ncores is not None:\n cmd_args.append(\"-l\")\n cmd_args.append(\"nodes=1:ppn={:d}\".format(thisTask.ncores))\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting total_gpu_quota.
def total_gpu_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("total_gpu_quota must be a positive integer!")
    self._total_gpu_quota = value
[ "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def set_GPU_Memory_Limit():\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n except RuntimeError as e:\n print(e)", "def limit_gpu(config: Dict):\n if config['limit_gpu'] is not False:\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def assign_gpu_and_run(self):\n if not self.gpu_free.empty():\n\n # Retrieve the job from the queue\n job_to_run = self.check_enough_gpu()\n\n if job_to_run is None:\n return\n\n # Floor division to get lower bound of num_gpus\n num_gpus = int(job_to_run.width)//1000 + 1\n\n #if (int(job_to_run.width) % 1000)/1000 >= 0.4:\n # num_gpus += 2\n # This is okay because we already know that from check_enough_gpu that\n # gpu_free's size is greater than int(job_to_run.width)/1000\n\n for _ in range(num_gpus):\n job_to_run.gpu.append(self.gpu_free.get())\n\n # Create a copy of the environemnt\n new_env = os.environ.copy()\n\n # Create the CUDA GPU string\n gpu_string = \"\"\n\n i = 0\n while (i < len(job_to_run.gpu)):\n if i == 0:\n gpu_string = gpu_string + str(job_to_run.gpu[i])\n else:\n gpu_string = gpu_string + \",\" + str(job_to_run.gpu[i])\n i += 1\n\n new_env['CUDA_VISIBLE_DEVICES'] = gpu_string\n\n params = ['python',\n '/app/neural-style/neural_style.py',\n '--content', '%s' % job_to_run.path1,\n '--styles', '%s' % job_to_run.path2,\n '--output','%s' % job_to_run.output_path,\n '--content-weight', str(job_to_run.content_weight),\n '--content-weight-blend', str(job_to_run.content_blend),\n '--style-weight', str(job_to_run.style_weight),\n '--style-layer-weight-exp', str(job_to_run.style_layer_weight_exp),\n '--style-scales', str(job_to_run.style_scale),\n '--style-blend-weights', str(job_to_run.style_blend),\n '--iterations', str(job_to_run.iterations),\n '--width', str(job_to_run.width),\n '--network', VGG_LOCATION ]\n\n # set preserve colors if indicated\n # assuming that preserve_colors will be of type boolean\n if job_to_run.preserve_color:\n params.append('--preserve-colors')\n\n # Run the subprocess\n try:\n job_to_run.proc = Popen(params, env=new_env)\n self.logger.log.info(\"Popen worked! 
Job %d assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.running_jobs.append(job_to_run)\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))\n\n except Exception as e:\n self.logger.log.error(\"Job %d could not be assigned GPU %s.\" % (job_to_run.job_id, job_to_run.gpu))\n self.logger.log.exception(e)\n\n #c = self.db.cursor()\n #c.execute(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", (job_to_run.job_id,))\n self.safe_execute_sql(\"UPDATE deepstyle_job SET job_status='PF' WHERE id = (%s)\", True, (job_to_run.job_id,))\n\n for free in job_to_run.gpu:\n self.gpu_free.put(free)\n\n self.logger.log.info(\"The number of free gpus is: \" + str(self.gpu_free.qsize()))", "def total_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_cpu_quota must be a number!\")\n self._total_cpu_quota = value", "def gpu_usage(device=device, digits=4):\n print(\n f\"GPU Usage: {round((torch.cuda.memory_allocated(device=device) / 1e9), digits)} GB\\n\"\n )", "def quotaUsedBytes(): # @NoSelf", "def dev_cpu_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"dev_cpu_quota must be a string!\")\n if not value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"dev_cpu_quota must be a number!\")\n self._dev_cpu_quota = value", "def total_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"total_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"total_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"total_memory_quota must be a number!\")\n self._total_memory_quota = value", "def gpu_num(self):\n return sum([len(gpu) for gpu in self.worker_vacant_gpus.values()])", "def limit_mem():\n K.get_session().close()\n cfg = K.tf.ConfigProto()\n cfg.gpu_options.allow_growth = True\n K.set_session(K.tf.Session(config=cfg))", "def total_free_space(self, total_free_space):\n\n self._total_free_space = total_free_space", "def set_gpu():\n if Config.gpu_count == 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1\n elif Config.gpu_count == 2:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2\n elif Config.gpu_count == 3:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3\n elif Config.gpu_count == 4:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = Config.gpu1 + ', ' + Config.gpu2 + ', ' + Config.gpu3 + ', ' + Config.gpu4", "def configure_gpu_cpu(RUN_GPU, GPU_ALLOCATION):\n # Extra imports to set GPU options\n import tensorflow as tf\n from keras import backend as k\n import os\n # To force code to run on cpu\n if RUN_GPU==False:\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n if RUN_GPU and GPU_ALLOCATION !=100:\n # TensorFlow congif\n config = tf.ConfigProto()\n\n # Allocate memory as-needed\n config.gpu_options.allow_growth = True\n\n # Allocate GPU memory based on user input USE_GPU\n config.gpu_options.per_process_gpu_memory_fraction = GPU_ALLOCATION/100\n\n # Create a session with the above specified options\n k.tensorflow_backend.set_session(tf.Session(config=config))", "def 
set_gpu_memory_target(frac):\n import keras\n if keras.backend.backend() != 'tensorflow':\n print(\"Return without doing anything\")\n return\n # Do the import here, not at the top, in case Tensorflow is not\n # installed at all.\n import tensorflow as tf\n #from keras.backend.tensorflow_backend import set_session\n if tf_version_comp(tf.__version__):\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.compat.v1.Session(config=config))\n session = tf.compat.v1.Session(config=config)\n # tf.compat.v1.keras.backend.set_session(session)\n\n else:\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = frac\n config.gpu_options.allow_growth = True\n # set_session(tf.Session(config=config))\n session = tf.Session(config=config)", "def total_disk_space_gb(self, total_disk_space_gb):\n\n self._total_disk_space_gb = total_disk_space_gb" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for setting tensorboard_quota.
def tensorboard_quota(self, value):
    if value != None:
        if not (isinstance(value, int) and value > 0):
            raise ValueError("tensorboard_quota must be a positive integer!")
    self._tensorboard_quota = value
[ "def model_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"model_gpu_quota must be a postive integer!\")\n self._model_gpu_quota = value", "def train_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"train_gpu_quota must be a postive integer!\")\n self._train_gpu_quota = value", "def dev_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"dev_gpu_quota must be a postive integer!\")\n self._dev_gpu_quota = value", "def set_quota(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"printers/quota/set invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\tprinter = request.options.get('printer','')\n\t\tuser = request.options.get('user','')\n\t\tsoft = request.options.get('soft',0)\n\t\thard = request.options.get('hard',0)\n\n\t\tif printer=='' or user=='':\n\t\t\tresult = \"Required parameter missing\"\n\t\telse:\n\t\t\tresult = self._set_quota(printer,user,soft,hard)\n\n\t\t# ---------- DEBUG --------------\n\t\tMODULE.info(\"printers/quota/set returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# --------------------------------\n\n\t\tself.finished(request.id, result)", "def setquota(self, root, limits):\n typ, dat = self._simple_command('SETQUOTA', root, limits)\n return self._untagged_response(typ, dat, 'QUOTA')", "def setQuotaRoot(request, maxsize):", "def _request_quota(self) -> int:", "def train_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"train_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"train_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"train_memory_quota must be a number!\")\n self._train_memory_quota = value", "def total_gpu_quota(self, value):\n if value != None:\n if not (isinstance(value, int) and value > 0):\n raise ValueError(\"total_gpu_quota must be a postive integer!\")\n self._total_gpu_quota = value", "def set_quota_value(self, quota):\n\n self.send_qwctl_command('set quota %d' % quota,\n ['quota must be between'])", "def test_set_bucket_quota_succes(self):\n response = self.cm.set_bucket_quota(\"Testforreal\", \"TB\", \"1\")\n self.assertEqual(response.status_code, 200)", "def set_quota(self, value=None, override_rules=False):\n old_quota = self.quota\n if value is None:\n if self.use_domain_quota:\n self.quota = self.domain.default_mailbox_quota\n else:\n self.quota = 0\n else:\n self.quota = value\n if self.quota == 0:\n if self.domain.quota and not override_rules:\n raise lib_exceptions.BadRequest(_(\"A quota is required\"))\n elif self.domain.quota:\n quota_usage = self.domain.allocated_quota\n if old_quota:\n quota_usage -= old_quota\n if quota_usage + self.quota > self.domain.quota:\n raise lib_exceptions.BadRequest(\n _(\"{}: domain quota exceeded\").format(self.domain.name)\n )", "def create_quota(self, values):", "def quota(self):\n raise NotImplementedError", "def train_count_quota(self, value):\n if value != None:\n if not 
(isinstance(value, int) and value > 0):\n raise ValueError(\"train_count_quota must be a postive integer!\")\n self._train_count_quota = value", "def model_memory_quota(self, value):\n if value != None:\n if not isinstance(value, str):\n raise ValueError(\"model_memory_quota must be a string\")\n unit = value[-1:]\n float_value = value[:-1]\n if unit not in constant.CLOUDML_MEMORY_UNITS:\n raise ValueError(\"model_memory_quota unit must be one of %s!\" %\n constant.CLOUDML_MEMORY_UNITS)\n if not float_value.replace(\".\", \"\", 1).isdigit():\n raise ValueError(\"model_memory_quota must be a number!\")\n self._model_memory_quota = value", "def quota_set(self, username, filesystem, bqs, bqh, iqs, iqh):\n\n self.cmd(\"%s -r %s %d %d %d %d %s\" %\n (rbconfig.command_setquota, self.shquote(str(username)), bqs,\n bqh, iqs, iqh, filesystem))", "def set_cpu_quota(self, new_cpu_quota):\n try:\n requests.post(\n 'http://%s:5000' %\n (self.actuator.api_address),\n data='{\\\"cpu_quota\\\":\\\"' +\n str(new_cpu_quota) +\n '\\\"}')\n except Exception as ex:\n print(\"Error while modifying cpu quota\")\n print ex.message\n raise", "def quotaAllowedBytes(): # @NoSelf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SF reference circuit for gate tests
def SF_gate_reference(sf_op, cutoff_dim, wires, *args):
    eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff_dim})
    prog = sf.Program(2)
    with prog.context as q:
        sf.ops.S2gate(0.1) | q
        sf_op(*args) | [q[i] for i in wires]

    state = eng.run(prog).state
    return state.mean_photon(0)[0], state.mean_photon(1)[0]
[ "def test_gate_multimode(self):\n xir_prog = xir.Program()\n xir_prog.add_statement(xir.Statement(\"BSgate\", {\"theta\": 0.54, \"phi\": np.pi}, (0, 2)))\n\n sf_prog = io.to_program(xir_prog)\n\n assert len(sf_prog) == 1\n assert sf_prog.circuit\n assert sf_prog.circuit[0].op.__class__.__name__ == \"BSgate\"\n assert sf_prog.circuit[0].op.p[0] == 0.54\n assert sf_prog.circuit[0].op.p[1] == np.pi\n assert sf_prog.circuit[0].reg[0].ind == 0\n assert sf_prog.circuit[0].reg[1].ind == 2", "def test_controlled_by_gates_fusion(backend):\n c = Circuit(4)\n c.add((gates.H(i) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n c.add((gates.RY(i, theta=0.5678) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())", "def test_fock_state(self, tol):\n arg = 1\n wires = [0]\n\n gate_name = \"FockState\"\n operation = qml.FockState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(arg)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, arg)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def circuit(self):\n raise NotImplementedError", "def test_gaussian_state(self, tol):\n V = np.array([[0.5, 0], [0, 2]])\n r = np.array([0, 0])\n\n wires = [0]\n\n gate_name = \"GaussianState\"\n operation = qml.GaussianState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(V, r)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, V, r)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_target_basis_01(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n circuit.rz(np.pi, 0)\n theta = Parameter(\"theta\")\n target = Target(num_qubits=2)\n target.add_instruction(CXGate())\n target.add_instruction(PhaseGate(theta))\n target.add_instruction(SXGate())\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(target=target))\n new_circuit = passmanager.run(circuit)\n expected = QuantumCircuit(1)\n expected.rz(11 * np.pi / 4, 0)\n expected.global_phase = 11 * np.pi / 4 / 2 - np.pi / 2\n\n self.assertEqual(new_circuit, expected)", "def test_script_with_gate_definition(self, use_floats):\n xir_script = inspect.cleandoc(\n \"\"\"\n gate Aubergine(x, y)[w]:\n Squeezed(x, y) | [w];\n end;\n\n gate Banana(a, b, c, d, x, y):\n Aubergine(x, y) | [0];\n Aubergine(x, y) | [1];\n Rgate(a) | [0];\n BSgate(b, c) | [0, 1];\n Rgate(d) | [1];\n end;\n\n Vacuum | [1];\n Banana(0.5, 0.4, 0.0, 0.5, 1.0, 0.0) | [3, 0];\n \"\"\"\n )\n\n xir_prog = xir.parse_script(xir_script, eval_pi=True, use_floats=use_floats)\n sf_prog = io.to_program(xir_prog)\n\n assert isinstance(sf_prog, Program)\n\n assert len(sf_prog) == 6\n assert sf_prog.circuit\n\n names = 
[cmd.op.__class__.__name__ for cmd in sf_prog.circuit]\n parameters = [cmd.op.p for cmd in sf_prog.circuit]\n modes = [[r.ind for r in cmd.reg] for cmd in sf_prog.circuit]\n\n assert names == [\"Vacuum\", \"Squeezed\", \"Squeezed\", \"Rgate\", \"BSgate\", \"Rgate\"]\n assert parameters == [[], [1.0, 0.0], [1.0, 0.0], [0.5], [0.4, 0.0], [0.5]]\n assert modes == [[1], [3], [0], [3], [3, 0], [0]]", "def test_two_mode_gate(self):\n sf_prog = Program(4)\n\n with sf_prog.context as q:\n ops.BSgate(0.54, -0.324) | (q[3], q[0])\n\n xir_prog = io.to_xir(sf_prog)\n\n expected = [(\"BSgate\", [0.54, -0.324], (3, 0))]\n assert [(stmt.name, stmt.params, stmt.wires) for stmt in xir_prog.statements] == expected", "def test_gate_arg(self):\n # create a test program\n sf_prog = Program(2)\n\n with sf_prog.context as q:\n ops.Sgate(0.54, 0.324) | q[1]\n\n xir_prog = io.to_xir(sf_prog)\n\n expected = [(\"Sgate\", [0.54, 0.324], (1,))]\n assert [(stmt.name, stmt.params, stmt.wires) for stmt in xir_prog.statements] == expected", "def test_basis_02(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(basis_gates=[\"cx\", \"rz\", \"sx\"]))\n new_circuit = passmanager.run(circuit)\n\n expected = QuantumCircuit(1)\n expected.rz(7 * np.pi / 4, 0)\n expected.global_phase = 7 * np.pi / 4 / 2\n self.assertEqual(new_circuit, expected)", "def SF_expectation_reference(sf_expectation, cutoff_dim, wires, *args):\n eng = sf.Engine(\"fock\", backend_options={\"cutoff_dim\": cutoff_dim})\n prog = sf.Program(2)\n with prog.context as q:\n sf.ops.Dgate(0.1) | q[0]\n sf.ops.S2gate(0.1) | q\n\n state = eng.run(prog).state\n return sf_expectation(state, Wires(wires), args)[0]", "def test_fock_circuit(self, tol):\n dev = qml.device(\"strawberryfields.fock\", wires=1, cutoff_dim=10)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.NumberOperator(0))\n\n assert np.allclose(circuit(1), 1, atol=tol, rtol=0)", "def test_fock_state_vector(self, tol):\n args = psi\n\n wires = [0]\n\n gate_name = \"FockStateVector\"\n operation = qml.FockStateVector\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(psi)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, psi)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = SuperOp(circuit)\n target = SuperOp(target)\n self.assertEqual(op, target)", "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n 
expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "def test_gaussian_circuit(self):\n self.logTestName()\n\n dev = qml.device('strawberryfields.gaussian', wires=1)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.NumberOperator(0))\n\n self.assertAlmostEqual(circuit(1), 1, delta=self.tol)", "def test_fuse_circuit_two_qubit_only(backend):\n c = Circuit(2)\n c.add(gates.CNOT(0, 1))\n c.add(gates.RX(0, theta=0.1234).controlled_by(1))\n c.add(gates.SWAP(0, 1))\n c.add(gates.fSim(1, 0, theta=0.1234, phi=0.324))\n c.add(gates.RY(1, theta=0.1234).controlled_by(0))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())", "def test_circuit_with_dynamic_circuit(self):\n from unittest.mock import Mock\n\n from qiskit.providers import BackendV2\n from qiskit_aer import Aer\n\n qc = QuantumCircuit(2, 1)\n\n with qc.for_loop(range(5)):\n qc.h(0)\n qc.cx(0, 1)\n qc.measure(0, 0)\n qc.break_loop().c_if(0, True)\n\n backend = Aer.get_backend(\"aer_simulator\")\n backend.set_options(seed_simulator=15)\n sampler = StagedSampler(Mock(BackendV2), skip_transpilation=True)\n sampler._backend = backend # TODO: BackendV2Converter fails for `aer_simulator`\n sampler.set_transpile_options(seed_transpiler=15)\n result = sampler.run(qc).result()\n assert dicts_almost_equal(result.quasi_dists[0], {0: 0.5029296875, 1: 0.4970703125})", "def test_all_gates(self):\n qr = QuantumRegister(2, \"q\")\n circuit = QuantumCircuit(qr)\n circuit.h(qr[0])\n circuit.h(qr[0])\n circuit.x(qr[0])\n circuit.x(qr[0])\n circuit.y(qr[0])\n circuit.y(qr[0])\n circuit.rz(0.5, qr[0])\n circuit.rz(0.5, qr[0])\n circuit.append(U1Gate(0.5), [qr[0]]) # TODO this should work with Phase gates too\n circuit.append(U1Gate(0.5), [qr[0]])\n circuit.rx(0.5, qr[0])\n circuit.rx(0.5, qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.cx(qr[0], qr[1])\n circuit.cy(qr[0], qr[1])\n circuit.cy(qr[0], qr[1])\n circuit.cz(qr[0], qr[1])\n circuit.cz(qr[0], qr[1])\n\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation())\n new_circuit = passmanager.run(circuit)\n\n expected = QuantumCircuit(qr)\n expected.append(RZGate(2.0), [qr[0]])\n expected.rx(1.0, qr[0])\n\n self.assertEqual(expected, new_circuit)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SF reference circuit for expectation tests
def SF_expectation_reference(sf_expectation, cutoff_dim, wires, *args):
    eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff_dim})
    prog = sf.Program(2)
    with prog.context as q:
        sf.ops.Dgate(0.1) | q[0]
        sf.ops.S2gate(0.1) | q

    state = eng.run(prog).state
    return sf_expectation(state, Wires(wires), args)[0]
[ "def test_fock_state(self, tol):\n arg = 1\n wires = [0]\n\n gate_name = \"FockState\"\n operation = qml.FockState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(arg)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, arg)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "def test_target_basis_01(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n circuit.rz(np.pi, 0)\n theta = Parameter(\"theta\")\n target = Target(num_qubits=2)\n target.add_instruction(CXGate())\n target.add_instruction(PhaseGate(theta))\n target.add_instruction(SXGate())\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(target=target))\n new_circuit = passmanager.run(circuit)\n expected = QuantumCircuit(1)\n expected.rz(11 * np.pi / 4, 0)\n expected.global_phase = 11 * np.pi / 4 / 2 - np.pi / 2\n\n self.assertEqual(new_circuit, expected)", "def test_ef_circuits(self):\n\n test_amps = [-0.5, 0, 0.5]\n rabi_ef = EFRoughXSXAmplitudeCal([0], self.cals, amplitudes=test_amps, backend=self.backend)\n\n circs = rabi_ef._transpiled_circuits()\n\n for circ, amp in zip(circs, test_amps):\n\n self.assertEqual(circ.count_ops()[\"x\"], 1)\n self.assertEqual(circ.count_ops()[\"Rabi\"], 1)\n\n d0 = pulse.DriveChannel(0)\n with pulse.build(name=\"x\") as expected_x:\n pulse.play(pulse.Drag(160, 0.5, 40, 0), d0)\n\n with pulse.build(name=\"x12\") as expected_x12:\n with pulse.frequency_offset(-300e6, d0):\n pulse.play(pulse.Drag(160, amp, 40, 0), d0)\n\n self.assertEqual(circ.calibrations[\"x\"][((0,), ())], expected_x)\n self.assertEqual(circ.calibrations[\"Rabi\"][((0,), (amp,))], expected_x12)", "def test_basis_02(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(basis_gates=[\"cx\", \"rz\", \"sx\"]))\n new_circuit = passmanager.run(circuit)\n\n expected = QuantumCircuit(1)\n expected.rz(7 * np.pi / 4, 0)\n expected.global_phase = 7 * np.pi / 4 / 2\n self.assertEqual(new_circuit, expected)", "def test_controlled_by_gates_fusion(backend):\n c = Circuit(4)\n c.add((gates.H(i) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n c.add((gates.RY(i, theta=0.5678) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, 
theta=0.4321).controlled_by(2))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())", "def test_circuit_with_dynamic_circuit(self):\n from unittest.mock import Mock\n\n from qiskit.providers import BackendV2\n from qiskit_aer import Aer\n\n qc = QuantumCircuit(2, 1)\n\n with qc.for_loop(range(5)):\n qc.h(0)\n qc.cx(0, 1)\n qc.measure(0, 0)\n qc.break_loop().c_if(0, True)\n\n backend = Aer.get_backend(\"aer_simulator\")\n backend.set_options(seed_simulator=15)\n sampler = StagedSampler(Mock(BackendV2), skip_transpilation=True)\n sampler._backend = backend # TODO: BackendV2Converter fails for `aer_simulator`\n sampler.set_transpile_options(seed_transpiler=15)\n result = sampler.run(qc).result()\n assert dicts_almost_equal(result.quasi_dists[0], {0: 0.5029296875, 1: 0.4970703125})", "def test_fock_circuit(self, tol):\n dev = qml.device(\"strawberryfields.fock\", wires=1, cutoff_dim=10)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.NumberOperator(0))\n\n assert np.allclose(circuit(1), 1, atol=tol, rtol=0)", "def test_station_track_and_switches_two_trains():\n class Stations_switches_problem():\n \"\"\"\n\n swith - c\n\n tracks - ......\n\n\n .\n 1 -> .\n ..0 -> ................................... c .0-> .. 1->.....\n\n A B\n simplifies swith condition\n \"\"\"\n def __init__(self):\n \"\"\" parmaeters \"\"\"\n\n self.taus = {\"pass\": {\"0_A_B\": 4, \"1_A_B\": 4},\n \"headway\": {\"0_1_A_B\": 2, \"1_0_B_A\": 4},\n \"stop\": {\"0_B\": 1, \"1_B\": 1}, \"res\": 2}\n self.trains_timing = {\"tau\": self.taus,\n \"initial_conditions\": {\"0_A\": 1, \"1_A\": 1},\n \"penalty_weights\": {\"0_A\": 2, \"1_A\": 0.5}}\n\n self.trains_paths = {\n \"Paths\": {0: [\"A\", \"B\"], 1: [\"A\", \"B\"]},\n \"J\": [0, 1],\n \"Jd\": {},\n \"Josingle\": {},\n \"Jround\": {},\n \"Jtrack\": {\"B\": [[0, 1]]},\n \"Jswitch\": {},\n \"add_swithes_at_s\": [\"B\"]\n }\n\n self.p_sum = 2\n self.p_pair = 1.\n self.p_qubic = 2.\n self.d_max = 5\n\n Q = make_Qubo(Stations_switches_problem())\n\n assert np.array_equal(Q, np.load(\"test/files/Qfile_track.npz\")[\"Q\"])\n\n sol = np.load(\"test/files/solution_track.npz\")\n\n assert energy(sol, Q) == -8+0.3", "def test_fock_state_vector(self, tol):\n args = psi\n\n wires = [0]\n\n gate_name = \"FockStateVector\"\n operation = qml.FockStateVector\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(psi)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, psi)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def circuit(self):\n raise NotImplementedError", "def test_circuit_generation(self):\n qubits = [1, 2, 3]\n exp = CorrelatedReadoutError(qubits)\n self.assertEqual(len(exp.circuits()), 8)\n\n exp = LocalReadoutError(qubits)\n self.assertEqual(len(exp.circuits()), 2)", "def test_multiple_simulationobject():\n sim = Sim()\n sys = MassSpringDamper()\n sys.store(\"x1\")\n sys.inputs.b = 50\n sys.inputs.f = 0\n sim.add_system(sys)\n sim.simulate(5, 0.1)\n xref = sys.res.x1\n for dummy in range(60):\n #Create Simulaton\n sim = Sim()\n sys = MassSpringDamper()\n sys.store(\"x1\")\n sys.inputs.b = 50\n sys.inputs.f = 0\n sim.add_system(sys)\n sim.simulate(5, 
0.1)\n x = sys.res.x1\n assert np.all(xref == x)", "def test_quad_operator(self, tol):\n cutoff_dim = 10\n a = 0.312\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n op = qml.QuadOperator\n gate_name = \"QuadOperator\"\n assert dev.supports_observable(gate_name)\n\n sf_expectation = dev._observable_map[gate_name]\n wires = [0]\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(*args, wires=wires))\n\n assert np.allclose(\n circuit(a),\n SF_expectation_reference(sf_expectation, cutoff_dim, wires, a),\n atol=tol,\n rtol=0,\n )", "def test_gaussian_state(self, tol):\n V = np.array([[0.5, 0], [0, 2]])\n r = np.array([0, 0])\n\n wires = [0]\n\n gate_name = \"GaussianState\"\n operation = qml.GaussianState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(V, r)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, V, r)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def test_check_rsw_manual(self, capsys):\n with mock.patch('bbarchivist.networkutils.availability', mock.MagicMock(return_value=True)):\n bs.check_radio_sw(\"http://qrrbrbirlbel.yu/\", \"10.3.2.2474\", False)\n assert \"EXISTS\" in capsys.readouterr()[0]", "def test_operations_after_observables(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n qml.RY(0.5, wires=[0])\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"gates must precede\"):\n node(0.5)", "def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = SuperOp(circuit)\n target = SuperOp(target)\n self.assertEqual(op, target)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the fock plugin requires correct arguments
def test_fock_args(self):
    with pytest.raises(TypeError, match="missing 1 required positional argument: 'wires'"):
        dev = qml.device("strawberryfields.fock")

    with pytest.raises(
        TypeError, match="missing 1 required keyword-only argument: 'cutoff_dim'"
    ):
        dev = qml.device("strawberryfields.fock", wires=1)
[ "def test_Tucker_args():\n testing_function_with_args('tucker')", "def test_function_args(self):\n reporter = SimpleReporter(\n pkgs=[PackageAPI(BASE_PACKAGE), PackageAPI(PACKAGE_WITH_DIFFERENT_ARGS)],\n errors_allowed=100,\n )\n reporter._check_function_args()\n errors = reporter.errors\n self.assertTrue(len(errors) == 2)\n self.assertTrue(all([isinstance(x, DoppelTestError) for x in errors]))\n expected_message = (\n \"Function 'playback()' exists in all packages but some \"\n \"arguments are not shared in all implementations.\"\n )\n self.assertTrue(errors[0].msg == expected_message)", "def test_Complex_args():\n testing_function_with_args('complex')", "def test_some_parser_defaults(self):\n assert self.args.rate == 250.0\n assert self.args.gain == 1", "def test_ConvE_args():\n testing_function_with_args('conve')", "def test_HoLE_args():\n testing_function_with_args('hole')", "def test_ProjE_args():\n testing_function_with_args('proje_pointwise')", "def test_check_if_help_or_version_in_arguments(self):\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--help\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"-h\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'help')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--version\"]\n with patch.object(sys, 'argv', test_args):\n self.assertEqual(argparse_handler.check_if_help_or_version_in_arguments(), 'version')\n\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss']\n with patch.object(sys, 'argv', test_args):\n self.assertIsNone(argparse_handler.check_if_help_or_version_in_arguments())", "def test_get_arg_parser():\n get_arg_parser()", "def test_SLM_args():\n testing_function_with_args('slm')", "def test_validate_ticket_track_arguments_successful_execution():\n\n # Verify valid value\n assert not ExtraHop_v2.validate_ticket_track_arguments(\"3\")", "def test_RESCAL_args():\n testing_function_with_args('rescal')", "def test_argument(self):\n arg = action.Argument('SomeArgument', 'in-and-out', 'Brightness')\n self.assertEqual(arg.get_name(), 'SomeArgument')\n self.assertEqual(arg.get_direction(), 'in-and-out')\n self.assertEqual(arg.get_state_variable(), 'Brightness')", "def test_required_args(self):\n parser, config_dict = set_args()\n args = parser.parse_args(self.cmd_args[5])\n config_dict, arg_dict = parse_args(config_dict, args)\n self.assertTrue(arg_dict.get(\"generate_config\") is None)", "def test_ignorearg(self):\n self.assertEqual(check_args(self.ignorearg), {})", "def test_TransD_args():\n testing_function_with_args('transd')", "def test_instantion():\n args = HatchbuckArgsMock()\n assert isinstance(args, HatchbuckArgsMock)\n\n parser = HatchbuckParser(args)\n assert isinstance(parser, HatchbuckParser)", "def test_arg_name(self):\n self.assertEqual(self.sync_strategy.arg_name, None)\n self.sync_strategy.ARGUMENT = {'name': 'my-sync-strategy'}\n self.assertEqual(self.sync_strategy.arg_name, 'my-sync-strategy')", "def test_check_if_date_in_arguments(self):\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--verbose\"]\n with patch.object(sys, 'argv', test_args):\n self.assertFalse(argparse_handler.check_if_date_in_arguments())\n test_args = [\"rss_reader.py\", 'https://news.tut.by/index.rss', \"--verbose\", '--date']\n with 
patch.object(sys, 'argv', test_args):\n self.assertTrue(argparse_handler.check_if_date_in_arguments())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the fock plugin provides the correct result for a simple circuit
def test_fock_circuit(self, tol):
    dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=10)

    @qml.qnode(dev)
    def circuit(x):
        qml.Displacement(x, 0, wires=0)
        return qml.expval(qml.NumberOperator(0))

    assert np.allclose(circuit(1), 1, atol=tol, rtol=0)
[ "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "def test_coin_info(self):", "def test_fock_state(self, tol):\n arg = 1\n wires = [0]\n\n gate_name = \"FockState\"\n operation = qml.FockState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(arg)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, arg)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_mixing_of_cv_and_qubit_operations(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n qml.Displacement(0.5, 0, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"Continuous and discrete\"):\n node(0.5)", "def test_circuits(self):\n\n drag = FineDrag([0], Gate(\"Drag\", num_qubits=1, params=[]))\n drag.set_experiment_options(schedule=self.schedule)\n drag.backend = FakeArmonkV2()\n for circuit in drag.circuits()[1:]:\n for idx, name in enumerate([\"Drag\", \"rz\", \"Drag\", \"rz\"]):\n self.assertEqual(circuit.data[idx][0].name, name)", "def test_handcrafted_examples(self):\n self.assertTrue(abs(pi(1000000) - 3.14) < 0.01)", "def test_solarnoon(self):\n pass", "def test_fock_state_projector(self, tol):\n cutoff_dim = 12\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device(\"strawberryfields.fock\", wires=2, hbar=hbar, cutoff_dim=cutoff_dim)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a) ** 2 / 2) * a ** 2 / np.sqrt(2)) ** 2\n assert np.allclose(circuit(a), expected, atol=tol, rtol=0)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r))) ** 2\n assert np.allclose(circuit(r), expected, atol=tol, rtol=0)", "def test_integration(self):\n\n d_i = Decider(100, 0.05)\n p_i = Pump('127.0.0.1', '8000')\n s_i = Sensor('127.0.0.2', '8000')\n c_i = Controller(s_i, p_i, d_i)\n c_i.pump.set_state = MagicMock(return_value=True)\n\n for water_level in range(75, 125, 5):\n for action in c_i.actions.values():\n # Measuring water level.\n c_i.sensor.measure = MagicMock(return_value=water_level)\n # Checking pump state.\n 
c_i.pump.get_state = MagicMock(return_value=d_i.decide\n (water_level, action,\n c_i.actions))\n c_i.tick()", "def test_return_of_non_observable(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(wires=0)), 0.3\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"must return either\"):\n node(0.5)", "def test_circuits(self):\n test_amps = [-0.5, 0, 0.5]\n rabi = RoughXSXAmplitudeCal([0], self.cals, amplitudes=test_amps, backend=self.backend)\n\n circs = rabi._transpiled_circuits()\n\n for circ, amp in zip(circs, test_amps):\n self.assertEqual(circ.count_ops()[\"Rabi\"], 1)\n\n d0 = pulse.DriveChannel(0)\n with pulse.build(name=\"x\") as expected_x:\n pulse.play(pulse.Drag(160, amp, 40, 0), d0)\n\n self.assertEqual(circ.calibrations[\"Rabi\"][((0,), (amp,))], expected_x)", "def test_ef_circuits(self):\n\n test_amps = [-0.5, 0, 0.5]\n rabi_ef = EFRoughXSXAmplitudeCal([0], self.cals, amplitudes=test_amps, backend=self.backend)\n\n circs = rabi_ef._transpiled_circuits()\n\n for circ, amp in zip(circs, test_amps):\n\n self.assertEqual(circ.count_ops()[\"x\"], 1)\n self.assertEqual(circ.count_ops()[\"Rabi\"], 1)\n\n d0 = pulse.DriveChannel(0)\n with pulse.build(name=\"x\") as expected_x:\n pulse.play(pulse.Drag(160, 0.5, 40, 0), d0)\n\n with pulse.build(name=\"x12\") as expected_x12:\n with pulse.frequency_offset(-300e6, d0):\n pulse.play(pulse.Drag(160, amp, 40, 0), d0)\n\n self.assertEqual(circ.calibrations[\"x\"][((0,), ())], expected_x)\n self.assertEqual(circ.calibrations[\"Rabi\"][((0,), (amp,))], expected_x12)", "async def test_fan_single_preset_mode(hass: HomeAssistant, hk_driver, events) -> None:\n entity_id = \"fan.demo\"\n\n hass.states.async_set(\n entity_id,\n STATE_ON,\n {\n ATTR_SUPPORTED_FEATURES: FanEntityFeature.PRESET_MODE\n | FanEntityFeature.SET_SPEED,\n ATTR_PERCENTAGE: 42,\n ATTR_PRESET_MODE: \"smart\",\n ATTR_PRESET_MODES: [\"smart\"],\n },\n )\n await hass.async_block_till_done()\n acc = Fan(hass, hk_driver, \"Fan\", entity_id, 1, None)\n hk_driver.add_accessory(acc)\n\n assert acc.char_target_fan_state.value == 1\n\n await acc.run()\n await hass.async_block_till_done()\n\n # Set from HomeKit\n call_set_preset_mode = async_mock_service(hass, DOMAIN, \"set_preset_mode\")\n call_turn_on = async_mock_service(hass, DOMAIN, \"turn_on\")\n\n char_target_fan_state_iid = acc.char_target_fan_state.to_HAP()[HAP_REPR_IID]\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_target_fan_state_iid,\n HAP_REPR_VALUE: 0,\n },\n ]\n },\n \"mock_addr\",\n )\n await hass.async_block_till_done()\n assert call_turn_on[0]\n assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id\n assert call_turn_on[0].data[ATTR_PERCENTAGE] == 42\n assert len(events) == 1\n assert events[-1].data[\"service\"] == \"turn_on\"\n\n hk_driver.set_characteristics(\n {\n HAP_REPR_CHARS: [\n {\n HAP_REPR_AID: acc.aid,\n HAP_REPR_IID: char_target_fan_state_iid,\n HAP_REPR_VALUE: 1,\n },\n ]\n },\n \"mock_addr\",\n )\n await hass.async_block_till_done()\n assert call_set_preset_mode[0]\n assert call_set_preset_mode[0].data[ATTR_ENTITY_ID] == entity_id\n assert call_set_preset_mode[0].data[ATTR_PRESET_MODE] == \"smart\"\n assert events[-1].data[\"service\"] == \"set_preset_mode\"\n assert len(events) == 2\n\n hass.states.async_set(\n entity_id,\n STATE_ON,\n {\n ATTR_SUPPORTED_FEATURES: FanEntityFeature.PRESET_MODE\n | 
FanEntityFeature.SET_SPEED,\n ATTR_PERCENTAGE: 42,\n ATTR_PRESET_MODE: None,\n ATTR_PRESET_MODES: [\"smart\"],\n },\n )\n await hass.async_block_till_done()\n assert acc.char_target_fan_state.value == 0", "def test_conditional_unitary_1bit(self, method, device):\n shots = 100\n backend = self.backend(method=method, device=device)\n circuits = ref_conditionals.conditional_circuits_1bit(\n final_measure=True, conditional_type=\"kraus\"\n )\n targets = ref_conditionals.conditional_counts_1bit(shots)\n result = backend.run(circuits, shots=shots).result()\n self.assertSuccess(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_qfi_simple(self):\n # create the circuit\n a, b = Parameter(\"a\"), Parameter(\"b\")\n qc = QuantumCircuit(1)\n qc.h(0)\n qc.rz(a, 0)\n qc.rx(b, 0)\n\n param_list = [[np.pi / 4, 0.1], [np.pi, 0.1], [np.pi / 2, 0.1]]\n correct_values = [[[1, 0], [0, 0.5]], [[1, 0], [0, 0]], [[1, 0], [0, 1]]]\n\n qfi = LinCombQFI(self.estimator)\n for i, param in enumerate(param_list):\n qfis = qfi.run([qc], [param]).result().qfis\n np.testing.assert_allclose(qfis[0], correct_values[i], atol=1e-3)", "def test_fuel_consuption_min(self):\n self.assertEqual(str(self.airplane.fuel_consuption_min), \"1.0\")", "async def test_fan_oscillation(hass: HomeAssistant, knx: KNXTestKit) -> None:\n await knx.setup_integration(\n {\n FanSchema.PLATFORM: {\n CONF_NAME: \"test\",\n KNX_ADDRESS: \"1/1/1\",\n FanSchema.CONF_OSCILLATION_ADDRESS: \"2/2/2\",\n }\n }\n )\n\n # turn on oscillation\n await hass.services.async_call(\n \"fan\",\n \"oscillate\",\n {\"entity_id\": \"fan.test\", \"oscillating\": True},\n blocking=True,\n )\n await knx.assert_write(\"2/2/2\", True)\n\n # turn off oscillation\n await hass.services.async_call(\n \"fan\",\n \"oscillate\",\n {\"entity_id\": \"fan.test\", \"oscillating\": False},\n blocking=True,\n )\n await knx.assert_write(\"2/2/2\", False)\n\n # receive oscillation on\n await knx.receive_write(\"2/2/2\", True)\n state = hass.states.get(\"fan.test\")\n assert state.attributes.get(\"oscillating\") is True\n\n # receive oscillation off\n await knx.receive_write(\"2/2/2\", False)\n state = hass.states.get(\"fan.test\")\n assert state.attributes.get(\"oscillating\") is False", "def test_ConvE():\n testing_function('conve')", "def test_gaussian_circuit(self):\n self.logTestName()\n\n dev = qml.device('strawberryfields.gaussian', wires=1)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.NumberOperator(0))\n\n self.assertAlmostEqual(circuit(1), 1, delta=self.tol)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the GaussianState gate works correctly
def test_gaussian_state(self, tol):
    V = np.array([[0.5, 0], [0, 2]])
    r = np.array([0, 0])

    wires = [0]

    gate_name = "GaussianState"
    operation = qml.GaussianState

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(V, r)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, V, r)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_gaussian(self):\n self.logTestName()\n res = self.H.is_gaussian()\n self.assertTrue(res)", "def test_gaussian(self):\n self.logTestName()\n res = self.H.is_gaussian()\n self.assertFalse(res)", "def test_gaussian_circuit(self):\n self.logTestName()\n\n dev = qml.device('strawberryfields.gaussian', wires=1)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.NumberOperator(0))\n\n self.assertAlmostEqual(circuit(1), 1, delta=self.tol)", "def test_gaussian_node(self):\n means = [0.0, 0.5, 1.0]\n stds = [1.0, 2.0, 3.0]\n gauss0 = GaussianNode(mean=means[0], std=stds[0], scope=0)\n gauss1 = GaussianNode(mean=means[1], std=stds[1], scope=1)\n gauss2 = GaussianNode(mean=means[2], std=stds[2], scope=2)\n sample1 = np.array([1, 2, 3])\n sample2 = np.array([10, 20, 30])\n x = torch.Tensor([sample1, sample2])\n\n # Get results\n res_gauss0 = gauss0(x)\n res_gauss1 = gauss1(x)\n res_gauss2 = gauss2(x)\n\n # Expect results from normal distributions\n normal0 = torch.distributions.Normal(loc=means[0], scale=stds[0])\n normal1 = torch.distributions.Normal(loc=means[1], scale=stds[1])\n normal2 = torch.distributions.Normal(loc=means[2], scale=stds[2])\n\n exp_gauss0 = normal0.log_prob(torch.Tensor([1, 10]))\n exp_gauss1 = normal1.log_prob(torch.Tensor([2, 20]))\n exp_gauss2 = normal2.log_prob(torch.Tensor([3, 30]))\n\n # Assertions\n self.assertEqual(len(res_gauss0.tolist()), 2)\n self.assertEqual(len(res_gauss1.tolist()), 2)\n self.assertEqual(len(res_gauss2.tolist()), 2)\n\n # Assert that results are numerically equal\n self.assertTrue(np.isclose(res_gauss0.tolist(), exp_gauss0, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss1.tolist(), exp_gauss1, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss2.tolist(), exp_gauss2, atol=DELTA).all())", "def test_two_gaussian_potential_trigger(self):\n\n trigger2 = two_gaussian_potential(coords[0]-5)[2]\n self.assertTrue(trigger2)", "def test_gaussian_kernel_same_state(self):\n crkr = CrKr(self.S_2x3, self.C_2x2, self.D_2x3)\n s = np.array([[1, 2, 3]])\n\n assert_equal(1, crkr._gaussian_kernel(s, s))", "def test_gaussian_mixture_models_algorithm(prepare_environment: Any) -> None:\n data = np.array([\n [0, 0, 1, 1],\n [0, 0, 1, 1],\n [1, 1, 0, 0],\n [1, 1, 0, 0],\n [1, 1, 0, 0],\n ])\n algorithm = gaussian_mixture_models.GaussianMixtureModelsAlgorithm()\n ground_truth = algorithm.get_ground_truth(data)\n expected_ground_truth = np.array([1, 1, 0, 0])\n assert ground_truth.shape == expected_ground_truth.shape\n for i in range(expected_ground_truth.shape[0]): # pylint: disable=unsubscriptable-object # Astroid>2.3 bug\n assert math.isclose(ground_truth[i], expected_ground_truth[i], abs_tol=1e-5)", "def test_unsupported_gates(self):\n self.logTestName()\n\n dev = qml.device('strawberryfields.gaussian', wires=2)\n gates = set(dev._operation_map.keys())\n all_gates = qml.ops._cv__ops__\n\n for g in all_gates - gates:\n op = getattr(qml.ops, g)\n\n if op.num_wires <= 0:\n wires = [0]\n else:\n wires = list(range(op.num_wires))\n\n @qml.qnode(dev)\n def circuit(*x):\n x = prep_par(x, op)\n op(*x, wires=wires)\n\n if issubclass(op, qml.operation.CV):\n return qml.expval(qml.X(0))\n else:\n return qml.expval(qml.PauliZ(0))\n\n with self.assertRaisesRegex(qml.DeviceError,\n \"Gate {} not supported on device strawberryfields.gaussian\".format(g)):\n x = np.random.random([op.num_params])\n circuit(*x)", "def testGaussian(self):\n random.seed(42)\n\n us = UniformSample()\n for _ in range(300):\n 
us.update(random.gauss(42.0, 13.0))\n self.assertAlmostEqual(us.mean, 43.143067271195235, places=5)\n self.assertAlmostEqual(us.stddev, 13.008553229943168, places=5)\n\n us.clear()\n for _ in range(30000):\n us.update(random.gauss(0.0012, 0.00005))\n self.assertAlmostEqual(us.mean, 0.0012015284549517493, places=5)\n self.assertAlmostEqual(us.stddev, 4.9776450250869146e-05, places=5)", "def test_gaussian_profile(): \n\n # check sigma input\n obj = galsim.Gaussian(sigma=sigma)\n image_galsim_sigma = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_sigma = gf.lightprofiles.gaussian(sigma=[sigma], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check batch input\n obj1 = galsim.Gaussian(sigma=sigma)\n obj2 = galsim.Gaussian(sigma=sigma*2)\n image_galsim_batch1 = obj1.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galsim_batch2 = obj2.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galsim_batch = np.stack([image_galsim_batch1, image_galsim_batch2], axis=0)\n image_galflow_batch = gf.lightprofiles.gaussian(sigma=[sigma, sigma*2], nx=stamp_size, ny=stamp_size)\n\n # check half_light_radius input\n obj = galsim.Gaussian(half_light_radius=hlr)\n image_galsim_hlr = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_hlr = gf.lightprofiles.gaussian(half_light_radius=[hlr], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check fwhm input\n obj = galsim.Gaussian(fwhm=fwhm)\n image_galsim_fwhm = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_fwhm = gf.lightprofiles.gaussian(fwhm=[fwhm], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check fwhm input\n obj = galsim.Gaussian(fwhm=fwhm)\n image_galsim_scale = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=scale, method='no_pixel').array\n image_galflow_scale = gf.lightprofiles.gaussian(fwhm=[fwhm], nx=stamp_size, ny=stamp_size, scale=scale)[0,...]\n\n # check flux input\n obj = galsim.Gaussian(fwhm=fwhm, flux=flux)\n image_galsim_flux = obj.drawImage(nx=stamp_size, ny=stamp_size, scale=1., method='no_pixel').array\n image_galflow_flux = gf.lightprofiles.gaussian(fwhm=[fwhm], flux=[flux], nx=stamp_size, ny=stamp_size)[0,...]\n\n # check even and odd stamp sizes\n obj = galsim.Gaussian(fwhm=fwhm, flux=flux)\n image_galsim_size = obj.drawImage(nx=stamp_size, ny=stamp_size+1, scale=1., method='no_pixel').array\n image_galflow_size = gf.lightprofiles.gaussian(fwhm=[fwhm], flux=[flux], nx=stamp_size, ny=stamp_size+1)[0,...]\n\n assert_allclose(image_galsim_sigma, image_galflow_sigma, atol=1e-5)\n assert_allclose(image_galsim_batch, image_galflow_batch, atol=1e-5)\n assert_allclose(image_galsim_hlr, image_galflow_hlr, atol=1e-5)\n assert_allclose(image_galsim_fwhm, image_galflow_fwhm, atol=1e-5)\n assert_allclose(image_galsim_scale, image_galflow_scale, rtol=1e-5)\n assert_allclose(image_galsim_flux, image_galflow_flux, atol=1e-5)\n assert_allclose(image_galsim_size, image_galflow_size, atol=1e-5)", "def test_is_steady_state_examples():\n steady_1 = [6 / 17, 6 / 17, 5 / 17]\n generator_matrix_1 = np.array(\n [[-2 / 3, 1 / 3, 1 / 3], [1 / 2, -1 / 2, 0], [1 / 5, 1 / 5, -2 / 5]]\n )\n\n steady_2 = np.array([0.0877193, 0.38596491, 0.52631579])\n generator_matrix_2 = np.array([[-0.6, 0.4, 0.2], [0, -0.5, 0.5], [0.1, 0.3, -0.4]])\n\n steady_3 = np.array([1, 2, 3])\n generator_matrix_3 = np.array([[-4, 2, 2], [0, -2, 2], [1, 5, -6]])\n\n assert is_steady_state(state=steady_1, 
Q=generator_matrix_1)\n assert is_steady_state(state=steady_2, Q=generator_matrix_2)\n assert not is_steady_state(state=steady_3, Q=generator_matrix_3)", "def test_bfgs_output(full_no_reset_inst):\n niter = 1\n full_no_reset_inst.minimizer = scipy.optimize.fmin_bfgs\n full_no_reset_inst.minimizer_kwargs = ({'maxiter': niter, 'full_output': 1,\n 'retall': 0, 'disp': 0})\n\n full_no_reset_inst.setup_forest_cxn('4q-qvm')\n full_no_reset_inst.train_test_split(train_ratio=0.2)\n initial_guess = [0, 0]\n\n # Cannot parse optimizer output\n with pytest.raises(ValueError):\n train_loss = full_no_reset_inst.train(initial_guess)\n\n opt_result_parse = lambda opt_res: (opt_res[0], opt_res[1])\n full_no_reset_inst.opt_result_parse = opt_result_parse\n train_loss = full_no_reset_inst.train(initial_guess)\n assert isinstance(train_loss, numpy.float)", "def test_SMEB():\n testing_function('sme', bilinear=True)", "def test_gaussian_args(self):\n self.logTestName()\n\n with self.assertRaisesRegex(TypeError, \"missing 1 required positional argument: 'wires'\"):\n dev = qml.device('strawberryfields.gaussian')", "def test_two_gaussian_potential_no_trigger(self):\n\n trigger = two_gaussian_potential(coords[0])[2]\n self.assertFalse(trigger)", "def test_psf_photometry_gaussian():\n\n psf = IntegratedGaussianPRF(sigma=GAUSSIAN_WIDTH)\n f = psf_photometry(image, INTAB, psf)\n for n in ['x', 'y', 'flux']:\n assert_allclose(f[n + '_0'], f[n + '_fit'], rtol=1e-3)", "def test_example_system():\n sys = ExampleSystem()\n sim = Sim()\n sim.add_system(sys)\n sim.simulate(5,0.1)\n assert abs(sys.states.x - 0.609483796797075) < 1e-14", "def gradLikelihood(self, state):\n return", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the Interferometer gate works correctly
def test_interferometer(self, tol):
    U = np.array(
        [
            [0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],
            [-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],
        ]
    )

    wires = [0, 1]

    gate_name = "Interferometer"
    operation = qml.Interferometer

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(U)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, U)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_imu_sensor(self):\n # Create an engine: no controller and no internal dynamics\n engine = jiminy.Engine()\n setup_controller_and_engine(engine, self.robot)\n\n # Run simulation and extract log data\n x0 = np.array([0.1, 0.1])\n tf = 2.0\n time, gyro_jiminy, accel_jiminy = \\\n SimulateSimplePendulum._simulate_and_get_imu_data_evolution(\n engine, tf, x0, split=True)\n\n # Pendulum dynamics\n def dynamics(t: float, x: np.ndarray) -> np.ndarray:\n return np.stack(\n (x[..., 1], self.g / self.l * np.sin(x[..., 0])), axis=-1)\n\n # Integrate this non-linear dynamics\n x_rk_python = integrate_dynamics(time, x0, dynamics)\n\n # Compute sensor acceleration, i.e. acceleration in polar coordinates\n theta = x_rk_python[:, 0]\n dtheta = x_rk_python[:, 1]\n dtheta = x_rk_python[:, 1]\n\n # Acceleration: to resolve algebraic loop (current acceleration is\n # function of input which itself is function of sensor signal, sensor\n # data is computed using q_t, v_t, a_t\n ddtheta = dynamics(0.0, x_rk_python)[:, 1]\n\n expected_accel = np.stack([\n - self.l * ddtheta + self.g * np.sin(theta),\n np.zeros_like(theta),\n self.l * dtheta ** 2 - self.g * np.cos(theta)], axis=-1)\n expected_gyro = np.stack([\n np.zeros_like(theta),\n dtheta,\n np.zeros_like(theta)], axis=-1)\n\n # Compare sensor signal, ignoring first iterations that correspond to\n # system initialization\n self.assertTrue(np.allclose(\n expected_gyro[2:, :], gyro_jiminy[2:, :], atol=TOLERANCE))\n self.assertTrue(np.allclose(\n expected_accel[2:, :], accel_jiminy[2:, :], atol=TOLERANCE))", "def test_integration(self):\n\n d_i = Decider(100, 0.05)\n p_i = Pump('127.0.0.1', '8000')\n s_i = Sensor('127.0.0.2', '8000')\n c_i = Controller(s_i, p_i, d_i)\n c_i.pump.set_state = MagicMock(return_value=True)\n\n for water_level in range(75, 125, 5):\n for action in c_i.actions.values():\n # Measuring water level.\n c_i.sensor.measure = MagicMock(return_value=water_level)\n # Checking pump state.\n c_i.pump.get_state = MagicMock(return_value=d_i.decide\n (water_level, action,\n c_i.actions))\n c_i.tick()", "def test_forceTempThrshEvent(self):\n\n self.myNode.addSensor(IPTempSensor('TMP-1',[0,1],self.myNode,'LOW'))\n self.myNode.scanSensors('LOW') # scan once all sensors\n\n self.assertEqual(self.myNode.getHub().getEventQ().getEventQLength(),1)", "async def test_discover_sensor(hass: HomeAssistant, rfxtrx_automatic) -> None:\n rfxtrx = rfxtrx_automatic\n\n # 1\n await rfxtrx.signal(\"0a520801070100b81b0279\")\n base_id = \"sensor.wt260_wt260h_wt440h_wt450_wt450h_07_01\"\n\n state = hass.states.get(f\"{base_id}_humidity\")\n assert state\n assert state.state == \"27\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n\n state = hass.states.get(f\"{base_id}_humidity_status\")\n assert state\n assert state.state == \"normal\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None\n\n state = hass.states.get(f\"{base_id}_signal_strength\")\n assert state\n assert state.state == \"-64\"\n assert (\n state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n == SIGNAL_STRENGTH_DECIBELS_MILLIWATT\n )\n\n state = hass.states.get(f\"{base_id}_temperature\")\n assert state\n assert state.state == \"18.4\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n\n state = hass.states.get(f\"{base_id}_battery\")\n assert state\n assert state.state == \"100\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n\n # 2\n await rfxtrx.signal(\"0a52080405020095240279\")\n base_id = 
\"sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02\"\n state = hass.states.get(f\"{base_id}_humidity\")\n\n assert state\n assert state.state == \"36\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n\n state = hass.states.get(f\"{base_id}_humidity_status\")\n assert state\n assert state.state == \"normal\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None\n\n state = hass.states.get(f\"{base_id}_signal_strength\")\n assert state\n assert state.state == \"-64\"\n assert (\n state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n == SIGNAL_STRENGTH_DECIBELS_MILLIWATT\n )\n\n state = hass.states.get(f\"{base_id}_temperature\")\n assert state\n assert state.state == \"14.9\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n\n state = hass.states.get(f\"{base_id}_battery\")\n assert state\n assert state.state == \"100\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n\n # 1 Update\n await rfxtrx.signal(\"0a52085e070100b31b0279\")\n base_id = \"sensor.wt260_wt260h_wt440h_wt450_wt450h_07_01\"\n\n state = hass.states.get(f\"{base_id}_humidity\")\n assert state\n assert state.state == \"27\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n\n state = hass.states.get(f\"{base_id}_humidity_status\")\n assert state\n assert state.state == \"normal\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None\n\n state = hass.states.get(f\"{base_id}_signal_strength\")\n assert state\n assert state.state == \"-64\"\n assert (\n state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)\n == SIGNAL_STRENGTH_DECIBELS_MILLIWATT\n )\n\n state = hass.states.get(f\"{base_id}_temperature\")\n assert state\n assert state.state == \"17.9\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n\n state = hass.states.get(f\"{base_id}_battery\")\n assert state\n assert state.state == \"100\"\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE\n\n assert len(hass.states.async_all()) == 10", "def test_gate_parameters(self):\n true_params = {\n \"squeezing_amplitude_0\": Ranges([0], [1], variable_name=\"squeezing_amplitude_0\"),\n \"phase_0\": Ranges([0], [0, 6.3], variable_name=\"phase_0\"),\n \"phase_1\": Ranges([0.5, 1.4], variable_name=\"phase_1\"),\n }\n spec_params = Device(spec=device_spec).gate_parameters\n assert true_params == spec_params", "def test_T0(self):\n self.assertAlmostEqual(self.stick.T0.value_si, self.T0, 6)", "def test_gate_arg(self):\n # create a test program\n sf_prog = Program(2)\n\n with sf_prog.context as q:\n ops.Sgate(0.54, 0.324) | q[1]\n\n xir_prog = io.to_xir(sf_prog)\n\n expected = [(\"Sgate\", [0.54, 0.324], (1,))]\n assert [(stmt.name, stmt.params, stmt.wires) for stmt in xir_prog.statements] == expected", "def test_currentAngle(self):\n motor = self.motor\n # Initial value\n self.assertEqual(motor.currentAngle(), 0)\n # Positive displacement\n motor.currentDisplacement = 10\n self.assertEqual(motor.currentAngle(), 360.0)\n # Negative displacement\n motor.currentDisplacement = -10\n self.assertEqual(motor.currentAngle(), -360.0)", "async def test_temp_change_ac_on_within_tolerance(\n hass: HomeAssistant, setup_comp_3\n) -> None:\n calls = _setup_switch(hass, False)\n await common.async_set_temperature(hass, 25)\n _setup_sensor(hass, 25.2)\n await hass.async_block_till_done()\n assert len(calls) == 0", "def test_one_mode(self, tol):\n varphi = [0.42342]\n\n with qml.tape.OperationRecorder() as rec:\n Interferometer(theta=[], phi=[], 
varphi=varphi, wires=0)\n\n assert len(rec.queue) == 1\n assert isinstance(rec.queue[0], qml.Rotation)\n assert np.allclose(rec.queue[0].parameters, varphi, atol=tol)", "def test_gate_multimode(self):\n xir_prog = xir.Program()\n xir_prog.add_statement(xir.Statement(\"BSgate\", {\"theta\": 0.54, \"phi\": np.pi}, (0, 2)))\n\n sf_prog = io.to_program(xir_prog)\n\n assert len(sf_prog) == 1\n assert sf_prog.circuit\n assert sf_prog.circuit[0].op.__class__.__name__ == \"BSgate\"\n assert sf_prog.circuit[0].op.p[0] == 0.54\n assert sf_prog.circuit[0].op.p[1] == np.pi\n assert sf_prog.circuit[0].reg[0].ind == 0\n assert sf_prog.circuit[0].reg[1].ind == 2", "def test_undifferentiable_operation(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.BasisState(np.array([x, 0]), wires=[0, 1])\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"Cannot differentiate wrt parameter\"):\n node.jacobian(0.5)", "def test_sensor_delay(self):\n # Configure the IMU\n imu_options = self.imu_sensor.get_options()\n imu_options['delayInterpolationOrder'] = 0\n imu_options['delay'] = 0.0\n self.imu_sensor.set_options(imu_options)\n\n # Create an engine: no controller and no internal dynamics\n engine = jiminy.Engine()\n setup_controller_and_engine(engine, self.robot)\n\n # Configure the engine: No gravity + Continuous time simulation\n engine_options = engine.get_options()\n engine_options[\"stepper\"][\"sensorsUpdatePeriod\"] = 1.0e-3\n engine.set_options(engine_options)\n\n # Run simulation and extract imu data\n x0 = np.array([0.1, 0.0])\n tf = 2.0\n time, imu_jiminy = \\\n SimulateSimplePendulum._simulate_and_get_imu_data_evolution(\n engine, tf, x0, split=False)\n\n # Deduce shifted imu data\n imu_jiminy_shifted_0 = interp1d(\n time, imu_jiminy, kind='zero',\n bounds_error=False, fill_value=imu_jiminy[0], axis=0\n )(time - 1.0e-2)\n imu_jiminy_shifted_1 = interp1d(\n time, imu_jiminy,\n kind='linear', bounds_error=False, fill_value=imu_jiminy[0], axis=0\n )(time - 1.0e-2)\n\n # Configure the IMU\n imu_options = self.imu_sensor.get_options()\n imu_options['delayInterpolationOrder'] = 0\n imu_options['delay'] = 1.0e-2\n self.imu_sensor.set_options(imu_options)\n\n # Run simulation and extract imu data\n time, imu_jiminy_delayed_0 = \\\n SimulateSimplePendulum._simulate_and_get_imu_data_evolution(\n engine, tf, x0, split=False)\n\n # Configure the IMU\n imu_options = self.imu_sensor.get_options()\n imu_options['delayInterpolationOrder'] = 1\n imu_options['delay'] = 1.0e-2\n self.imu_sensor.set_options(imu_options)\n\n # Run simulation\n time, imu_jiminy_delayed_1 = \\\n SimulateSimplePendulum._simulate_and_get_imu_data_evolution(\n engine, tf, x0, split=False)\n\n # Compare sensor signals\n self.assertLessEqual(\n np.mean(imu_jiminy_delayed_0 - imu_jiminy_shifted_0), 1.0e-5)\n self.assertTrue(np.allclose(\n imu_jiminy_delayed_1, imu_jiminy_shifted_1, atol=TOLERANCE))", "def test_operations_after_observables(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.RX(x, wires=[0])\n ex = qml.expval(qml.PauliZ(wires=1))\n qml.RY(0.5, wires=[0])\n return qml.expval(qml.PauliZ(wires=0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(QuantumFunctionError, match=\"gates must precede\"):\n node(0.5)", "async def test_temp_change_ac_on_outside_tolerance(\n hass: HomeAssistant, setup_comp_3\n) -> None:\n calls = _setup_switch(hass, False)\n await 
common.async_set_temperature(hass, 25)\n _setup_sensor(hass, 30)\n await hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.domain == HASS_DOMAIN\n assert call.service == SERVICE_TURN_ON\n assert call.data[\"entity_id\"] == ENT_SWITCH", "def test_set_measurement_invalid_gate_time(self):\r\n with self.assertRaises(fygen.InvalidGateTimeError):\r\n self.fy.set_measurement(gate_time=4)", "def test_target_basis_01(self):\n circuit = QuantumCircuit(1)\n circuit.s(0)\n circuit.z(0)\n circuit.t(0)\n circuit.rz(np.pi, 0)\n theta = Parameter(\"theta\")\n target = Target(num_qubits=2)\n target.add_instruction(CXGate())\n target.add_instruction(PhaseGate(theta))\n target.add_instruction(SXGate())\n passmanager = PassManager()\n passmanager.append(CommutativeCancellation(target=target))\n new_circuit = passmanager.run(circuit)\n expected = QuantumCircuit(1)\n expected.rz(11 * np.pi / 4, 0)\n expected.global_phase = 11 * np.pi / 4 / 2 - np.pi / 2\n\n self.assertEqual(new_circuit, expected)", "def test_sensor_init():\n assert len(app.binary_sensors) > 0", "def testGyroDiffIndicator_Trivial(self):\n mode = common.FULL_COMMS_MODE\n indicator = estimator.EstimatorGyroDiffIndicator(mode)\n messages = self._SynthesizeControlTelemetry()\n\n with mock.patch('makani.control.common.IsControlSystemRunning',\n lambda x: True):\n _, result_stoplight = indicator.Filter(messages)\n\n self.assertEqual(stoplights.STOPLIGHT_NORMAL, result_stoplight)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the DisplacedSqueezedState gate works correctly
def test_displaced_squeezed_state(self, tol):
    a = 0.312
    b = 0.123
    c = 0.532
    d = 0.124

    wires = [0]

    gate_name = "DisplacedSqueezedState"
    operation = qml.DisplacedSqueezedState

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(a, b, c, d)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, a, b, c, d)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def test_compose_front(self):\n # UnitaryChannel evolution\n chan1 = SuperOp(self.sopX)\n chan2 = SuperOp(self.sopY)\n chan = chan1.compose(chan2, front=True)\n targ = SuperOp(self.sopZ)\n self.assertEqual(chan, targ)\n\n # 50% depolarizing channel\n chan1 = SuperOp(self.depol_sop(0.5))\n chan = chan1.compose(chan1, front=True)\n targ = SuperOp(self.depol_sop(0.75))\n self.assertEqual(chan, targ)\n\n # Random superoperator\n mat1 = self.rand_matrix(4, 4)\n mat2 = self.rand_matrix(4, 4)\n chan1 = SuperOp(mat1)\n chan2 = SuperOp(mat2)\n targ = SuperOp(np.dot(mat2, mat1))\n self.assertEqual(chan2.compose(chan1, front=True), targ)\n targ = SuperOp(np.dot(mat1, mat2))\n self.assertEqual(chan1.compose(chan2, front=True), targ)\n\n # Compose different dimensions\n chan1 = SuperOp(self.rand_matrix(16, 4))\n chan2 = SuperOp(self.rand_matrix(4, 16))\n chan = chan1.compose(chan2, front=True)\n self.assertEqual(chan.dim, (4, 4))\n chan = chan2.compose(chan1, front=True)\n self.assertEqual(chan.dim, (2, 2))", "def swap_gate_unitary_nondeterministic():\n targets = []\n # initial state as |10+>\n # Swap(0,1).(X^I^H), Permutation (0,1,2) -> (1,0,2)\n targets.append(\n np.array(\n [\n [0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 1, -1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, -1],\n [1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0],\n [1, -1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, -1, 0, 0, 0, 0],\n ]\n )\n / np.sqrt(2)\n )\n # Swap(0,2).(X^I^H), # Permutation (0,1,2) -> (2,1,0),\n targets.append(\n np.array(\n [\n [0, 0, 0, 0, 1, 1, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 1, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, -1, 0, 0],\n [1, -1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, -1],\n [0, 0, 1, -1, 0, 0, 0, 0],\n ]\n )\n / np.sqrt(2)\n )\n # Swap(2,0).Swap(0,1).(X^I^H), Permutation (0,1,2) -> (2,0,1)\n targets.append(\n np.array(\n [\n [0, 0, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, -1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 1, -1],\n [1, 1, 0, 0, 0, 0, 0, 0],\n [1, -1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 0, 0],\n [0, 0, 1, -1, 0, 0, 0, 0],\n ]\n )\n / np.sqrt(2)\n )\n return targets", "def test_faulty_full_no_reset_no_daggered_circuits():\n q_in = {'q0': 0, 'q1': 1}\n q_latent = {'q1': 1}\n q_refresh = {'q2': 2}\n n_shots = 10\n trash_training = False\n reset = False\n\n sp_circuit = lambda theta, qubit_indices: Program(RY(theta[0], qubit_indices[1]),\n CNOT(qubit_indices[1], qubit_indices[0]))\n \n list_SP_circuits = []\n angle_list = numpy.linspace(-10, 10, 5) # Generate 5 data pts\n\n for angle in angle_list:\n state_prep_unitary = sp_circuit([angle], [0, 1])\n list_SP_circuits.append(state_prep_unitary)\n\n training_circuit = lambda theta, qubit_indices=[0, 1]: Program(RY(-theta[0]/2, qubit_indices[0]),\n CNOT(qubit_indices[1], qubit_indices[0]))\n\n with pytest.raises(ValueError):\n faulty = quantum_autoencoder(state_prep_circuits=list_SP_circuits,\n training_circuit=training_circuit,\n q_in=q_in, q_latent=q_latent, q_refresh=q_refresh,\n trash_training=trash_training, 
reset=reset, \n n_shots=n_shots, verbose=False)", "def test_6q_circuit_20q_coupling(self):\n # ┌───┐┌───┐┌───┐┌───┐┌───┐\n # q0_0: ┤ X ├┤ X ├┤ X ├┤ X ├┤ X ├\n # └─┬─┘└─┬─┘└─┬─┘└─┬─┘└─┬─┘\n # q0_1: ──┼────■────┼────┼────┼──\n # │ ┌───┐ │ │ │\n # q0_2: ──┼──┤ X ├──┼────■────┼──\n # │ └───┘ │ │\n # q1_0: ──■─────────┼─────────┼──\n # ┌───┐ │ │\n # q1_1: ─────┤ X ├──┼─────────■──\n # └───┘ │\n # q1_2: ────────────■────────────\n qr0 = QuantumRegister(3, \"q0\")\n qr1 = QuantumRegister(3, \"q1\")\n circuit = QuantumCircuit(qr0, qr1)\n circuit.cx(qr1[0], qr0[0])\n circuit.cx(qr0[1], qr0[0])\n circuit.cx(qr1[2], qr0[0])\n circuit.x(qr0[2])\n circuit.cx(qr0[2], qr0[0])\n circuit.x(qr1[1])\n circuit.cx(qr1[1], qr0[0])\n\n dag = circuit_to_dag(circuit)\n pass_ = SabreLayout(CouplingMap(self.cmap20), seed=0, swap_trials=32, layout_trials=32)\n pass_.run(dag)\n\n layout = pass_.property_set[\"layout\"]\n self.assertEqual([layout[q] for q in circuit.qubits], [7, 8, 12, 6, 11, 13])", "def test_far_swap_with_gate_the_front(self):\n coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])\n\n qr = QuantumRegister(4, 'qr')\n circuit = QuantumCircuit(qr)\n circuit.h(qr[3])\n circuit.cx(qr[3], qr[0])\n dag = circuit_to_dag(circuit)\n\n expected = QuantumCircuit(qr)\n expected.h(qr[3])\n expected.swap(qr[3], qr[2])\n expected.swap(qr[2], qr[1])\n expected.cx(qr[1], qr[0])\n\n pass_ = BasicSwap(coupling)\n after = pass_.run(dag)\n\n self.assertEqual(circuit_to_dag(expected), after)", "def test_state_break_smaller():\n sim = Sim()\n sys = VanDerPol()\n sys.add_break_smaller(\"x\",-1.0)\n sim.add_system(sys)\n sim.simulate(20,0.01)\n\n #If correct the simulation should break at time 2.52\n assert sys.res.time[-1] == 2.52", "def test_swap_operator_is_block_positive(dim):\n mat = swap_operator(dim)\n np.testing.assert_equal(is_block_positive(mat), True)\n np.testing.assert_equal(is_block_positive(mat, k=2), False)", "def test_unambiguous_state_exclusion_one_state():\n mat = bell(0) * bell(0).conj().T\n states = [mat]\n\n res = state_exclusion(states, probs=None, method=\"unambiguous\")\n np.testing.assert_equal(np.isclose(res, 0), True)", "def test_far_swap_with_gate_the_back(self):\n coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])\n\n qr = QuantumRegister(4, 'qr')\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[3], qr[0])\n circuit.h(qr[3])\n dag = circuit_to_dag(circuit)\n\n expected = QuantumCircuit(qr)\n expected.swap(qr[3], qr[2])\n expected.swap(qr[2], qr[1])\n expected.cx(qr[1], qr[0])\n expected.h(qr[1])\n\n pass_ = BasicSwap(coupling)\n after = pass_.run(dag)\n\n self.assertEqual(circuit_to_dag(expected), after)", "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "def test_move_multiple(self):\n new_state = self.state.move(1, 2, 4).move(3, 1, 4).move(6, 0, 3)\n assert new_state.replicas == 
(\n (1, 2),\n (4, 3),\n (0, 1, 2, 3),\n (0, 4, 2, 3),\n (2,),\n (0, 1, 2),\n (3, 1, 4),\n )\n assert new_state.broker_partition_counts == (3, 4, 5, 4, 3)\n assert new_state.broker_weights == (16, 21, 24, 20, 16)\n assert new_state.broker_leader_weights == (16, 2, 6, 8, 3)\n assert new_state.broker_leader_counts == (3, 1, 1, 1, 1)\n assert new_state.topic_broker_count == (\n (0, 1, 1, 1, 1),\n (2, 1, 2, 2, 1),\n (0, 0, 1, 0, 0),\n (1, 2, 1, 1, 1),\n )\n assert new_state.topic_broker_imbalance == (0, 0, 0, 0)\n assert abs(new_state.broker_partition_count_cv - 0.1969) < 1e-4\n assert abs(new_state.broker_weight_cv - 0.1584) < 1e-4\n assert abs(new_state.broker_leader_weight_cv - 0.7114) < 1e-4\n assert new_state.weighted_topic_broker_imbalance == 0\n assert new_state.rg_replicas == (\n (1, 1, 2, 2, 0, 2, 2),\n (1, 1, 2, 2, 1, 1, 1),\n )\n assert new_state.movement_count == 3\n assert new_state.movement_size == 19\n assert new_state.leader_movement_count == 2", "def test_5q_circuit_20q_coupling(self):\n # ┌───┐\n # q_0: ──■───────┤ X ├───────────────\n # │ └─┬─┘┌───┐\n # q_1: ──┼────■────┼──┤ X ├───────■──\n # ┌─┴─┐ │ │ ├───┤┌───┐┌─┴─┐\n # q_2: ┤ X ├──┼────┼──┤ X ├┤ X ├┤ X ├\n # └───┘┌─┴─┐ │ └───┘└─┬─┘└───┘\n # q_3: ─────┤ X ├──■─────────┼───────\n # └───┘ │\n # q_4: ──────────────────────■───────\n qr = QuantumRegister(5, \"q\")\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[2])\n circuit.cx(qr[1], qr[3])\n circuit.cx(qr[3], qr[0])\n circuit.x(qr[2])\n circuit.cx(qr[4], qr[2])\n circuit.x(qr[1])\n circuit.cx(qr[1], qr[2])\n\n dag = circuit_to_dag(circuit)\n pass_ = SabreLayout(CouplingMap(self.cmap20), seed=0, swap_trials=32, layout_trials=32)\n pass_.run(dag)\n\n layout = pass_.property_set[\"layout\"]\n self.assertEqual([layout[q] for q in circuit.qubits], [18, 11, 13, 12, 14])", "def test_conclusive_state_exclusion_one_state():\n rho = bell(0) * bell(0).conj().T\n states = [rho]\n\n res = state_exclusion(states, probs=None, method=\"conclusive\")\n np.testing.assert_equal(np.isclose(res, 1), True)", "def halfway_inst():\n q_in = {'q0': 0, 'q1': 1}\n q_latent = {'q1': 1}\n n_shots = 10\n trash_training = True\n\n sp_circuit = lambda theta, qubit_indices: Program(RY(theta[0], qubit_indices[1]),\n CNOT(qubit_indices[1], qubit_indices[0]))\n\n list_SP_circuits = []\n angle_list = numpy.linspace(-10, 10, 5) # Generate 5 data pts\n\n for angle in angle_list:\n state_prep_unitary = sp_circuit([angle], [0, 1])\n list_SP_circuits.append(state_prep_unitary)\n\n training_circuit = lambda theta, qubit_indices=[0, 1]: Program(RY(-theta[0]/2, qubit_indices[0]),\n CNOT(qubit_indices[1], qubit_indices[0]))\n return quantum_autoencoder(state_prep_circuits=list_SP_circuits,\n training_circuit=training_circuit,\n q_in=q_in, q_latent=q_latent,\n trash_training=trash_training,\n n_shots=n_shots, verbose=False)", "def test_cat_state(self, tol):\n a = 0.312\n b = 0.123\n c = 0.532\n wires = [0]\n\n gate_name = \"CatState\"\n operation = qml.CatState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(a, b, c)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, a * np.exp(1j * b), c)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def 
test_conditional_solid_reconstruction(self):\n\n test_shape = ExtrudeStraightShape(\n points=[(0, 0), (0, 20), (20, 20)],\n distance=20\n )\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n initial_hash_value = test_shape.hash_value\n\n test_shape.distance = 30\n\n assert test_shape.solid is not None\n assert test_shape.hash_value is not None\n assert initial_hash_value != test_shape.hash_value", "def test_multiple_registers_with_good_layout(self):\n coupling = CouplingMap([[0, 1], [1, 2]])\n\n qr_q = QuantumRegister(2, 'q')\n qr_a = QuantumRegister(1, 'a')\n cr_c = ClassicalRegister(3, 'c')\n circ = QuantumCircuit(qr_q, qr_a, cr_c)\n circ.cx(qr_q[0], qr_a[0])\n circ.cx(qr_q[1], qr_a[0])\n circ.measure(qr_q[0], cr_c[0])\n circ.measure(qr_q[1], cr_c[1])\n circ.measure(qr_a[0], cr_c[2])\n dag = circuit_to_dag(circ)\n\n layout = Layout({qr_q[0]: 0, qr_a[0]: 1, qr_q[1]: 2})\n\n pass_ = StochasticSwap(coupling, layout, 20, 13)\n after = pass_.run(dag)\n\n self.assertEqual(dag, after)", "def swap_gate_statevector_nondeterministic():\n targets = []\n # initial state as |10+>\n # Swap(0,1).(X^I^H), Permutation (0,1,2) -> (1,0,2), |1+0>\n targets.append(np.array([0, 0, 0, 0, 1, 0, 1, 0]) / np.sqrt(2))\n # Swap(0,2).(X^I^H), # Permutation (0,1,2) -> (2,1,0),\n targets.append(np.array([0, 1, 0, 0, 0, 1, 0, 0]) / np.sqrt(2))\n # Swap(2,0).Swap(0,1).(X^I^H), Permutation (0,1,2) -> (2,0,1)\n targets.append(np.array([0, 1, 0, 1, 0, 0, 0, 0]) / np.sqrt(2))\n return targets" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the FockState gate works correctly
def test_fock_state(self, tol):
    arg = 1
    wires = [0]

    gate_name = "FockState"
    operation = qml.FockState

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(arg)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, arg)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def test_fock_state_vector(self, tol):\n args = psi\n\n wires = [0]\n\n gate_name = \"FockStateVector\"\n operation = qml.FockStateVector\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(psi)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, psi)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "async def test_state_triggers(hass: HomeAssistant) -> None:\n hass.states.async_set(\"sensor.test_monitored\", STATE_OFF)\n\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"observations\": [\n {\n \"platform\": \"state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"to_state\": \"off\",\n \"prob_given_true\": 0.9999,\n \"prob_given_false\": 0.9994,\n },\n ],\n \"prior\": 0.2,\n \"probability_threshold\": 0.32,\n }\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"binary_sensor.test_binary\").state == STATE_OFF\n\n events = []\n async_track_state_change_event(\n hass, \"binary_sensor.test_binary\", callback(lambda event: events.append(event))\n )\n\n context = Context()\n hass.states.async_set(\"sensor.test_monitored\", STATE_ON, context=context)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n assert events[0].context == context", "def test_controlled_by_gates_fusion(backend):\n c = Circuit(4)\n c.add((gates.H(i) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n c.add((gates.RY(i, theta=0.5678) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())", "def test_gaussian_state(self, tol):\n V = np.array([[0.5, 0], [0, 2]])\n r = np.array([0, 0])\n\n wires = [0]\n\n gate_name = \"GaussianState\"\n operation = 
qml.GaussianState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(V, r)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, V, r)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def _verify(self, expected_state, expected_speed, expected_oscillating,\n expected_direction):\n state = self.hass.states.get(_TEST_FAN)\n attributes = state.attributes\n assert state.state == expected_state\n assert attributes.get(ATTR_SPEED, None) == expected_speed\n assert attributes.get(ATTR_OSCILLATING, None) == expected_oscillating\n assert attributes.get(ATTR_DIRECTION, None) == expected_direction", "def test_fock_state_projector(self, tol):\n cutoff_dim = 12\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device(\"strawberryfields.fock\", wires=2, hbar=hbar, cutoff_dim=cutoff_dim)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a) ** 2 / 2) * a ** 2 / np.sqrt(2)) ** 2\n assert np.allclose(circuit(a), expected, atol=tol, rtol=0)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r))) ** 2\n assert np.allclose(circuit(r), expected, atol=tol, rtol=0)", "def test_group(self):\n\n class DoneState(State):\n def __init__(self):\n State.__init__(self,outcomes=['done'])\n def execute(self,ud=None):\n return 'done'\n\n sm = StateMachine(['succeeded','done'])\n with sm:\n StateMachine.add('FAILSAUCE',DoneState())\n transitions = {'aborted':'FAILSAUCE','preempted':'FAILSAUCE'}\n with sm:\n StateMachine.add('FIRST', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n StateMachine.add('SECOND', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g2), transitions)\n StateMachine.add('THIRD', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n spinner = threading.Thread(target=self.spin)\n spinner.start()\n outcome = sm.execute()\n\n assert outcome == 'done'", "def test_all_finitediff_state(self, interface, return_type, shots, wire_specs, diff_method):\n\n # this error message is a bit cryptic, but it's consistent across\n # all the interfaces\n msg = \"state\\\\(wires=\\\\[0?\\\\]\\\\)\\\\ is\\\\ not\\\\ in\\\\ list\"\n\n complex = return_type == \"StateVector\"\n\n with pytest.raises(ValueError, match=msg):\n circuit = get_qnode(interface, diff_method, return_type, shots, wire_specs)\n x = get_variable(interface, wire_specs, complex=complex)\n\n if shots is not None:\n with pytest.warns(UserWarning, match=\"unaffected by sampling\"):\n compute_gradient(x, interface, circuit, return_type, complex=complex)\n else:\n compute_gradient(x, interface, circuit, return_type, complex=complex)", "async def test_sensor_state(hass: HomeAssistant) -> None:\n prior = 0.2\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"observations\": [\n {\n \"platform\": 
\"state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"to_state\": \"off\",\n \"prob_given_true\": 0.8,\n \"prob_given_false\": 0.4,\n }\n ],\n \"prior\": prior,\n \"probability_threshold\": 0.32,\n }\n }\n\n assert await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n hass.states.async_set(\"sensor.test_monitored\", \"on\")\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert state.attributes.get(\"observations\")[0][\"prob_given_true\"] == 0.8\n assert state.attributes.get(\"observations\")[0][\"prob_given_false\"] == 0.4\n assert abs(0.0769 - state.attributes.get(\"probability\")) < 0.01\n # Calculated using bayes theorum where P(A) = 0.2, P(~B|A) = 0.2 (as negative observation), P(~B|notA) = 0.6\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", \"off\")\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(0.33 - state.attributes.get(\"probability\")) < 0.01\n # Calculated using bayes theorum where P(A) = 0.2, P(~B|A) = 0.8 (as negative observation), P(~B|notA) = 0.4\n assert state.state == \"on\"\n\n hass.states.async_remove(\"sensor.test_monitored\")\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == []\n assert abs(prior - state.attributes.get(\"probability\")) < 0.01\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", STATE_UNAVAILABLE)\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == []\n assert abs(prior - state.attributes.get(\"probability\")) < 0.01\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", STATE_UNKNOWN)\n await hass.async_block_till_done()\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == []\n assert abs(prior - state.attributes.get(\"probability\")) < 0.01\n assert state.state == \"off\"", "def test_new_state(self):\n self.new_helper(\"State\")", "def test_reproduce_turn_on(self):\n calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)\n\n self.hass.states.set('light.test', 'off')\n\n state.reproduce_state(self.hass, ha.State('light.test', 'on'))\n\n self.hass.block_till_done()\n\n assert len(calls) > 0\n last_call = calls[-1]\n assert 'light' == last_call.domain\n assert SERVICE_TURN_ON == last_call.service\n assert ['light.test'] == last_call.data.get('entity_id')", "def test_gate_multimode(self):\n xir_prog = xir.Program()\n xir_prog.add_statement(xir.Statement(\"BSgate\", {\"theta\": 0.54, \"phi\": np.pi}, (0, 2)))\n\n sf_prog = io.to_program(xir_prog)\n\n assert len(sf_prog) == 1\n assert sf_prog.circuit\n assert sf_prog.circuit[0].op.__class__.__name__ == \"BSgate\"\n assert sf_prog.circuit[0].op.p[0] == 0.54\n assert sf_prog.circuit[0].op.p[1] == np.pi\n assert sf_prog.circuit[0].reg[0].ind == 0\n assert sf_prog.circuit[0].reg[1].ind == 2", "def test_protocol(self):\n\n my_event_callback = Mock(spec=\"UNKNOWN WHAT SHOULD GO HERE FOR evt_callback\")\n p = Protocol(Prompt, NEWLINE, my_event_callback)\n 
self.assertEqual(str(my_event_callback.mock_calls), \"[call('DRIVER_ASYNC_EVENT_STATE_CHANGE')]\")\n\n p._protocol_fsm\n\n self.assertEqual(p._protocol_fsm.enter_event, 'DRIVER_EVENT_ENTER')\n self.assertEqual(p._protocol_fsm.exit_event, 'DRIVER_EVENT_EXIT')\n self.assertEqual(p._protocol_fsm.previous_state, None)\n self.assertEqual(p._protocol_fsm.current_state, 'DRIVER_STATE_UNKNOWN')\n self.assertEqual(repr(p._protocol_fsm.states), repr(ProtocolState))\n self.assertEqual(repr(p._protocol_fsm.events), repr(ProtocolEvent))\n\n state_handlers = {\n (ProtocolState.UNKNOWN, ProtocolEvent.ENTER): '_handler_unknown_enter',\n (ProtocolState.UNKNOWN, ProtocolEvent.EXIT): '_handler_unknown_exit',\n (ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER): '_handler_unknown_discover',\n (ProtocolState.UNKNOWN, ProtocolEvent.FORCE_STATE): '_handler_unknown_force_state',\n (ProtocolState.COMMAND, ProtocolEvent.FORCE_STATE): '_handler_unknown_force_state',\n (ProtocolState.COMMAND, ProtocolEvent.ENTER): '_handler_command_enter',\n (ProtocolState.COMMAND, ProtocolEvent.EXIT): '_handler_command_exit',\n (ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE): '_handler_command_start_autosample',\n (ProtocolState.COMMAND, ProtocolEvent.GET): '_handler_command_get',\n (ProtocolState.COMMAND, ProtocolEvent.SET): '_handler_command_set',\n (ProtocolState.COMMAND, ProtocolEvent.INIT_LOGGING): '_handler_command_init_logging',\n (ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC): '_handler_command_clock_sync',\n (ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS): '_handler_command_aquire_status',\n (ProtocolState.COMMAND, ProtocolEvent.START_DIRECT): '_handler_command_start_direct',\n (ProtocolState.COMMAND, ProtocolEvent.SAMPLE_REFERENCE_OSCILLATOR): '_handler_sample_ref_osc',\n (ProtocolState.COMMAND, ProtocolEvent.TEST_EEPROM): '_handler_command_test_eeprom',\n (ProtocolState.COMMAND, ProtocolEvent.RESET_EC): '_handler_command_reset_ec',\n (ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER): '_handler_autosample_enter',\n (ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT): '_handler_autosample_exit',\n (ProtocolState.AUTOSAMPLE, ProtocolEvent.GET): '_handler_command_get',\n (ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE): '_handler_autosample_stop_autosample',\n (ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER): '_handler_direct_access_enter',\n (ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT): '_handler_direct_access_exit',\n (ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT): '_handler_direct_access_execute_direct',\n (ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT): '_handler_direct_access_stop_direct'\n }\n\n log.debug(str(state_handlers[(ProtocolState.UNKNOWN, ProtocolEvent.EXIT)]))\n for key in p._protocol_fsm.state_handlers.keys():\n #log.debug(\"W*****>>> \" + str(key))\n #log.debug(\"X*****>>> \" + str(p._protocol_fsm.state_handlers[key].__func__.func_name))\n #log.debug(\"Y*****>>> \" + str(state_handlers[key]))\n self.assertEqual(p._protocol_fsm.state_handlers[key].__func__.func_name, state_handlers[key])\n self.assertTrue(key in state_handlers)\n\n for key in state_handlers.keys():\n self.assertEqual(p._protocol_fsm.state_handlers[key].__func__.func_name, state_handlers[key])\n self.assertTrue(key in p._protocol_fsm.state_handlers)", "async def test_fan_step(hass: HomeAssistant, knx: KNXTestKit) -> None:\n await knx.setup_integration(\n {\n FanSchema.PLATFORM: {\n CONF_NAME: \"test\",\n KNX_ADDRESS: \"1/2/3\",\n FanSchema.CONF_MAX_STEP: 4,\n }\n }\n )\n\n # turn on fan with default 
speed (50% - step 2)\n await hass.services.async_call(\n \"fan\", \"turn_on\", {\"entity_id\": \"fan.test\"}, blocking=True\n )\n await knx.assert_write(\"1/2/3\", (2,))\n\n # turn up speed to 75% - step 3\n await hass.services.async_call(\n \"fan\", \"turn_on\", {\"entity_id\": \"fan.test\", \"percentage\": 75}, blocking=True\n )\n await knx.assert_write(\"1/2/3\", (3,))\n\n # turn off fan\n await hass.services.async_call(\n \"fan\", \"turn_off\", {\"entity_id\": \"fan.test\"}, blocking=True\n )\n await knx.assert_write(\"1/2/3\", (0,))\n\n # receive step 4 (100%) telegram\n await knx.receive_write(\"1/2/3\", (4,))\n state = hass.states.get(\"fan.test\")\n assert state.state is STATE_ON\n assert state.attributes.get(\"percentage\") == 100\n\n # receive step 1 (25%) telegram\n await knx.receive_write(\"1/2/3\", (1,))\n state = hass.states.get(\"fan.test\")\n assert state.state is STATE_ON\n assert state.attributes.get(\"percentage\") == 25\n\n # receive step 0 (off) telegram\n await knx.receive_write(\"1/2/3\", (0,))\n state = hass.states.get(\"fan.test\")\n assert state.state is STATE_OFF\n\n # fan does not respond to read\n await knx.receive_read(\"1/2/3\")\n await knx.assert_telegram_count(0)", "def test_set_transition_state():\n\n def assert_state(instance):\n \"\"\"\n ensure the running state is set\n \"\"\"\n assert instance.state == \"do_thing_running\"\n\n x = get_thing()\n x.do_thing(assert_state)\n\n # ensure the target transition is set when the process is done\n assert x.state == x.CHOICES.done", "async def test_controlling_state_via_topic(hass, mqtt_mock):\n assert await async_setup_component(hass, fan.DOMAIN, {\n fan.DOMAIN: {\n 'platform': 'mqtt',\n 'name': 'test',\n 'state_topic': 'state-topic',\n 'command_topic': 'command-topic',\n 'payload_off': 'StAtE_OfF',\n 'payload_on': 'StAtE_On',\n 'oscillation_state_topic': 'oscillation-state-topic',\n 'oscillation_command_topic': 'oscillation-command-topic',\n 'payload_oscillation_off': 'OsC_OfF',\n 'payload_oscillation_on': 'OsC_On',\n 'speed_state_topic': 'speed-state-topic',\n 'speed_command_topic': 'speed-command-topic',\n 'payload_off_speed': 'speed_OfF',\n 'payload_low_speed': 'speed_lOw',\n 'payload_medium_speed': 'speed_mEdium',\n 'payload_high_speed': 'speed_High',\n }\n })\n\n state = hass.states.get('fan.test')\n assert state.state is STATE_OFF\n assert not state.attributes.get(ATTR_ASSUMED_STATE)\n\n async_fire_mqtt_message(hass, 'state-topic', 'StAtE_On')\n state = hass.states.get('fan.test')\n assert state.state is STATE_ON\n\n async_fire_mqtt_message(hass, 'state-topic', 'StAtE_OfF')\n state = hass.states.get('fan.test')\n assert state.state is STATE_OFF\n assert state.attributes.get('oscillating') is False\n\n async_fire_mqtt_message(hass, 'oscillation-state-topic', 'OsC_On')\n state = hass.states.get('fan.test')\n assert state.attributes.get('oscillating') is True\n\n async_fire_mqtt_message(hass, 'oscillation-state-topic', 'OsC_OfF')\n state = hass.states.get('fan.test')\n assert state.attributes.get('oscillating') is False\n\n assert state.attributes.get('speed') == fan.SPEED_OFF\n\n async_fire_mqtt_message(hass, 'speed-state-topic', 'speed_lOw')\n state = hass.states.get('fan.test')\n assert state.attributes.get('speed') == fan.SPEED_LOW\n\n async_fire_mqtt_message(hass, 'speed-state-topic', 'speed_mEdium')\n state = hass.states.get('fan.test')\n assert state.attributes.get('speed') == fan.SPEED_MEDIUM\n\n async_fire_mqtt_message(hass, 'speed-state-topic', 'speed_High')\n state = hass.states.get('fan.test')\n 
assert state.attributes.get('speed') == fan.SPEED_HIGH\n\n async_fire_mqtt_message(hass, 'speed-state-topic', 'speed_OfF')\n state = hass.states.get('fan.test')\n assert state.attributes.get('speed') == fan.SPEED_OFF", "def testPushState(self):\n parser = expression_parser.EventFilterExpressionParser()\n parser._Reset()\n\n parser._state = 'INITIAL'\n self.assertEqual(len(parser._state_stack), 0)\n\n next_state = parser._PushState()\n self.assertIsNone(next_state)\n self.assertEqual(len(parser._state_stack), 1)\n self.assertEqual(parser._state_stack[0], 'INITIAL')\n self.assertEqual(parser._state, 'INITIAL')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the FockStateVector gate works correctly
def test_fock_state_vector(self, tol):
    args = psi

    wires = [0]

    gate_name = "FockStateVector"
    operation = qml.FockStateVector

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(psi)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, psi)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_qubit_state_vector(self, init_state, tol, rep):\n dev = DefaultTensorTF(wires=1, representation=rep)\n state = init_state(1)\n\n dev.execute([qml.QubitStateVector(state, wires=[0])], [], {})\n\n res = dev._state().numpy().flatten()\n expected = state\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def test_statevector_simulator_cpp(self):\n result = execute(self.q_circuit, backend='local_statevector_simulator_cpp').result()\n self.assertEqual(result.get_status(), 'COMPLETED')\n actual = result.get_statevector(self.q_circuit)\n\n # state is 1/sqrt(2)|00> + 1/sqrt(2)|11>, up to a global phase\n self.assertAlmostEqual((abs(actual[0]))**2, 1/2)\n self.assertEqual(actual[1], 0)\n self.assertEqual(actual[2], 0)\n self.assertAlmostEqual((abs(actual[3]))**2, 1/2)", "def test_fock_state(self, tol):\n arg = 1\n wires = [0]\n\n gate_name = \"FockState\"\n operation = qml.FockState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(arg)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, arg)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_all_finitediff_state(self, interface, return_type, shots, wire_specs, diff_method):\n\n # this error message is a bit cryptic, but it's consistent across\n # all the interfaces\n msg = \"state\\\\(wires=\\\\[0?\\\\]\\\\)\\\\ is\\\\ not\\\\ in\\\\ list\"\n\n complex = return_type == \"StateVector\"\n\n with pytest.raises(ValueError, match=msg):\n circuit = get_qnode(interface, diff_method, return_type, shots, wire_specs)\n x = get_variable(interface, wire_specs, complex=complex)\n\n if shots is not None:\n with pytest.warns(UserWarning, match=\"unaffected by sampling\"):\n compute_gradient(x, interface, circuit, return_type, complex=complex)\n else:\n compute_gradient(x, interface, circuit, return_type, complex=complex)", "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def test_boost_vector_states():\n sim = Sim()\n sys = RigidBody()\n\n sys.store(\"position\")\n\n 
sys.inputs.force = [1.0,0.0,0.0]\n sys.inputs.mass = 1.0\n\n sim.add_system(sys)\n sim.simulate(20,0.01)\n\n pos = sys.res.position\n diff = np.abs(pos[-1,:]-[200,0,0])\n assert np.max(diff) <= 1", "def test_invalid_qubit_state_vector(self, rep):\n dev = DefaultTensorTF(wires=2, representation=rep)\n state = np.array([0, 123.432])\n\n with pytest.raises(\n ValueError, match=r\"can apply QubitStateVector only to all of the 2 wires\"\n ):\n dev.execute([qml.QubitStateVector(state, wires=[0])], [], {})", "def swap_gate_statevector_nondeterministic():\n targets = []\n # initial state as |10+>\n # Swap(0,1).(X^I^H), Permutation (0,1,2) -> (1,0,2), |1+0>\n targets.append(np.array([0, 0, 0, 0, 1, 0, 1, 0]) / np.sqrt(2))\n # Swap(0,2).(X^I^H), # Permutation (0,1,2) -> (2,1,0),\n targets.append(np.array([0, 1, 0, 0, 0, 1, 0, 0]) / np.sqrt(2))\n # Swap(2,0).Swap(0,1).(X^I^H), Permutation (0,1,2) -> (2,0,1)\n targets.append(np.array([0, 1, 0, 1, 0, 0, 0, 0]) / np.sqrt(2))\n return targets", "def test_getActionVector(self):\n\n # set up the network for testing\n self.testNetwork._createInitialWeights()\n self.testNetwork._createComputationalGraph()\n\n actVec = self.testNetwork.getActionVector(np.array([0.1, 0.1]))\n\n # Make the checks\n self.assertTrue(len(actVec) == self.layers[-1])\n self.assertTrue(np.sum(actVec) == 1)\n self.assertTrue(np.sum(actVec == 1) == 1)", "def test_pragma_get_statevec_pyquest(init) -> None:\n op = ops.PragmaGetStateVector\n test_dict: Dict[str, List[complex]] = {'ro': [0, 0, 0, 0]}\n operation = op(readout='ro',\n circuit=Circuit(),\n )\n env = utils.createQuestEnv()()\n qubits = utils.createQureg()(2, env)\n state = 1 / np.sqrt(2) * np.array([1, 1, 0, 0])\n cheat.initStateFromAmps()(qubits, np.real(state), np.imag(state))\n\n pyquest_call_operation(operation=operation, qureg=qubits, classical_bit_registers=dict(),\n classical_float_registers=dict(),\n classical_complex_registers=test_dict,\n output_bit_register_dict=dict(),)\n utils.destroyQureg()(qubits=qubits, env=env)\n utils.destroyQuestEnv()(env)\n npt.assert_array_almost_equal(test_dict['ro'], init[1], decimal=4)", "def func_state_pre_test(list_float_target_state: List[float], int_shots: int) -> List[float]:\n env = QEnv()\n\n # from QCompute import Define\n # Define.hubToken = ''\n # env.backend(BackendName.CloudBaiduSim2Wind)\n env.backend(BackendName.LocalBaiduSim2)\n\n int_dim = len(list_float_target_state) # the dimension of the input vector\n num_qubit_sys = max(int(np.ceil(np.log2(int_dim))), 1) # the number of qubits we need to encode the input vector\n reg_sys = list(env.Q[idx] for idx in range(num_qubit_sys)) # create the quantum register\n\n # call the quantum circuit to prepare quantum state\n circ_state_pre(reg_sys, [], list_float_target_state, reg_borrowed=[])\n\n # measure the quantum state we have prepared\n MeasureZ(reg_sys, list(reversed(range(num_qubit_sys))))\n\n task_result = env.commit(int_shots, fetchMeasure=True)['counts'] # commit to the task\n\n list_population = [0 for _ in range(2 ** num_qubit_sys)] # register for finial populations\n for idx_key in task_result.keys():\n list_population[int(idx_key, base=2)] = task_result[idx_key]\n return list_population", "def test_controlled_by_gates_fusion(backend):\n c = Circuit(4)\n c.add((gates.H(i) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, theta=0.4321).controlled_by(2))\n c.add((gates.RY(i, theta=0.5678) for i in range(4)))\n c.add(gates.RX(1, theta=0.1234).controlled_by(0))\n c.add(gates.RX(3, 
theta=0.4321).controlled_by(2))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())", "def test_state(self):\n\n sv = Statevector.from_label(\"+-rl\")\n output = state_drawer(sv, \"latex_source\")\n expected_output = (\n r\"\\frac{1}{4} |0000\\rangle- \\frac{i}{4} |0001\\rangle+\\frac{i}{4} |0010\\rangle\"\n r\"+\\frac{1}{4} |0011\\rangle- \\frac{1}{4} |0100\\rangle+\\frac{i}{4} |0101\\rangle\"\n r\" + \\ldots +\\frac{1}{4} |1011\\rangle- \\frac{1}{4} |1100\\rangle\"\n r\"+\\frac{i}{4} |1101\\rangle- \\frac{i}{4} |1110\\rangle- \\frac{1}{4} |1111\\rangle\"\n )\n self.assertEqual(output, expected_output)", "def test_gaussian_state(self, tol):\n V = np.array([[0.5, 0], [0, 2]])\n r = np.array([0, 0])\n\n wires = [0]\n\n gate_name = \"GaussianState\"\n operation = qml.GaussianState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(V, r)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, V, r)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def cz_gate_statevector_nondeterministic():\n targets = []\n # (I^H).CZ.(H^H) = CX10.(H^I), Bell state\n targets.append(np.array([1, 0, 0, 1]) / np.sqrt(2))\n # (H^I).CZ.(H^H) = CX01.(I^H), Bell state\n targets.append(np.array([1, 0, 0, 1]) / np.sqrt(2))\n return targets", "def test_autograd_state_backprop(self, wire_specs):\n msg = \"cannot reshape array of size .*\"\n\n with pytest.raises(ValueError, match=msg):\n circuit = get_qnode(\"autograd\", \"backprop\", \"StateVector\", None, wire_specs)\n x = get_variable(\"autograd\", wire_specs)\n compute_gradient(x, \"autograd\", circuit, \"StateVector\")", "def _test_update_state_fn(self):\n return encoding_stage._tf_style_update_state(\n lambda _, s, sut, name: {'state': s['state'] + sut['tensor']})", "def test_float_vector(capfd):\n attribute = 'optimize'\n name = 'State Vector'\n tag = 'STW'\n type_id = 'float_vec'\n default_value = None\n config_dict = {'STW': ['0.0 1.0 2.0', {'optimize': 'no'}]}\n result = convertXMLAttributesDictEntry(\n name, config_dict, tag, attribute, type_id, default_value=None)\n out, err = capfd.readouterr()\n ''' This test will check if it is returning [0.0 0.0 0.0] '''\n assert (result == np.array([0.0, 0.0, 0.0])).all()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the FockDensityMatrix gate works correctly
def test_fock_density_matrix(self, tol):
    dm = np.outer(psi, psi.conj())
    wires = [0]

    gate_name = "FockDensityMatrix"
    operation = qml.FockDensityMatrix

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(dm)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, dm)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_pragma_get_densitymatrix_pyquest() -> None:\n op = ops.PragmaGetDensityMatrix\n test_dict: Dict[str, List[complex]] = {'ro': [0, 0, 0, 0]}\n operation = op(readout='ro',\n circuit=Circuit()\n )\n env = utils.createQuestEnv()()\n qubits = utils.createQureg()(1, env)\n state = 1 / np.sqrt(2) * np.array([1, 1])\n density_matrix = 1 / 2 * np.array([[1, 1], [1, 1]]).flatten()\n cheat.initStateFromAmps()(qubits, np.real(state), np.imag(state))\n\n pyquest_call_operation(operation=operation, qureg=qubits, classical_bit_registers=dict(),\n classical_float_registers=dict(),\n classical_complex_registers=test_dict,\n output_bit_register_dict=dict(),)\n utils.destroyQureg()(qubits=qubits, env=env)\n utils.destroyQuestEnv()(env)\n npt.assert_array_almost_equal(test_dict['ro'], density_matrix)", "def test_density_matrix_qnode_tf_jit(self):\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev, interface=\"tf\")\n def circuit(x):\n qml.IsingXX(x, wires=[0, 1])\n return qml.state()\n\n density_matrix = tf.function(\n qml.qinfo.reduced_dm(circuit, wires=[0]),\n jit_compile=True,\n input_signature=(tf.TensorSpec(shape=(), dtype=tf.float32),),\n )\n density_matrix = density_matrix(tf.Variable(0.0, dtype=tf.float32))\n assert np.allclose(density_matrix, [[1, 0], [0, 0]])", "def test_reduced_density_matrix(self, local_device, shots, tol):\n dev = local_device(2)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.CRX(np.pi / 2, wires=[0, 1])\n return qml.density_matrix(wires=[0])\n\n output = circuit()\n expected_dm = np.array([[0.5, 1j / np.sqrt(8)], [-1j / np.sqrt(8), 0.5]])\n assert np.allclose(output, expected_dm, **tol)", "def test_density(self):\n self.ld.compute(self.box, self.pos, self.pos)\n\n # Test access\n self.ld.density\n self.ld.num_neighbors\n self.ld.box\n\n self.assertTrue(self.ld.box == freud.box.Box.cube(10))\n\n npt.assert_array_less(np.fabs(self.ld.density - 10.0), 1.5)\n\n npt.assert_array_less(\n np.fabs(self.ld.num_neighbors - 1130.973355292), 200)", "def test_functionals(self):\n for i, f in enumerate(self.get_basis_functions()):\n for j, d in enumerate(self.dofs):\n if i == j:\n assert d.eval(f).expand().simplify() == 1\n else:\n assert d.eval(f).expand().simplify() == 0\n assert d.entity_dim() is not None", "def test_dm_broadcast(self, device, interface, tol):\n dev = qml.device(device, wires=2)\n\n @qml.qnode(dev, interface=interface)\n def circuit(x):\n qml.PauliX(0)\n qml.IsingXX(x, wires=[0, 1])\n return qml.density_matrix(wires=[0, 1])\n\n x = qml.math.asarray([0.4, 0.6, 0.8], like=interface)\n density_matrix = qml.qinfo.reduced_dm(circuit, wires=[0])(x)\n\n expected = np.zeros((3, 2, 2))\n expected[:, 0, 0] = np.sin(x / 2) ** 2\n expected[:, 1, 1] = np.cos(x / 2) ** 2\n\n assert qml.math.allclose(expected, density_matrix, atol=tol)", "def test_init(self):\n ft_shape = (3, 4, 5) # define shape of the tensor in full form\n R = 2 # define Kryskal rank of a tensor in CP form\n core_values = np.ones(R)\n true_orig_fmat_list = [np.arange(orig_dim * R).reshape(orig_dim, R) for orig_dim in ft_shape]\n fmat_list = [fmat.copy() for fmat in true_orig_fmat_list]\n true_mode_names = [\"mode-0\", \"mode-1\", \"mode-2\"]\n\n tensor_cpd = TensorCPD(fmat=fmat_list, core_values=core_values)\n assert isinstance(tensor_cpd.fmat, list)\n assert tensor_cpd.order == len(fmat_list)\n assert isinstance(tensor_cpd.rank, tuple)\n assert tensor_cpd.rank == (R,)\n assert isinstance(tensor_cpd._core_values, np.ndarray)\n 
np.testing.assert_array_equal(tensor_cpd._core_values, core_values)\n assert tensor_cpd._core_values is not core_values\n assert tensor_cpd.mode_names == true_mode_names\n assert tensor_cpd.ft_shape == ft_shape\n\n # ------ tests for factor matrices\n for mode, fmat in enumerate(tensor_cpd.fmat):\n # check that values are the same but there are not references\n np.testing.assert_array_equal(fmat, fmat_list[mode])\n assert fmat is not fmat_list[mode]\n\n # check that changes to the matrices have no affect on the TensorCPD\n # (double check for not being references)\n fmat_list[mode] = fmat_list[mode] * 2\n np.testing.assert_array_equal(fmat, true_orig_fmat_list[mode])\n assert fmat is not true_orig_fmat_list[mode]\n\n # ------ tests for core\n true_core = np.array([[[1., 0.],\n [0., 0.]],\n\n [[0., 0.],\n [0., 1.]]]\n )\n assert isinstance(tensor_cpd.core, Tensor)\n np.testing.assert_array_equal(tensor_cpd.core.data, true_core)", "def test_gaussian_circuit(self):\n self.logTestName()\n\n dev = qml.device('strawberryfields.gaussian', wires=1)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.NumberOperator(0))\n\n self.assertAlmostEqual(circuit(1), 1, delta=self.tol)", "def test_flow__distance_raster_MFD_diagonals_true():\n\n # instantiate a model grid\n\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n\n # instantiate an elevation array\n\n z = np.array(\n [[0, 0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0], [0, 0, 0, 0]],\n dtype=\"float64\",\n )\n\n # add the elevation field to the grid\n\n mg.add_field(\"topographic__elevation\", z, at=\"node\")\n\n # instantiate the expected flow__distance array\n # considering flow directions calculated with MFD algorithm\n\n flow__distance_expected = np.array(\n [\n [0, 0, 0, 0],\n [0, 1, 0, 0],\n [0, math.sqrt(2), 1, 0],\n [0, 1 + math.sqrt(2), 2, 0],\n [0, 0, 0, 0],\n ],\n dtype=\"float64\",\n )\n flow__distance_expected = np.reshape(\n flow__distance_expected, mg.number_of_node_rows * mg.number_of_node_columns\n )\n\n # setting boundary conditions\n\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n\n # calculating flow directions with FlowAccumulator component\n\n fa = FlowAccumulator(\n mg, \"topographic__elevation\", flow_director=\"MFD\", diagonals=True\n )\n fa.run_one_step()\n\n # calculating flow distance map\n\n flow__distance = calculate_flow__distance(mg, add_to_grid=True, clobber=True)\n\n # test that the flow__distance utility works as expected\n\n assert_array_equal(flow__distance_expected, flow__distance)", "def test_flow__distance_raster_MFD_diagonals_false():\n\n # instantiate a model grid\n\n mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))\n\n # instantiate an elevation array\n\n z = np.array(\n [[0, 0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0], [0, 0, 0, 0]],\n dtype=\"float64\",\n )\n\n # add the elevation field to the grid\n\n mg.add_field(\"topographic__elevation\", z, at=\"node\")\n\n # instantiate the expected flow__distance array\n # considering flow directions calculated with MFD algorithm\n\n flow__distance_expected = np.array(\n [[0, 0, 0, 0], [0, 1, 0, 0], [0, 2, 1, 0], [0, 3, 2, 0], [0, 0, 0, 0]],\n dtype=\"float64\",\n )\n flow__distance_expected = np.reshape(\n flow__distance_expected, mg.number_of_node_rows * mg.number_of_node_columns\n )\n\n # setting boundary conditions\n\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n 
left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n\n # calculating flow directions with FlowAccumulator component\n\n fa = FlowAccumulator(\n mg, \"topographic__elevation\", flow_director=\"MFD\", diagonals=False\n )\n fa.run_one_step()\n\n # calculating flow distance map\n\n flow__distance = calculate_flow__distance(mg, add_to_grid=True, clobber=True)\n\n # test that the flow__distance utility works as expected\n\n assert_array_equal(flow__distance_expected, flow__distance)", "def testDiMatrix(self):\n absoluteTolerance = 0.003;# Absolute error tolerance for test data (we only have it to 4 digits)\n relativeTolerance = 0.1; # Relative error tolerance (probably not necessary)\n kx = 1.0006; # x component of k vector\n ky = 0.4247; # y component of k vector\n l0 = 2.7; # Free-space wavelength\n k0 = 2.3271; # Free-space wavenumber\n\n # LAYER 1 DATA\n er = 2.0;\n ur = 1.0;\n kz = 0.9046;\n A = complexArray([[2.0049, -0.0427], [-0.0427, 2.0873]]);\n B = complexArray([[-0.0049, 0.0427], [0.0427, -0.0873]]);\n X = complexArray([[0.1493 + 0.9888j, 0+0j],[0+0j, 0.4193 + 0.9888j]]);\n\n D_calc = calculateScatteringDMatrix(A, B, X);\n D_actual = complexArray([[2.0057 - 0.0003j, -0.0445 + 0.0006j],[-0.0445 + 0.0006j, 2.0916 - 0.0013j]]);\n assertAlmostEqual(D_actual, D_calc, absoluteTolerance, relativeTolerance);\n\n # LAYER 2 DATA\n # Since now we have the d-matrix to higher precision we can test it more strongly.\n absoluteTolerance = 0.0001;# Absolute error tolerance for test data (we only have it to 4 digits)\n relativeTolerance = 0.001; # Relative error tolerance (probably not necessary)\n er = 1.0;\n ur = 3.0;\n kz = 1.3485;\n L = 0.5*l0;\n\n A = complexArray([[3.8324, 0.2579],[0.2579, 3.3342]]);\n B = complexArray([[-1.8324, -0.2579], [-0.2579, -1.3342]]);\n X = complexArray([[-0.4583 - 0.8888j, 0+0j],[0+0j, -0.4583 - 0.8888j]]);\n\n D_calc = calculateScatteringDMatrix(A, B, X);\n D_actual = complexArray([[4.3436 - 0.7182j, 0.3604 - 0.1440j], [0.3604 - 0.1440j, 3.6475 - 0.4401j]]);\n assertAlmostEqual(D_actual, D_calc, absoluteTolerance, relativeTolerance);", "def test_purity_non_density_matrix():\n rho = np.array([[1, 2], [3, 4]])\n\n with np.testing.assert_raises(ValueError):\n purity(rho)", "def test_density(self):\n\n r_max = self.r_max + 0.5*self.diameter\n test_set = util.make_raw_query_nlist_test_set(\n self.box, self.pos, self.pos, \"ball\", r_max, 0, True)\n for nq, neighbors in test_set:\n self.ld.compute(nq, neighbors=neighbors)\n\n # Test access\n self.ld.density\n self.ld.num_neighbors\n self.ld.box\n\n self.assertTrue(self.ld.box == freud.box.Box.cube(10))\n\n npt.assert_array_less(np.fabs(self.ld.density - 10.0), 1.5)\n\n npt.assert_array_less(\n np.fabs(self.ld.num_neighbors - 1130.973355292), 200)", "def test_3():\n d = 3\n x = np.zeros((d))\n func_val = mt_obj.griewank_func(x, d)\n assert(func_val == 0)\n assert(np.all(mt_obj.griewank_grad(x, d) == np.zeros((d))))", "def density_mat_fn(circuit, num_wires):\n dev_wires = range(num_wires)\n dev = qml.device(\"default.qubit\", wires=dev_wires)\n zero_state = np.array([0] * len(dev_wires))\n\n @qml.qnode(dev)\n def density_mat(wires_out, *circ_args, basis_state=zero_state, **circ_kwargs):\n qml.BasisState(basis_state, wires=dev_wires)\n circuit(*circ_args, **circ_kwargs)\n return qml.density_matrix(wires=wires_out)\n\n return density_mat", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 
0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def test_density_to_cartesian(self):\n\n q0 = state.zeros(1)\n rho = q0.density()\n x, y, z = helper.density_to_cartesian(rho)\n self.assertEqual(x, 0.0)\n self.assertEqual(y, 0.0)\n self.assertEqual(z, 1.0)\n\n q1 = state.ones(1)\n rho = q1.density()\n x, y, z = helper.density_to_cartesian(rho)\n self.assertEqual(x, 0.0)\n self.assertEqual(y, 0.0)\n self.assertEqual(z, -1.0)\n\n qh = ops.Hadamard()(q0)\n rho = qh.density()\n x, y, z = helper.density_to_cartesian(rho)\n self.assertTrue(math.isclose(np.real(x), 1.0, abs_tol=1e-6))\n self.assertTrue(math.isclose(np.real(y), 0.0))\n self.assertTrue(math.isclose(np.real(z), 0.0, abs_tol=1e-6))\n\n qr = ops.RotationZ(math.pi/2)(qh)\n rho = qr.density()\n x, y, z = helper.density_to_cartesian(rho)\n self.assertTrue(math.isclose(np.real(x), 0.0, abs_tol=1e-6))\n self.assertTrue(math.isclose(np.real(y), 1.0, abs_tol=1e-6))\n self.assertTrue(math.isclose(np.real(z), 0.0, abs_tol=1e-6))", "def test_cmfd_feed_ng():\n # Initialize and set CMFD mesh\n cmfd_mesh = cmfd.CMFDMesh()\n cmfd_mesh.lower_left = (-1.25984, -1.25984, -1.0)\n cmfd_mesh.upper_right = (1.25984, 1.25984, 1.0)\n cmfd_mesh.dimension = (2, 2, 1)\n cmfd_mesh.energy = (0.0, 0.625, 5.53080, 20000000)\n cmfd_mesh.albedo = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)\n\n # Initialize and run CMFDRun object\n cmfd_run = cmfd.CMFDRun()\n cmfd_run.mesh = cmfd_mesh\n cmfd_run.reset = [5]\n cmfd_run.tally_begin = 10\n cmfd_run.solver_begin = 10\n cmfd_run.display = {'dominance': True}\n cmfd_run.feedback = True\n cmfd_run.downscatter = True\n cmfd_run.gauss_seidel_tolerance = [1.e-15, 1.e-20]\n cmfd_run.run()\n\n # Initialize and run CMFD test harness\n harness = CMFDTestHarness('statepoint.20.h5', cmfd_run)\n harness.main()", "def test_design_matrix() -> None:\n lag_list = [[2], [1, 3]]\n fit_intercept = [False, True]\n y = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.int16)\n X1_expected = torch.tensor([\n [2, 3, 4]\n ], dtype=torch.int16).transpose(0, 1)\n X2_expected = torch.tensor([\n [1, 1, 1],\n [3, 4, 5],\n [1, 2, 3]\n ], dtype=torch.int16).transpose(0, 1)\n y_expect = torch.tensor([4, 5, 6], dtype=torch.int16)\n y, X = ut.design_matrix(y, lag_list, fit_intercept)\n if torch.equal(y, y_expect):\n print(\"Dependent variable test pass!\")\n else:\n print(\"Dependent variable test fail!\")\n raise\n if torch.equal(X[0], X1_expected):\n print(\"First regime test 1 pass, design_matrix\")\n else:\n print(\"First regime test 1 failed, design_matrix\")\n print(X[0])\n print(X1_expected)\n raise\n if torch.equal(X[1], X2_expected):\n print(\"Second regime test 1 pass, design_matrix\")\n return\n else:\n print(\"Second regime test 1 failed, design_matrix\")\n print(X[1])\n print(X2_expected)\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the CatState gate works correctly
def test_cat_state(self, tol):
    a = 0.312
    b = 0.123
    c = 0.532
    wires = [0]

    gate_name = "CatState"
    operation = qml.CatState

    cutoff_dim = 10
    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    sf_operation = dev._operation_map[gate_name]

    assert dev.supports_operation(gate_name)

    @qml.qnode(dev)
    def circuit(*args):
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        operation(*args, wires=wires)
        return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))

    res = circuit(a, b, c)
    sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, a * np.exp(1j * b), c)
    assert np.allclose(res, sf_res, atol=tol, rtol=0)
[ "def test_new_state(self):\n self.new_helper(\"State\")", "def test_covid_data_is_for_correct_state(self):\n self.assertEqual(self.state,\n self.data_processor.agg_data_frame['State'].\n values.all())", "def test_CatFedAfterEating(self):\r\n name = \"Cat Name\"\r\n cat = Cat(name)\r\n cat.eat()\r\n self.assertTrue(cat.fed)", "def test_create(self):\n self.assertIsInstance(self.obj, State)", "def apply_state(self, state):", "def test_state_via_state_topic(self):\n self.hass.config.components = ['mqtt']\n self.assertTrue(setup_component(self.hass, cover.DOMAIN, {\n cover.DOMAIN: {\n 'platform': 'mqtt',\n 'name': 'test',\n 'state_topic': 'state-topic',\n 'command_topic': 'command-topic',\n 'qos': 0,\n 'payload_open': 'OPEN',\n 'payload_close': 'CLOSE',\n 'payload_stop': 'STOP'\n }\n }))\n\n state = self.hass.states.get('cover.test')\n self.assertEqual(STATE_UNKNOWN, state.state)\n\n fire_mqtt_message(self.hass, 'state-topic', '0')\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test')\n self.assertEqual(STATE_CLOSED, state.state)\n\n fire_mqtt_message(self.hass, 'state-topic', '50')\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test')\n self.assertEqual(STATE_OPEN, state.state)\n\n fire_mqtt_message(self.hass, 'state-topic', '100')\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test')\n self.assertEqual(STATE_OPEN, state.state)\n\n fire_mqtt_message(self.hass, 'state-topic', STATE_CLOSED)\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test')\n self.assertEqual(STATE_CLOSED, state.state)\n\n fire_mqtt_message(self.hass, 'state-topic', STATE_OPEN)\n self.hass.block_till_done()\n\n state = self.hass.states.get('cover.test')\n self.assertEqual(STATE_OPEN, state.state)", "def testPushState(self):\n parser = expression_parser.EventFilterExpressionParser()\n parser._Reset()\n\n parser._state = 'INITIAL'\n self.assertEqual(len(parser._state_stack), 0)\n\n next_state = parser._PushState()\n self.assertIsNone(next_state)\n self.assertEqual(len(parser._state_stack), 1)\n self.assertEqual(parser._state_stack[0], 'INITIAL')\n self.assertEqual(parser._state, 'INITIAL')", "def random_state(self, state):\n pass", "def test_init_state(self) -> None:\n # Execute\n state = self.state_factory()\n\n # Assert\n assert isinstance(state, State)", "def test_circuit_init(self):\n circuit, target = self.simple_circuit_no_measure()\n op = SuperOp(circuit)\n target = SuperOp(target)\n self.assertEqual(op, target)", "def test_type_state(self):\n self.assertEqual(type(self.My_state), State)", "def test_reproduce_bad_state(self):\n calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)\n\n self.hass.states.set('light.test', 'off')\n\n state.reproduce_state(self.hass, ha.State('light.test', 'bad'))\n\n self.hass.block_till_done()\n\n assert len(calls) == 0\n assert 'off' == self.hass.states.get('light.test').state", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def action(self, state):\n pass", "def test_get_all_feature_states_returns_correct_value_when_traits_passed_manually(\n self,\n ):\n # Given 
- an identity with a trait that has an integer value of 10\n trait_key = \"trait-key\"\n trait_value = 10\n identity = Identity.objects.create(\n identifier=\"test-identity\", environment=self.environment\n )\n trait = Trait(\n identity=identity,\n trait_key=trait_key,\n integer_value=trait_value,\n value_type=INTEGER,\n )\n\n # and a segment that matches all identities with a trait value greater than or equal to 5\n segment = Segment.objects.create(name=\"Test segment 1\", project=self.project)\n rule = SegmentRule.objects.create(segment=segment, type=SegmentRule.ALL_RULE)\n Condition.objects.create(\n rule=rule, property=trait_key, value=5, operator=GREATER_THAN_INCLUSIVE\n )\n\n # and a feature flag\n default_state = False\n feature_flag = Feature.objects.create(\n project=self.project, name=\"test_flag\", default_enabled=default_state\n )\n\n # which is overridden by the segment\n enabled_for_segment = not default_state\n FeatureSegment.objects.create(\n feature=feature_flag,\n segment=segment,\n environment=self.environment,\n priority=1,\n enabled=enabled_for_segment,\n )\n\n # When - we get all feature states for an identity\n feature_states = identity.get_all_feature_states(traits=[trait])\n\n # Then - the flag is returned with the correct state\n assert len(feature_states) == 1\n assert feature_states[0].enabled == enabled_for_segment", "async def test_state_triggers(hass: HomeAssistant) -> None:\n hass.states.async_set(\"sensor.test_monitored\", STATE_OFF)\n\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"observations\": [\n {\n \"platform\": \"state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"to_state\": \"off\",\n \"prob_given_true\": 0.9999,\n \"prob_given_false\": 0.9994,\n },\n ],\n \"prior\": 0.2,\n \"probability_threshold\": 0.32,\n }\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"binary_sensor.test_binary\").state == STATE_OFF\n\n events = []\n async_track_state_change_event(\n hass, \"binary_sensor.test_binary\", callback(lambda event: events.append(event))\n )\n\n context = Context()\n hass.states.async_set(\"sensor.test_monitored\", STATE_ON, context=context)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n assert events[0].context == context", "def test_coherent_state_deriv():\n t = symbols('t', is_positive=True)\n alpha = Function('alpha')\n expr = CoherentStateKet(alpha(t), hs=1)\n assert not expr.diff(t).is_zero", "async def test_reproducing_states(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n assert await async_setup_component(\n hass,\n \"input_number\",\n {\n \"input_number\": {\n \"test_number\": {\"min\": \"5\", \"max\": \"100\", \"initial\": VALID_NUMBER1}\n }\n },\n )\n\n # These calls should do nothing as entities already in desired state\n await async_reproduce_state(\n hass,\n [\n State(\"input_number.test_number\", VALID_NUMBER1),\n # Should not raise\n State(\"input_number.non_existing\", \"234\"),\n ],\n )\n\n assert hass.states.get(\"input_number.test_number\").state == VALID_NUMBER1\n\n # Test reproducing with different state\n await async_reproduce_state(\n hass,\n [\n State(\"input_number.test_number\", VALID_NUMBER2),\n # Should not raise\n State(\"input_number.non_existing\", \"234\"),\n ],\n )\n\n assert hass.states.get(\"input_number.test_number\").state == VALID_NUMBER2\n\n # Test setting state to number out of range\n await 
async_reproduce_state(hass, [State(\"input_number.test_number\", \"150\")])\n\n # The entity states should be unchanged after trying to set them to out-of-range number\n assert hass.states.get(\"input_number.test_number\").state == VALID_NUMBER2\n\n await async_reproduce_state(\n hass,\n [\n # Test invalid state\n State(\"input_number.test_number\", \"invalid_state\"),\n # Set to state it already is.\n State(\"input_number.test_number\", VALID_NUMBER2),\n ],\n )", "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the expectation value of the NumberOperator observable yields the correct result
def test_number_operator(self, tol):
    cutoff_dim = 10

    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    gate_name = "NumberOperator"
    assert dev.supports_observable(gate_name)

    op = qml.NumberOperator
    sf_expectation = dev._observable_map[gate_name]
    wires = [0]

    @qml.qnode(dev)
    def circuit(*args):
        qml.Displacement(0.1, 0, wires=0)
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        return qml.expval(op(*args, wires=wires))

    assert np.allclose(
        circuit(), SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0
    )
[ "def test_tensor_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"TensorN\"\n assert dev.supports_observable(gate_name)\n\n op = qml.TensorN\n sf_expectation = dev._observable_map[gate_name]\n wires = [0, 1]\n\n @qml.qnode(dev)\n def circuit():\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(wires=wires))\n\n expval = circuit()\n assert np.allclose(\n expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )", "def test_number(self):\n self.assertTrue(self.atom.number == self.atom.element.number)", "def test_n(self):\n self.assertAlmostEqual(self.singleExponentialDown.n, self.n, 4)", "def test_math(self):\n self.assertTrue((1 + 1) == 2)", "def test_operator_get_operator(self):\n pass", "def test_pow_method_with_non_numeric_power_raises_error(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operator\"\"\"\n num_wires = 1\n\n with pytest.raises(ValueError, match=\"Cannot raise an Operator\"):\n _ = DummyOp(wires=[0]) ** DummyOp(wires=[0])", "def test_as_number_coercion(self):\n for _state in ('0', '0.0', 0, 0.0):\n assert 0.0 == state.state_as_number(\n ha.State('domain.test', _state, {}))\n for _state in ('1', '1.0', 1, 1.0):\n assert 1.0 == state.state_as_number(\n ha.State('domain.test', _state, {}))", "def test_n(self):\n self.assertAlmostEqual(self.stick.n.value_si, self.n, 6)", "def test_decorators():\n g=PhysicalQuantity(98, 'mm')/ PhysicalQuantity(1, 's**2')\n assert g.value == 98\n assert g.base.value == 0.098\n assert str(g.unit) == \"mm/s^2\"", "def test_unbox_to_num():\n assert ComposedUnit([unit('m')], [unit('m')], 8) == 8", "def test_isnum(inp, exp):\n pytest.debug_func()\n assert nldt.isnum(inp) == exp", "def test_single_number(self) -> None:\n val = self.parse(self.arithmetic_lexer.lex(\"13\"))\n self.assertEqual(13, val)", "def test_num_buses_value_4(self):\n actual = a1.num_buses(1756)\n expected = 36\n self.assertEqual(actual,expected)", "def evaluates_to_number(self):\n return self.shape_for_testing == ()", "def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and 
isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))", "def test_num_buses_value_0(self):\n actual = a1.num_buses(0)\n expected = 0\n self.assertEqual(actual,expected)", "def test_arithmetic(self):\n self.assertEqualRun(\"(+ (* 3 (- 2 1)) (/ 12 3))\", 7)\n self.assertEqualRun(\"(max 0 (min 100 50))\", 50)\n self.assertTrueRun(\"(eq? 7 7)\")\n self.assertTrueRun(\"(not (eq? 7 8))\")\n self.assertTrueRun(\"(< 7 8)\")\n self.assertTrueRun(\"(not (> 1 8))\")\n self.assertTrueRun(\"(<= 7 7)\")\n self.assertTrueRun(\"(not (>= 7 8))\")", "def test_no_exp(num, ref):\n assert pmisc.number._no_exp(num) == ref", "def test_calculator_result(self):\n self.calculator = Calculator()\n assert self.calculator.result == 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the expectation value of the TensorN observable yields the correct result
def test_tensor_number_operator(self, tol):
    cutoff_dim = 10

    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    gate_name = "TensorN"
    assert dev.supports_observable(gate_name)

    op = qml.TensorN
    sf_expectation = dev._observable_map[gate_name]
    wires = [0, 1]

    @qml.qnode(dev)
    def circuit():
        qml.Displacement(0.1, 0, wires=0)
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        return qml.expval(op(wires=wires))

    expval = circuit()
    assert np.allclose(
        expval, SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0
    )
[ "async def test_multiple_numeric_observations(hass: HomeAssistant) -> None:\n\n config = {\n \"binary_sensor\": {\n \"platform\": \"bayesian\",\n \"name\": \"Test_Binary\",\n \"observations\": [\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 10,\n \"above\": 0,\n \"prob_given_true\": 0.4,\n \"prob_given_false\": 0.0001,\n },\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 100,\n \"above\": 30,\n \"prob_given_true\": 0.6,\n \"prob_given_false\": 0.0001,\n },\n ],\n \"prior\": 0.1,\n }\n }\n\n assert await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n hass.states.async_set(\"sensor.test_monitored\", STATE_UNKNOWN)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n for _, attrs in state.attributes.items():\n json.dumps(attrs)\n assert state.attributes.get(\"occurred_observation_entities\") == []\n assert state.attributes.get(\"probability\") == 0.1\n # No observations made so probability should be the prior\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 20)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert round(abs(0.026 - state.attributes.get(\"probability\")), 7) < 0.01\n # Step 1 Calculated where P(A) = 0.1, P(~B|A) = 0.6 (negative obs), P(~B|notA) = 0.9999 -> 0.0625\n # Step 2 P(A) = 0.0625, P(B|A) = 0.4 (negative obs), P(B|notA) = 0.9999 -> 0.26\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 35)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(1 - state.attributes.get(\"probability\")) < 0.01\n # Step 1 Calculated where P(A) = 0.1, P(~B|A) = 0.6 (negative obs), P(~B|notA) = 0.9999 -> 0.0625\n # Step 2 P(A) = 0.0625, P(B|A) = 0.6, P(B|notA) = 0.0001 -> 0.9975\n\n assert state.state == \"on\"\n assert state.attributes.get(\"observations\")[0][\"platform\"] == \"numeric_state\"\n assert state.attributes.get(\"observations\")[1][\"platform\"] == \"numeric_state\"", "def test_predict(self):\n assert 2 == 2", "def test_single_expectation_value(self, tol, batch_dim):\n if batch_dim is not None:\n pytest.skip(msg=\"JVP computation of batched tapes is disallowed, see #4462\")\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543 if batch_dim is None else 0.543 * np.arange(1, 1 + batch_dim)\n y = -0.654\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {0, 1}\n tangent = np.array([1.0, 1.0])\n\n tapes, fn = qml.gradients.jvp(tape, tangent, param_shift)\n assert len(tapes) == 4\n\n res = fn(dev.batch_execute(tapes))\n assert res.shape == () if batch_dim is None else (batch_dim,)\n\n exp = np.sum(np.array([-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]), axis=0)\n assert np.allclose(res, exp, atol=tol, rtol=0)", "async def test_sensor_numeric_state(hass: HomeAssistant) -> None:\n config = {\n \"binary_sensor\": {\n \"platform\": \"bayesian\",\n \"name\": \"Test_Binary\",\n \"observations\": [\n {\n \"platform\": 
\"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 10,\n \"above\": 5,\n \"prob_given_true\": 0.7,\n \"prob_given_false\": 0.4,\n },\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored1\",\n \"below\": 7,\n \"above\": 5,\n \"prob_given_true\": 0.9,\n \"prob_given_false\": 0.2,\n },\n ],\n \"prior\": 0.2,\n }\n }\n\n assert await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n hass.states.async_set(\"sensor.test_monitored\", 6)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(state.attributes.get(\"probability\") - 0.304) < 0.01\n # A = sensor.test_binary being ON\n # B = sensor.test_monitored in the range [5, 10]\n # Bayes theorum is P(A|B) = P(B|A) * P(A) / P(B|A)*P(A) + P(B|~A)*P(~A).\n # Where P(B|A) is prob_given_true and P(B|~A) is prob_given_false\n # Calculated using P(A) = 0.2, P(B|A) = 0.7, P(B|~A) = 0.4 -> 0.30\n\n hass.states.async_set(\"sensor.test_monitored\", 4)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(state.attributes.get(\"probability\") - 0.111) < 0.01\n # As abve but since the value is equal to 4 then this is a negative observation (~B) where P(~B) == 1 - P(B) because B is binary\n # We therefore want to calculate P(A|~B) so we use P(~B|A) (1-0.7) and P(~B|~A) (1-0.4)\n # Calculated using bayes theorum where P(A) = 0.2, P(~B|A) = 1-0.7 (as negative observation), P(~B|notA) = 1-0.4 -> 0.11\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 6)\n await hass.async_block_till_done()\n hass.states.async_set(\"sensor.test_monitored1\", 6)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert state.attributes.get(\"observations\")[0][\"prob_given_true\"] == 0.7\n assert state.attributes.get(\"observations\")[1][\"prob_given_true\"] == 0.9\n assert state.attributes.get(\"observations\")[1][\"prob_given_false\"] == 0.2\n assert abs(state.attributes.get(\"probability\") - 0.663) < 0.01\n # Here we have two positive observations as both are in range. We do a 2-step bayes. 
The output of the first is used as the (updated) prior in the second.\n # 1st step P(A) = 0.2, P(B|A) = 0.7, P(B|notA) = 0.4 -> 0.304\n # 2nd update: P(A) = 0.304, P(B|A) = 0.9, P(B|notA) = 0.2 -> 0.663\n\n assert state.state == \"on\"\n\n hass.states.async_set(\"sensor.test_monitored1\", 0)\n await hass.async_block_till_done()\n hass.states.async_set(\"sensor.test_monitored\", 4)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert abs(state.attributes.get(\"probability\") - 0.0153) < 0.01\n # Calculated using bayes theorum where P(A) = 0.2, P(~B|A) = 0.3, P(~B|notA) = 0.6 -> 0.11\n # 2nd update: P(A) = 0.111, P(~B|A) = 0.1, P(~B|notA) = 0.8\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 15)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.state == \"off\"\n\n assert len(async_get(hass).issues) == 0", "def test_equality_of_models_with_and_without_observables():\n # Now specify a set of observables\n observables = [np.random.randint(2, 6)]\n point_constr = {\"observables\": observables}\n\n # Get simulated data and likelihood for myopic model.\n params, options = generate_random_model(myopic=True, point_constr=point_constr)\n\n # Get all reward values\n index_reward = [\n x for x in set(params.index.get_level_values(0)) if \"nonpec\" in x or \"wage\" in x\n ]\n\n # Get all indices that have\n obs_labels = generate_obs_labels(observables, index_reward)\n\n # Set these values to zero\n params.loc[obs_labels, \"value\"] = 0\n\n # Simulate the base model\n simulate = rp.get_simulate_func(params, options)\n df = simulate(params)\n\n # Put two new values into the eq\n for x in obs_labels:\n params.loc[x, \"value\"] = params.loc[(x[0], \"constant\"), \"value\"]\n\n for x in index_reward:\n params.loc[(x, \"constant\"), \"value\"] = 0\n\n # Simulate the new model\n df_ = simulate(params)\n\n # test for equality\n pd.testing.assert_frame_equal(df_, df)", "def test_ContinuousModel_multivariate():\n\n class MyModel(ContinuousModel):\n def __init__(self):\n self.weight = Parameter([5, 3], name=\"Weight\")\n self.bias = Parameter([1, 3], name=\"Bias\")\n self.std = ScaleParameter([1, 3], name=\"Std\")\n\n def __call__(self, x):\n return Normal(x @ self.weight() + self.bias(), self.std())\n\n # Instantiate the model\n model = MyModel()\n\n # Data\n x = np.random.randn(100, 5).astype(\"float32\")\n w = np.random.randn(5, 3).astype(\"float32\")\n y = x @ w + 1\n\n # Fit the model\n model.fit(x, y, batch_size=50, epochs=2, lr=0.01)\n\n # pred_dist_plot should not work with nonscalar output\n with pytest.raises(NotImplementedError):\n model.pred_dist_plot(x[:10, :], n=10)\n\n # predictive_prc should not work with nonscalar output\n with pytest.raises(NotImplementedError):\n model.predictive_prc(x[:10, :], y[:10, :], n=10)", "def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, 
self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))", "def expectation(wavefn, operator, backend):\n\texp = ExpectationFactory.build(operator = operator, backend = backend)\n\tcomposed_circuit = exp.convert(StateFn(operator, is_measurement=True)).compose(CircuitStateFn(wavefn))\n\tsampler = CircuitSampler(backend)\n\tvals = sampler.convert(composed_circuit).eval()\n\treturn np.real(vals)", "def testSamples(self):\n # Increasing the number of samples can help reduce the variance and make the\n # sample mean closer to the distribution mean.\n num_samples = 1000\n theta, sigma = 0.1, 0.2\n ou = common.ornstein_uhlenbeck_process(\n tf.zeros([10]), damping=theta, stddev=sigma\n )\n samples = np.ndarray([num_samples, 10])\n self.evaluate(tf.compat.v1.global_variables_initializer())\n for i in range(num_samples):\n samples[i] = self.evaluate(ou)\n\n diffs = np.ndarray([num_samples - 1, 10])\n for i in range(num_samples - 1):\n diffs[i] = samples[i + 1] - (1 - theta) * samples[i]\n flat_diffs = diffs.reshape([-1])\n\n mean, variance = flat_diffs.mean(), flat_diffs.var()\n # To avoid flakiness, we can only expect the sample statistics to match\n # the population statistics to one or two decimal places.\n self.assertAlmostEqual(mean, 0.0, places=1)\n self.assertAlmostEqual(variance, sigma * sigma, places=2)", "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "def test_loss_and_train_output(\n test: test_utils.TestCase,\n expect_equal_loss_values: bool,\n agent: tf_agent.TFAgent,\n experience: types.NestedTensor,\n weights: Optional[types.Tensor] = None,\n **kwargs\n):\n loss_info_from_loss = agent.loss(\n 
experience=experience, weights=weights, **kwargs\n )\n loss_info_from_train = agent.train(\n experience=experience, weights=weights, **kwargs\n )\n if not tf.executing_eagerly():\n test.evaluate(tf.compat.v1.global_variables_initializer())\n loss_info_from_loss = test.evaluate(loss_info_from_loss)\n loss_info_from_train = test.evaluate(loss_info_from_train)\n\n test.assertIsInstance(loss_info_from_train, tf_agent.LossInfo)\n test.assertEqual(type(loss_info_from_train), type(loss_info_from_loss))\n\n # Compare loss values.\n if expect_equal_loss_values:\n test.assertEqual(\n loss_info_from_train.loss,\n loss_info_from_loss.loss,\n msg=(\n 'Expected equal loss values, but train() has output '\n '{loss_from_train} vs loss() output {loss_from_loss}.'.format(\n loss_from_train=loss_info_from_train.loss,\n loss_from_loss=loss_info_from_loss.loss,\n )\n ),\n )\n else:\n test.assertNotEqual(\n loss_info_from_train.loss,\n loss_info_from_loss.loss,\n msg=(\n 'Expected train() and loss() output to have different loss values, '\n 'but both are {loss}.'.format(loss=loss_info_from_train.loss)\n ),\n )\n\n # Check that both `LossInfo` outputs have matching dtypes and shapes.\n nest_utils.assert_tensors_matching_dtypes_and_shapes(\n loss_info_from_train,\n loss_info_from_loss,\n test,\n '`LossInfo` from train()',\n '`LossInfo` from loss()',\n )", "def test_distribution_of_observables():\n # Now specify a set of observables\n point_constr = {\"observables\": [np.random.randint(2, 6)], \"simulation_agents\": 1000}\n\n params, options = generate_random_model(point_constr=point_constr)\n\n simulate = rp.get_simulate_func(params, options)\n df = simulate(params)\n\n # Check observable probabilities\n probs = df[\"Observable_0\"].value_counts(normalize=True, sort=False)\n\n # Check proportions\n n_levels = point_constr[\"observables\"][0]\n for level in range(n_levels):\n # Some observables might be missing in the simulated data because of small\n # probabilities. 
Test for zero probability in this case.\n probability = probs.loc[level] if level in probs.index else 0\n\n params_probability = params.loc[\n (f\"observable_observable_0_{level}\", \"probability\"), \"value\"\n ]\n\n np.testing.assert_allclose(probability, params_probability, atol=0.05)", "def expectation(n):\n print(n)\n p, i = probabilities(n)\n print('\\nProbabilities: ', p[:10])\n print('Iterations: ', i[:10])\n expect = np.dot(p, i)\n return expect", "def test_multiple_expectation_values(self, tol, batch_dim):\n if batch_dim is not None:\n pytest.skip(msg=\"JVP computation of batched tapes is disallowed, see #4462\")\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543 if batch_dim is None else 0.543 * np.arange(1, 1 + batch_dim)\n y = -0.654\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n qml.expval(qml.PauliX(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {0, 1}\n tangent = np.array([1.0, 2.0])\n\n tapes, fn = qml.gradients.jvp(tape, tangent, param_shift)\n assert len(tapes) == 4\n\n res = fn(dev.batch_execute(tapes))\n assert isinstance(res, tuple)\n assert len(res) == 2\n assert all(r.shape == () if batch_dim is None else (batch_dim,) for r in res)\n\n exp = [-np.sin(x), 2 * np.cos(y)]\n if batch_dim is not None:\n exp[1] = np.tensordot(np.ones(batch_dim), exp[1], axes=0)\n assert np.allclose(res, exp, atol=tol, rtol=0)", "def assert_is_model_tensor(self, x: TensorLike) -> None:", "def test_number_operator(self, tol):\n cutoff_dim = 10\n\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n gate_name = \"NumberOperator\"\n assert dev.supports_observable(gate_name)\n\n op = qml.NumberOperator\n sf_expectation = dev._observable_map[gate_name]\n wires = [0]\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.Displacement(0.1, 0, wires=0)\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n return qml.expval(op(*args, wires=wires))\n\n assert np.allclose(\n circuit(), SF_expectation_reference(sf_expectation, cutoff_dim, wires), atol=tol, rtol=0\n )", "def test_model(self):\n power_ebsilon = -31.769\n power_tespy = round(\n self.nw.busses['total output power'].P.val / 1e6, 3)\n msg = (\n 'The total power calculated (' + str(power_tespy) + ') does not '\n 'match the power calculated with the EBSILON model (' +\n str(power_ebsilon) + ').')\n assert power_tespy == power_ebsilon, msg\n\n T_c79_ebsilon = 296.254\n T_c79_tespy = round(self.nw.get_conn('79').T.val, 3)\n msg = (\n 'The temperature at connection 79 calculated (' +\n str(T_c79_tespy) + ') does not match the temperature calculated '\n 'with the EBSILON model (' + str(T_c79_ebsilon) + ').')\n assert T_c79_tespy == T_c79_ebsilon, msg", "def test_n(self):\n self.assertAlmostEqual(self.surfarr.n.value_si, self.n, 6)", "def test_T0(self):\n self.m.T0.value = 50044.3322\n # I don't understand why this is failing... something about float128\n # Does not fail for me (both lines) -- RvH 02/22/2015\n self.assertTrue(numpy.isclose(self.m.T0.value, 50044.3322))\n self.assertEqual(self.m.T0.value, 50044.3322)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the expectation value of the generalized quadrature observable yields the correct result
def test_quad_operator(self, tol):
    cutoff_dim = 10
    a = 0.312

    dev = qml.device("strawberryfields.fock", wires=2, cutoff_dim=cutoff_dim)

    op = qml.QuadOperator
    gate_name = "QuadOperator"
    assert dev.supports_observable(gate_name)

    sf_expectation = dev._observable_map[gate_name]
    wires = [0]

    @qml.qnode(dev)
    def circuit(*args):
        qml.Displacement(0.1, 0, wires=0)
        qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])
        return qml.expval(op(*args, wires=wires))

    assert np.allclose(
        circuit(a),
        SF_expectation_reference(sf_expectation, cutoff_dim, wires, a),
        atol=tol,
        rtol=0,
    )
[ "def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points):\n num_gauss_hermite_points = 40 if num_gauss_hermite_points is None else num_gauss_hermite_points\n\n if obj2 is None:\n eval_func = lambda x: get_eval_func(obj1, feature1)(x)\n mu, cov = p.mu[:-1], p.cov[0, :-1] # cross covariances are not needed\n elif obj1 is None:\n eval_func = lambda x: get_eval_func(obj2, feature2)(x)\n mu, cov = p.mu[1:], p.cov[0, 1:] # cross covariances are not needed\n else:\n eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(tf.split(x, 2, 1)[0]) *\n get_eval_func(obj2, feature2, np.s_[:, None, :])(tf.split(x, 2, 1)[1]))\n mu = tf.concat((p.mu[:-1, :], p.mu[1:, :]), 1) # Nx2D\n cov_top = tf.concat((p.cov[0, :-1, :, :], p.cov[1, :-1, :, :]), 2) # NxDx2D\n cov_bottom = tf.concat((tf.matrix_transpose(p.cov[1, :-1, :, :]), p.cov[0, 1:, :, :]), 2)\n cov = tf.concat((cov_top, cov_bottom), 1) # Nx2Dx2D\n\n return mvnquad(eval_func, mu, cov, num_gauss_hermite_points)", "def _quadrature_expectation(p, obj1, feature1, obj2, feature2, num_gauss_hermite_points):\n num_gauss_hermite_points = 100 if num_gauss_hermite_points is None else num_gauss_hermite_points\n\n if obj2 is None:\n eval_func = lambda x: get_eval_func(obj1, feature1)(x)\n elif obj1 is None:\n raise NotImplementedError(\"First object cannot be None.\")\n else:\n eval_func = lambda x: (get_eval_func(obj1, feature1, np.s_[:, :, None])(x) *\n get_eval_func(obj2, feature2, np.s_[:, None, :])(x))\n\n if isinstance(p, DiagonalGaussian):\n if isinstance(obj1, kernels.Kernel) and isinstance(obj2, kernels.Kernel) \\\n and obj1.on_separate_dims(obj2): # no joint expectations required\n\n eKxz1 = quadrature_expectation(p, (obj1, feature1),\n num_gauss_hermite_points=num_gauss_hermite_points)\n eKxz2 = quadrature_expectation(p, (obj2, feature2),\n num_gauss_hermite_points=num_gauss_hermite_points)\n return eKxz1[:, :, None] * eKxz2[:, None, :]\n\n else:\n cov = tf.matrix_diag(p.cov)\n else:\n cov = p.cov\n return mvnquad(eval_func, p.mu, cov, num_gauss_hermite_points)", "def test_qgt_validation(self, qgt_type):\n args = () if qgt_type == ReverseQGT else (self.estimator,)\n qgt = qgt_type(*args)\n\n a = Parameter(\"a\")\n qc = QuantumCircuit(1)\n qc.rx(a, 0)\n parameter_values = [[np.pi / 4]]\n with self.subTest(\"assert number of circuits does not match\"):\n with self.assertRaises(ValueError):\n qgt.run([qc, qc], parameter_values)\n with self.subTest(\"assert number of parameter values does not match\"):\n with self.assertRaises(ValueError):\n qgt.run([qc], [[np.pi / 4], [np.pi / 2]])\n with self.subTest(\"assert number of parameters does not match\"):\n with self.assertRaises(ValueError):\n qgt.run([qc], parameter_values, parameters=[[a], [a]])", "def test_integral_9():\n epsilon = 0.005\n parameters['target'] = 5000\n parameters['mu1'], parameters['mu2'], parameters['mu3'] = 300., 400., 500.\n parameters['mu4'], parameters['mu5'], parameters['mu6'] = 600., 700., 800.\n parameters['mu7'], parameters['mu8'], parameters['mu9'] = 600., 700., 800.\n parameters['sigma1'], parameters['sigma2'], parameters['sigma3'] = 90., 190., 290.\n parameters['sigma4'], parameters['sigma5'], parameters['sigma6'] = 290., 190., 90.\n parameters['sigma7'], parameters['sigma8'], parameters['sigma9'] = 290., 190., 90.\n parameters['distribution'] = 'norm'\n parameters['retailers'] = 9\n parameters['N'] = 200000\n parameters['scaling'] = False\n parameters['Q1'] = 500.\n parameters['Q2'] = 600.\n parameters['Q3'] = 500.\n 
parameters['Q4'] = 500.\n parameters['Q5'] = 600.\n parameters['Q6'] = 700.\n parameters['Q7'] = 500.\n parameters['Q8'] = 600.\n parameters['Q9'] = 700.\n\n v = integral(parameters)\n assert abs(v - 0.0211) <= epsilon\n\n parameters['target'] = 2000\n parameters['mu1'], parameters['mu2'], parameters['mu3'] = 300., 300., 300.\n parameters['mu4'], parameters['mu5'], parameters['mu6'] = 300., 300., 300.\n parameters['mu7'], parameters['mu8'], parameters['mu9'] = 300., 300., 300.\n parameters['Q1'] = 500.\n parameters['Q2'] = 600.\n parameters['Q3'] = 500.\n parameters['Q4'] = 500.\n parameters['Q5'] = 600.\n parameters['Q6'] = 700.\n parameters['Q7'] = 500.\n parameters['Q8'] = 600.\n parameters['Q9'] = 700.\n\n v = integral(parameters)\n assert abs(v - 0.851235) <= epsilon\n\n parameters['target'] = 4000\n parameters['Q1'] = 500.\n parameters['Q2'] = 600.\n parameters['Q3'] = 500.\n parameters['Q4'] = 500.\n parameters['Q5'] = 600.\n parameters['Q6'] = 700.\n parameters['Q7'] = 500.\n parameters['Q8'] = 600.\n parameters['Q9'] = 700.\n\n v = integral(parameters)\n assert abs(v - 0.00096) <= epsilon", "def test_integration_error(low, high):\n a, b = 1, 1\n mu, sigma = 0, 1\n print(\"Integrating likelihood function with item parameters \"\n \"a = {}, b = {}, theta = Normal({}, {}) over [{},{}]\".format(a, b, mu, sigma, low, high))\n s, error = quadrature(likelihood, low, high, args=(a, b, mu, sigma), tol=1e-08, rtol=1e-08, maxiter=50)\n print(\"Fixed-tolerance quadrature: result = %.8f error = %.2e\" % (s, error))\n for n in range(5, 21):\n sn, _ = fixed_quad(likelihood, low, high, args=(a, b, mu, sigma), n=n)\n print(\"Fixed-point quadrature: n = %2d result = %.8f error = %.2e\" % (n, sn, np.abs(s - sn)))", "def test_quad_form(self):\n self.logTestName()\n H, _ = rotation(self.phi, mode=1, hbar=self.hbar)\n H = normal_ordered(get_quad_operator(H, hbar=self.hbar), hbar=self.hbar)\n expected = QuadOperator('q1 q1', -0.5)\n expected += QuadOperator('p1 p1', -0.5)\n expected += QuadOperator('', 1)\n self.assertEqual(H, expected)", "def test_evaluate_with_quantities():\n\n # We create two models here - one with quantities, and one without. 
The one\n # without is used to create the reference values for comparison.\n\n g = Gaussian1D(1, 1, 0.1)\n gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)\n\n # We first check that calling the Gaussian with quantities returns the\n # expected result\n assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)\n\n # Units have to be specified for the Gaussian with quantities - if not, an\n # error is raised\n with pytest.raises(UnitsError, match=MESSAGE.format(\"Gaussian1D\", \"\", \"m \")):\n gq(1)\n\n # However, zero is a special case\n assert_quantity_allclose(gq(0), g(0) * u.J)\n\n # We can also evaluate models with equivalent units\n assert_allclose(gq(0.0005 * u.km).value, g(0.5))\n\n # But not with incompatible units\n with pytest.raises(UnitsError, match=MESSAGE.format(\"Gaussian1D\", \"s\", \"m\")):\n gq(3 * u.s)\n\n # We also can't evaluate the model without quantities with a quantity\n with pytest.raises(\n UnitsError,\n match=r\"Can only apply 'subtract' function to dimensionless quantities .*\",\n ):\n g(3 * u.m)\n # TODO: determine what error message should be here\n # assert exc.value.args[0] == (\"Units of input 'x', m (length), could not be \"\n # \"converted to required dimensionless input\")", "def test_special_observable_qnode_differentiation(self):\n\n class SpecialObject:\n \"\"\"SpecialObject\n\n A special object that conveniently encapsulates the return value of\n a special observable supported by a special device and which supports\n multiplication with scalars and addition.\n \"\"\"\n\n def __init__(self, val):\n self.val = val\n\n def __mul__(self, other):\n new = SpecialObject(self.val)\n new *= other\n return new\n\n def __imul__(self, other):\n self.val *= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __iadd__(self, other):\n self.val += other.val if isinstance(other, self.__class__) else other\n return self\n\n def __add__(self, other):\n new = SpecialObject(self.val)\n new += other.val if isinstance(other, self.__class__) else other\n return new\n\n def __radd__(self, other):\n return self + other\n\n class SpecialObservable(Observable):\n \"\"\"SpecialObservable\"\"\"\n\n num_wires = AnyWires\n num_params = 0\n par_domain = None\n\n def diagonalizing_gates(self):\n \"\"\"Diagonalizing gates\"\"\"\n return []\n\n class DeviceSupporingSpecialObservable(DefaultQubit):\n name = \"Device supporing SpecialObservable\"\n short_name = \"default.qibit.specialobservable\"\n observables = DefaultQubit.observables.union({\"SpecialObservable\"})\n\n def expval(self, observable, **kwargs):\n if self.analytic and isinstance(observable, SpecialObservable):\n val = super().expval(qml.PauliZ(wires=0), **kwargs)\n return SpecialObject(val)\n\n return super().expval(observable, **kwargs)\n\n dev = DeviceSupporingSpecialObservable(wires=1, shots=None)\n\n # force diff_method='parameter-shift' because otherwise\n # PennyLane swaps out dev for default.qubit.autograd\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(SpecialObservable(wires=0))\n\n @qml.qnode(dev, diff_method=\"parameter-shift\")\n def reference_qnode(x):\n qml.RY(x, wires=0)\n return qml.expval(qml.PauliZ(wires=0))\n\n assert np.isclose(qnode(0.2).item().val, reference_qnode(0.2))\n assert np.isclose(qml.jacobian(qnode)(0.2).item().val, qml.jacobian(reference_qnode)(0.2))", "def test_qgt_multi_arguments(self, qgt_type):\n args = () if qgt_type == ReverseQGT else (self.estimator,)\n qgt = qgt_type(*args, 
derivative_type=DerivativeType.REAL)\n\n a = Parameter(\"a\")\n b = Parameter(\"b\")\n qc = QuantumCircuit(1)\n qc.rx(a, 0)\n qc.ry(b, 0)\n qc2 = QuantumCircuit(1)\n qc2.rx(a, 0)\n qc2.ry(b, 0)\n\n param_list = [[np.pi / 4], [np.pi / 2]]\n correct_values = [[[1 / 4]], [[1 / 4, 0], [0, 0]]]\n param_list = [[np.pi / 4, np.pi / 4], [np.pi / 2, np.pi / 2]]\n qgt_results = qgt.run([qc, qc2], param_list, [[a], None]).result().qgts\n for i, _ in enumerate(param_list):\n np.testing.assert_allclose(qgt_results[i], correct_values[i], atol=1e-3)", "def test_polynomial_extrapolator(self):\n points = 0.7\n params = PolynomialExtrapolator(degree=3).extrapolate(\n points=[points], param_dict=PARAM_DICT\n )\n sq_diff = [\n (actual - expected) ** 2 for actual, expected in zip(params[points], PARAM_DICT[points])\n ]\n self.assertLess(sum(sq_diff), 1e-3)", "def testTriangularMF_evaluateRightRamp(self):\n self.assertAlmostEquals(0.8, self.triangular_mf.Evaluate(0.6))", "def test_integral_6():\n epsilon = 0.005\n parameters['A'] = 2200\n parameters['target'] = 1500\n parameters['mu1'], parameters['mu2'], parameters['mu3'] = 400., 300., 300.\n parameters['mu4'], parameters['mu5'], parameters['mu6'] = 400., 300., 300.\n parameters['sigma1'], parameters['sigma2'], parameters['sigma3'] = 190., 190., 190.\n parameters['sigma4'], parameters['sigma5'], parameters['sigma6'] = 190., 190., 190.\n parameters['distribution'] = 'norm'\n parameters['retailers'] = 6\n parameters['scaling'] = False\n parameters['Q1'] = 300.\n parameters['Q2'] = 300.\n parameters['Q3'] = 300.\n parameters['Q4'] = 300.\n parameters['Q5'] = 300.\n parameters['Q6'] = 300.\n v = integral(parameters)\n assert abs(v - 0.43182) <= epsilon\n\n parameters['target'] = 1000\n v = integral(parameters)\n assert abs(v - 0.93976) <= epsilon\n\n parameters['target'] = 3000.\n parameters['mu1'], parameters['mu2'], parameters['mu3'] = 300., 400., 500.\n parameters['mu4'], parameters['mu5'], parameters['mu6'] = 600., 700., 800.\n parameters['Q1'] = 500.\n parameters['Q2'] = 600.\n parameters['Q3'] = 500.\n parameters['Q4'] = 500.\n parameters['Q5'] = 600.\n parameters['Q6'] = 700.\n v = integral(parameters)\n assert abs(v - 0.24906) <= epsilon\n\n parameters['sigma1'], parameters['sigma2'], parameters['sigma3'] = 90., 190., 290.\n parameters['sigma4'], parameters['sigma5'], parameters['sigma6'] = 290., 190., 90.\n parameters['Q1'] = 500.\n parameters['Q2'] = 600.\n parameters['Q3'] = 500.\n parameters['Q4'] = 500.\n parameters['Q5'] = 600.\n parameters['Q6'] = 700.\n v = integral(parameters)\n assert abs(v - 0.21608) <= epsilon", "def test_set_get_Q(self):\n\t\tb = RigidBody()\n\n\t\tQ = [1,0,0,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,1,0,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,0,1,0]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\t\t\n\t\tQ = [0,0,0,1]\n\t\tb.set_Q(Q)\n\t\tself.assertEqual(b.state_vector[6:10], Q)\n\t\tself.assertEqual(b.get_Q(), Q)\n\n\t\tQ = [0.5,0,0,0]\n\t\tb.set_Q(Q)\n\t\tQ = [1,0,0,0]\n\t\tfor i in range(len(Q)):\n\t\t\tself.assertTrue(b.get_Q()[i] - Q[i] < EPS_A)\n\t\t\tself.assertTrue(b.state_vector[6+i] - Q[i] < EPS_A)\n\n\t\tQ = [3,-4,0,0]\n\t\tb.set_Q(Q)\n\t\tQ = [3/5,-4/5,0,0]\n\t\tfor i in range(len(Q)):\n\t\t\tself.assertTrue(b.get_Q()[i] - Q[i] < EPS_A)\n\t\t\tself.assertTrue(b.state_vector[6+i] - Q[i] < EPS_A)", "def 
test_single_expectation_value(self, tol, batch_dim):\n if batch_dim is not None:\n pytest.skip(msg=\"JVP computation of batched tapes is disallowed, see #4462\")\n dev = qml.device(\"default.qubit\", wires=2)\n x = 0.543 if batch_dim is None else 0.543 * np.arange(1, 1 + batch_dim)\n y = -0.654\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {0, 1}\n tangent = np.array([1.0, 1.0])\n\n tapes, fn = qml.gradients.jvp(tape, tangent, param_shift)\n assert len(tapes) == 4\n\n res = fn(dev.batch_execute(tapes))\n assert res.shape == () if batch_dim is None else (batch_dim,)\n\n exp = np.sum(np.array([-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]), axis=0)\n assert np.allclose(res, exp, atol=tol, rtol=0)", "def test_value(self):\n\n # Number of modes\n d = 10\n\n # Number of shots\n shots = 100\n\n # rundom parameters for squeezing gates\n squeezing_params_r = np.random.random(d)\n squeezing_params_phi = np.random.random(d)\n\n # random unitary matrix for perform interferometer\n interferometer_param = unitary_group.rvs(d)\n\n ###################################\n\n # Piquasso python program\n with pq.Program() as pq_program:\n # Apply random squeezings\n for idx in range(d):\n pq.Q(idx) | pq.Squeezing(r=squeezing_params_r[idx], phi=squeezing_params_phi[idx])\n\n # Apply random interferometer\n pq.Q() | pq.Interferometer(interferometer_param)\n\n # Measure all modes with shots shots\n pq.Q() | pq.ThresholdMeasurement()\n\n simulator = pq.GaussianSimulator(d=d)\n\n # Measuring runtime\n startTime = time.time()\n result = simulator.execute(program=pq_program, shots=shots)\n pypq_results = np.array(result.samples)\n endTime = time.time()\n\n piquasso_time = endTime - startTime\n\n ###################################\n\n # Piquasso boost program\n with pq.Program() as pq_program:\n # Apply random squeezings\n for idx in range(d):\n pq.Q(idx) | pq.Squeezing(r=squeezing_params_r[idx], phi=squeezing_params_phi[idx])\n\n # Apply random interferometer\n pq.Q() | pq.Interferometer(interferometer_param)\n\n # Measure all modes with shots shots\n pq.Q() | pq.ThresholdMeasurement()\n\n simulator = pqb.BoostedGaussianSimulator(d=d)\n\n # Measuring runtime\n startTime = time.time()\n result = simulator.execute(program=pq_program, shots=shots)\n cpq_results = np.array(result.samples)\n endTime = time.time()\n\n piquasso_boost_time = endTime - startTime\n\n ###################################\n\n print(' ')\n print('*******************************************')\n print('Number of modes: ', d)\n print('Time elapsed with piquasso : ' + str(piquasso_time))\n print('Time elapsed with piquasso boost: ' + str(piquasso_boost_time))\n print('The result of piquasso python: \\n' , pypq_results)\n print('The result of piquasso C++: \\n' , cpq_results)\n print( \"speedup: \" + str(piquasso_time/piquasso_boost_time) )", "def testTriangularMF_evaluateLeftRamp(self):\n self.assertAlmostEquals(0.4, self.triangular_mf.Evaluate(0.2))", "def test_guess_correct():\n\n assert update_guess(1, 0.3, 0.1, 0.7) >= 0.3\n assert update_guess(1, 0.1, 0.3, 0.7) >= 0.1\n assert update_guess(1, 0.01, 0.01, 0.01) >= 0.01\n assert update_guess(1, 0.49, 0.49, 0.99) >= 0.49", "def testGetArea_triangular(self):\n self.assertAlmostEquals(0.375, self.triangular_mf.GetArea(0.5))", "def test_quadratic_bowl_with_initial_simplex(self):\n minimum = np.array([1.0, 1.0])\n 
scales = np.array([2.0, 3.0])\n def quadratic(x):\n return tf.reduce_sum(\n scales * tf.math.squared_difference(x, minimum), axis=-1)\n\n initial_population = tf.random.uniform([40, 2], seed=1243)\n results = self.evaluate(\n differential_evolution.minimize(\n quadratic,\n initial_population=initial_population,\n func_tolerance=1e-12,\n seed=2484))\n self.assertTrue(results.converged)\n self.assertArrayNear(results.position, minimum, 1e-6)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that PolyXP works as expected
def test_polyxp(self, tol):
    cutoff_dim = 12
    a = 0.14321
    nbar = 0.2234

    hbar = 2
    dev = qml.device("strawberryfields.fock", wires=1, hbar=hbar, cutoff_dim=cutoff_dim)
    Q = np.array([0, 1, 0])  # x expectation

    @qml.qnode(dev)
    def circuit(x):
        qml.Displacement(x, 0, wires=0)
        return qml.expval(qml.PolyXP(Q, 0))

    # test X expectation
    assert np.allclose(circuit(a), hbar * a, atol=tol, rtol=0)

    Q = np.diag([-0.5, 1 / (2 * hbar), 1 / (2 * hbar)])  # mean photon number

    @qml.qnode(dev)
    def circuit(x):
        qml.ThermalState(nbar, wires=0)
        qml.Displacement(x, 0, wires=0)
        return qml.expval(qml.PolyXP(Q, 0))

    # test mean photon number expectation
    assert np.allclose(circuit(a), nbar + np.abs(a) ** 2, atol=tol, rtol=0)
[ "def test_polyxp(self):\n self.logTestName()\n\n a = 0.54321\n nbar = 0.5234\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=1, hbar=hbar)\n Q = np.array([0, 1, 0]) # x expectation\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.PolyXP(Q, 0))\n\n # test X expectation\n self.assertAlmostEqual(circuit(a), hbar*a)\n\n Q = np.diag([-0.5, 1/(2*hbar), 1/(2*hbar)]) # mean photon number\n\n @qml.qnode(dev)\n def circuit(x):\n qml.ThermalState(nbar, wires=0)\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.PolyXP(Q, 0))\n\n # test X expectation\n self.assertAlmostEqual(circuit(a), nbar+np.abs(a)**2)", "def runPoly():\n X,y=preprocess()\n Polynomial(X,y)", "def test_polynomial_call(self):\n mod5 = IntegersModP(5)\n polysMod5 = polynomials_over(mod5).factory\n # 1 + x\n poly = polysMod5([1, 1])\n # z = 3\n z = mod5(3)\n assert z + 1 == poly(z)\n # 1 + x + x^2 (1 + 3 + 9 == 13 == 3)\n poly2 = polysMod5([1, 1, 1])\n assert 1 + z + z**2 == poly2(z)\n assert poly2(z) == mod5(3)", "def test_poly1():\n xi = np.array([-1., 0., 2., 1.])\n yi = np.array([-2., 1., 1., 0.])\n c = poly_interp(xi,yi)\n c_true = np.array([1, 0, -2, 1])\n print \"c = \", c\n print \"c_true = \", c_true\n # test that all elements have small error:\n assert np.allclose(c, c_true), \\\n \"Incorrect result, c = %s, Expected: c = %s\" % (c,c_true)", "def test_polyuq_empirical(self):\n# # Generate data\n n = 1\n # Load linear dataset with n_observations=500,n_dim=10,bias=0.5,n_relevent=2,noise=0.2,train/test split = 0.8\n data = np.load('./tests/test_data/linear_data.npz')\n X_train = data['X_train']; y_train = data['y_train']\n N,dim = X_train.shape\n\n # Fit poly and approx its variance\n param = Parameter(distribution='Uniform', lower=-1, upper=1, order=n)\n myParameters = [param for i in range(dim)] # one-line for loop for parameters\n myBasis = Basis('total-order')\n poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X_train, 'sample-outputs':y_train.reshape(-1,1)} )\n poly.set_model()\n y_pred, y_std = poly.get_polyfit(X_train,uq=True)\n np.testing.assert_array_almost_equal(y_std.mean(), 0.67484, decimal=5, err_msg='Problem!')", "def test_polynomial_extrapolator(self):\n points = 0.7\n params = PolynomialExtrapolator(degree=3).extrapolate(\n points=[points], param_dict=PARAM_DICT\n )\n sq_diff = [\n (actual - expected) ** 2 for actual, expected in zip(params[points], PARAM_DICT[points])\n ]\n self.assertLess(sum(sq_diff), 1e-3)", "def test_points_from_polygon(rp):\n assert EuclideanWorld([rp]).get_points() == set(rp.vertices)", "def polyPrimitive(sideLength=\"string\", axis=\"string\", radius=\"string\", polyType=int, constructionHistory=bool, name=\"string\"):\n pass", "def polyEval(p, x):\n\tk = len(p)-1 # last valid index\n\tif(k < 0):\n\t\treturn 0\n\ty = p[k]\n\twhile(k > 0):\n\t\tk -= 1\n\t\ty = y*x + p[k]\n\treturn y", "def test_robust(self):\n methods = ['huber','least-absolute-residual']\n opts = ['osqp','scipy'] \n f = lambda x: (-0.3*x**4 -3*x**3 +0.6*x**2 +2.4*x - 0.5)\n\n N = 50 # number of training points (note, some will be removed below)\n n = 4 # degree of polynomial\n state = 15 # random seed\n \n # Add some noise\n noise_var = 0.1\n x = np.sort(np.random.RandomState(state).uniform(-1,1,N))\n y = f(x) + np.random.RandomState(state).normal(0,noise_var,size=N).T\n \n # delete training points between 0 < x < 0.3\n pos = ((x>0)*(x<0.3)).nonzero()[0]\n x = np.delete(x,pos)\n y = np.delete(y,pos)\n\n # 
Add some outliers\n randrange = range(10,17)\n y[randrange] = y[randrange]+np.random.RandomState(1).normal(0,4**2,len(randrange))\n \n # Test data\n x = x.reshape(-1,1)\n xtest = np.linspace(-1,1,100).reshape(-1,1)\n ytest = f(xtest)\n\n # param and basis\n param = Parameter(distribution='uniform', lower=-1, upper=1, order=n)\n basis = Basis('univariate')\n\n # Test Poly regressions\n for method in methods:\n for opt in opts:\n if method != 'huber' and opt != 'scipy': # TODO - remove this if statement once scipy huber regression implemented\n poly = Poly(parameters=param, basis=basis, method=method,\n sampling_args= {'mesh': 'user-defined', 'sample-points':x.reshape(-1,1), 'sample-outputs': y.reshape(-1,1)},\n solver_args={'M':0.2**2,'verbose':False,'optimiser':opt})\n poly.set_model()\n _,r2 = poly.get_polyscore(X_test=xtest,y_test=ytest)\n self.assertTrue(r2 > 0.997,msg='Poly method = %a, optimiser = %a' %(method,opt))", "def testSimplex(self):\n self.glp.simplex()\n obj = glpk.glp_get_obj_val(self.glp.lp)\n self.assertAlmostEqual(obj, 0.9259122)", "def test_polyxp_variance(self, tol):\n dev = qml.device(\"strawberryfields.fock\", wires=1, cutoff_dim=15)\n\n @qml.qnode(dev)\n def circuit(r, phi):\n qml.Squeezing(r, 0, wires=0)\n qml.Rotation(phi, wires=0)\n return qml.var(qml.PolyXP(np.array([0, 1, 0]), wires=0))\n\n r = 0.105\n phi = -0.654\n\n var = circuit(r, phi)\n expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2\n assert np.allclose(var, expected, atol=tol, rtol=0)", "def polyfunc(x, *p):\n y = 0\n for n, P in enumerate(p):\n y += P * x ** n\n return y", "def test_polyuq_prescribed(self):\n # Generate data\n dim = 1\n n = 5\n N = 100\n our_function = lambda x: 0.3*x**4 -1.6*x**3 +0.6*x**2 +2.4*x - 0.5\n X = np.linspace(-1,1,N)\n y = our_function(X)\n\n # Array of prescribed variances at each training data point\n# y_var = state.uniform(0.05,0.2,N_train)**2\n y_var = 0.1*our_function(X)*X\n \n # Fit poly with prescribed variances\n param = Parameter(distribution='Uniform', lower=-1, upper=1, order=n)\n myParameters = [param for i in range(dim)] # one-line for loop for parameters\n myBasis = Basis('univariate')\n poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X.reshape(-1,1), \n 'sample-outputs':y.reshape(-1,1), \n 'sample-output-variances':y_var} )\n poly.set_model()\n y_pred, y_std = poly.get_polyfit(X,uq=True)\n\n np.testing.assert_array_almost_equal(y_std.mean(), 0.64015, decimal=5, err_msg='Problem!')", "def test_projection_logic(self):", "def testIteration(self):\n for num in range(3, 30):\n poly = self.polygon(num)\n self.assertEqual(len(poly), num)\n points1 = [p for p in poly]\n points2 = poly.getVertices()\n self.assertEqual(len(points1), num + 1)\n self.assertEqual(len(points2), num + 1)\n self.assertEqual(points2[0], points2[-1]) # Closed representation\n for p1, p2 in zip(points1, points2):\n self.assertEqual(p1, p2)\n for i, p1 in enumerate(points1):\n self.assertEqual(poly[i], p1)", "def test_p_atch_cobtxid(self):\n pass", "def powPoly(p,poly):\n if p==0:\n return mkNonZero(1,mkZero())\n elif p==1:\n return poly\n else:\n return mulPoly(poly,powPoly(p-1,poly))", "def test_cvx_polynomial():\n\n a = 4.0\n\n A = cp.Variable(2,2)\n\n # No equality constraint\n\n prob = cp.Problem( \n cp.Minimize( a**2 * A[0,0] - 2 * a * A[0,1] + A[1,1] ), [\n A == cp.semidefinite(2),\n A[0,0] == 1.\n ])\n prob.solve()\n\n print prob.status\n print A.value\n\n assert False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that FockStateProjector works as expected
def test_fock_state_projector(self, tol):
    cutoff_dim = 12
    a = 0.54321
    r = 0.123

    hbar = 2
    dev = qml.device("strawberryfields.fock", wires=2, hbar=hbar, cutoff_dim=cutoff_dim)

    # test correct number state expectation |<n|a>|^2
    @qml.qnode(dev)
    def circuit(x):
        qml.Displacement(x, 0, wires=0)
        return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))

    expected = np.abs(np.exp(-np.abs(a) ** 2 / 2) * a ** 2 / np.sqrt(2)) ** 2
    assert np.allclose(circuit(a), expected, atol=tol, rtol=0)

    # test correct number state expectation |<n|S(r)>|^2
    @qml.qnode(dev)
    def circuit(x):
        qml.Squeezing(x, 0, wires=0)
        return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))

    expected = np.abs(np.sqrt(2) / (2) * (-np.tanh(r)) / np.sqrt(np.cosh(r))) ** 2
    assert np.allclose(circuit(r), expected, atol=tol, rtol=0)
[ "def test_fock_state(self):\n self.logTestName()\n\n a = 0.54321\n r = 0.123\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=2, hbar=hbar)\n\n # test correct number state expectation |<n|a>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2]), wires=0))\n\n expected = np.abs(np.exp(-np.abs(a)**2/2)*a**2/np.sqrt(2))**2\n self.assertAlmostEqual(circuit(a), expected)\n\n # test correct number state expectation |<n|S(r)>|^2\n @qml.qnode(dev)\n def circuit(x):\n qml.Squeezing(x, 0, wires=0)\n return qml.expval(qml.FockStateProjector(np.array([2, 0]), wires=[0, 1]))\n\n expected = np.abs(np.sqrt(2)/(2)*(-np.tanh(r))/np.sqrt(np.cosh(r)))**2\n self.assertAlmostEqual(circuit(r), expected)", "async def test_reproducing_states(\n hass: HomeAssistant, caplog: pytest.LogCaptureFixture\n) -> None:\n\n assert await async_setup_component(\n hass,\n \"input_number\",\n {\n \"input_number\": {\n \"test_number\": {\"min\": \"5\", \"max\": \"100\", \"initial\": VALID_NUMBER1}\n }\n },\n )\n\n # These calls should do nothing as entities already in desired state\n await async_reproduce_state(\n hass,\n [\n State(\"input_number.test_number\", VALID_NUMBER1),\n # Should not raise\n State(\"input_number.non_existing\", \"234\"),\n ],\n )\n\n assert hass.states.get(\"input_number.test_number\").state == VALID_NUMBER1\n\n # Test reproducing with different state\n await async_reproduce_state(\n hass,\n [\n State(\"input_number.test_number\", VALID_NUMBER2),\n # Should not raise\n State(\"input_number.non_existing\", \"234\"),\n ],\n )\n\n assert hass.states.get(\"input_number.test_number\").state == VALID_NUMBER2\n\n # Test setting state to number out of range\n await async_reproduce_state(hass, [State(\"input_number.test_number\", \"150\")])\n\n # The entity states should be unchanged after trying to set them to out-of-range number\n assert hass.states.get(\"input_number.test_number\").state == VALID_NUMBER2\n\n await async_reproduce_state(\n hass,\n [\n # Test invalid state\n State(\"input_number.test_number\", \"invalid_state\"),\n # Set to state it already is.\n State(\"input_number.test_number\", VALID_NUMBER2),\n ],\n )", "def test_set_transition_state():\n\n def assert_state(instance):\n \"\"\"\n ensure the running state is set\n \"\"\"\n assert instance.state == \"do_thing_running\"\n\n x = get_thing()\n x.do_thing(assert_state)\n\n # ensure the target transition is set when the process is done\n assert x.state == x.CHOICES.done", "def test_fock_state(self, tol):\n arg = 1\n wires = [0]\n\n gate_name = \"FockState\"\n operation = qml.FockState\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(arg)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, arg)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def test_create(self):\n self.assertIsInstance(self.obj, State)", "def test_projection_logic(self):", "def test_new_state(self):\n self.new_helper(\"State\")", "async def test_state_triggers(hass: HomeAssistant) -> None:\n hass.states.async_set(\"sensor.test_monitored\", STATE_OFF)\n\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n 
\"platform\": \"bayesian\",\n \"observations\": [\n {\n \"platform\": \"state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"to_state\": \"off\",\n \"prob_given_true\": 0.9999,\n \"prob_given_false\": 0.9994,\n },\n ],\n \"prior\": 0.2,\n \"probability_threshold\": 0.32,\n }\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"binary_sensor.test_binary\").state == STATE_OFF\n\n events = []\n async_track_state_change_event(\n hass, \"binary_sensor.test_binary\", callback(lambda event: events.append(event))\n )\n\n context = Context()\n hass.states.async_set(\"sensor.test_monitored\", STATE_ON, context=context)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n assert events[0].context == context", "def test_disable_running_transition():\n\n def assert_new(instance):\n \"\"\"\n ensure the state is still the original state\n \"\"\"\n assert instance.state == \"new\"\n\n x = get_thing()\n x.disable_running_state(assert_new)", "def test_init_state(self) -> None:\n # Execute\n state = self.state_factory()\n\n # Assert\n assert isinstance(state, State)", "async def test_state(hass: HomeAssistant) -> None:\n config = {\"air_quality\": {\"platform\": \"demo\"}}\n\n assert await async_setup_component(hass, \"air_quality\", config)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"air_quality.demo_air_quality_home\")\n assert state is not None\n\n assert state.state == \"14\"", "async def test_reproduce_group(opp):\n context = Context()\n\n def clone_state(state, entity_id):\n \"\"\"Return a cloned state with different entity_id.\"\"\"\n return State(\n entity_id,\n state.state,\n state.attributes,\n last_changed=state.last_changed,\n last_updated=state.last_updated,\n context=state.context,\n )\n\n with patch(\n \"openpeerpower.components.group.reproduce_state.async_reproduce_state\"\n ) as fun:\n fun.return_value = Future()\n fun.return_value.set_result(None)\n\n opp.states.async_set(\n \"group.test\",\n \"off\",\n {\"entity_id\": [\"light.test1\", \"light.test2\", \"switch.test1\"]},\n )\n opp.states.async_set(\"light.test1\", \"off\")\n opp.states.async_set(\"light.test2\", \"off\")\n opp.states.async_set(\"switch.test1\", \"off\")\n\n state = State(\"group.test\", \"on\")\n\n await async_reproduce_states(opp, [state], context=context)\n\n fun.assert_called_once_with(\n opp,\n [\n clone_state(state, \"light.test1\"),\n clone_state(state, \"light.test2\"),\n clone_state(state, \"switch.test1\"),\n ],\n context=context,\n reproduce_options=None,\n )", "def test_functions(self):\n self.assertIsNotNone(State.__doc__)", "def test_reproduce_complex_data(self):\n calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)\n\n self.hass.states.set('light.test', 'off')\n\n complex_data = ['hello', {'11': '22'}]\n\n state.reproduce_state(self.hass, ha.State('light.test', 'on', {\n 'complex': complex_data\n }))\n\n self.hass.block_till_done()\n\n assert len(calls) > 0\n last_call = calls[-1]\n assert 'light' == last_call.domain\n assert SERVICE_TURN_ON == last_call.service\n assert complex_data == last_call.data.get('complex')", "def test_fock_state_vector(self, tol):\n args = psi\n\n wires = [0]\n\n gate_name = \"FockStateVector\"\n operation = qml.FockStateVector\n\n cutoff_dim = 10\n dev = qml.device(\"strawberryfields.fock\", wires=2, cutoff_dim=cutoff_dim)\n\n sf_operation = dev._operation_map[gate_name]\n\n assert dev.supports_operation(gate_name)\n\n @qml.qnode(dev)\n def 
circuit(*args):\n qml.TwoModeSqueezing(0.1, 0, wires=[0, 1])\n operation(*args, wires=wires)\n return qml.expval(qml.NumberOperator(0)), qml.expval(qml.NumberOperator(1))\n\n res = circuit(psi)\n sf_res = SF_gate_reference(sf_operation, cutoff_dim, wires, psi)\n assert np.allclose(res, sf_res, atol=tol, rtol=0)", "def make_test_state(self, app_label, operation, **kwargs):\n project_state = self.set_up_test_model(app_label, **kwargs)\n new_state = project_state.clone()\n operation.state_forwards(app_label, new_state)\n return project_state, new_state", "def func_state_pre_test(list_float_target_state: List[float], int_shots: int) -> List[float]:\n env = QEnv()\n\n # from QCompute import Define\n # Define.hubToken = ''\n # env.backend(BackendName.CloudBaiduSim2Wind)\n env.backend(BackendName.LocalBaiduSim2)\n\n int_dim = len(list_float_target_state) # the dimension of the input vector\n num_qubit_sys = max(int(np.ceil(np.log2(int_dim))), 1) # the number of qubits we need to encode the input vector\n reg_sys = list(env.Q[idx] for idx in range(num_qubit_sys)) # create the quantum register\n\n # call the quantum circuit to prepare quantum state\n circ_state_pre(reg_sys, [], list_float_target_state, reg_borrowed=[])\n\n # measure the quantum state we have prepared\n MeasureZ(reg_sys, list(reversed(range(num_qubit_sys))))\n\n task_result = env.commit(int_shots, fetchMeasure=True)['counts'] # commit to the task\n\n list_population = [0 for _ in range(2 ** num_qubit_sys)] # register for finial populations\n for idx_key in task_result.keys():\n list_population[int(idx_key, base=2)] = task_result[idx_key]\n return list_population", "def test_reproduce_turn_on(self):\n calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)\n\n self.hass.states.set('light.test', 'off')\n\n state.reproduce_state(self.hass, ha.State('light.test', 'on'))\n\n self.hass.block_till_done()\n\n assert len(calls) > 0\n last_call = calls[-1]\n assert 'light' == last_call.domain\n assert SERVICE_TURN_ON == last_call.service\n assert ['light.test'] == last_call.data.get('entity_id')", "def test_group(self):\n\n class DoneState(State):\n def __init__(self):\n State.__init__(self,outcomes=['done'])\n def execute(self,ud=None):\n return 'done'\n\n sm = StateMachine(['succeeded','done'])\n with sm:\n StateMachine.add('FAILSAUCE',DoneState())\n transitions = {'aborted':'FAILSAUCE','preempted':'FAILSAUCE'}\n with sm:\n StateMachine.add('FIRST', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n StateMachine.add('SECOND', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g2), transitions)\n StateMachine.add('THIRD', SimpleActionState(self.node, 'fibonacci', Fibonacci, goal = g1), transitions)\n spinner = threading.Thread(target=self.spin)\n spinner.start()\n outcome = sm.execute()\n\n assert outcome == 'done'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that variance for PolyXP measurement works
def test_polyxp_variance(self, tol): dev = qml.device("strawberryfields.fock", wires=1, cutoff_dim=15) @qml.qnode(dev) def circuit(r, phi): qml.Squeezing(r, 0, wires=0) qml.Rotation(phi, wires=0) return qml.var(qml.PolyXP(np.array([0, 1, 0]), wires=0)) r = 0.105 phi = -0.654 var = circuit(r, phi) expected = np.exp(2 * r) * np.sin(phi) ** 2 + np.exp(-2 * r) * np.cos(phi) ** 2 assert np.allclose(var, expected, atol=tol, rtol=0)
[ "def test_polyuq_prescribed(self):\n # Generate data\n dim = 1\n n = 5\n N = 100\n our_function = lambda x: 0.3*x**4 -1.6*x**3 +0.6*x**2 +2.4*x - 0.5\n X = np.linspace(-1,1,N)\n y = our_function(X)\n\n # Array of prescribed variances at each training data point\n# y_var = state.uniform(0.05,0.2,N_train)**2\n y_var = 0.1*our_function(X)*X\n \n # Fit poly with prescribed variances\n param = Parameter(distribution='Uniform', lower=-1, upper=1, order=n)\n myParameters = [param for i in range(dim)] # one-line for loop for parameters\n myBasis = Basis('univariate')\n poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X.reshape(-1,1), \n 'sample-outputs':y.reshape(-1,1), \n 'sample-output-variances':y_var} )\n poly.set_model()\n y_pred, y_std = poly.get_polyfit(X,uq=True)\n\n np.testing.assert_array_almost_equal(y_std.mean(), 0.64015, decimal=5, err_msg='Problem!')", "def pvar(data):\n return sum_of_squares(data)/len(data)", "def testTTest_DifferentVariance(self):\n result_low_var = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5])\n result_high_var = ttest.WelchsTTest([1, 4, 1, 4], [3, 6, 3, 6])\n self.assertLess(result_low_var.p, result_high_var.p)", "def _compute_det_variance(self):", "def test_polyuq_empirical(self):\n# # Generate data\n n = 1\n # Load linear dataset with n_observations=500,n_dim=10,bias=0.5,n_relevent=2,noise=0.2,train/test split = 0.8\n data = np.load('./tests/test_data/linear_data.npz')\n X_train = data['X_train']; y_train = data['y_train']\n N,dim = X_train.shape\n\n # Fit poly and approx its variance\n param = Parameter(distribution='Uniform', lower=-1, upper=1, order=n)\n myParameters = [param for i in range(dim)] # one-line for loop for parameters\n myBasis = Basis('total-order')\n poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X_train, 'sample-outputs':y_train.reshape(-1,1)} )\n poly.set_model()\n y_pred, y_std = poly.get_polyfit(X_train,uq=True)\n np.testing.assert_array_almost_equal(y_std.mean(), 0.67484, decimal=5, err_msg='Problem!')", "def test_leahy_norm_total_variance(self):\n ps = Powerspectrum(self.lc, norm=\"Leahy\")\n ps_var = (np.sum(self.lc.counts) / ps.n**2.0) * (\n np.sum(ps.power[:-1]) + ps.power[-1] / 2.0\n )\n\n assert np.isclose(ps_var, np.var(self.lc.counts), atol=0.01)", "def phase_spherical_variance():\n pass", "def calculate_variance(X):\n return np.var(X,axis=0)", "def prod_variance(X, varX, Y, varY):\n z = X**2.0 * varY + varX * varY + varX * Y**2.0\n return z", "def test_variance_weighted(self):\n self._test_variance(weights=self._generate_weights())", "def test_variance(speed, correlation_length, seed, distance, frequency, include_saturation):\n\n #duration = 1200.\n #fs = 8000.\n #nsamples = int(fs*duration)\n #ntaps = 8192\n #window = None\n #state = np.random.RandomState(seed)\n #mean_mu_squared = 3.0e-6\n #soundspeed = 343.\n #wavenumber = 2.*np.pi*frequency/soundspeed\n\n #modulated = (signal, fs, correlation_length, speed, distance, soundspeed, mean_mu_squared, ntaps=8192,\n #nfreqs=100, window=None, include_saturation=False, state=None, factor=5.0,\n #include_amplitude=True, include_phase=True)\n\n #modulated = Signal(modulated.take(nsamples).toarray())\n\n #amplitude = modulated.amplitude_envelope()\n #phase = modulated.instantaneous_\n\n\n\n #expected_logamp_var = variance_gaussian(distance, wavenumber, correlation_length, mean_mu_squared,\n #include_saturation=include_saturation)\n #expected_phase_var = variance_gaussian(distance, wavenumber, 
correlation_length, mean_mu_squared)\n\n #assert np.abs( logamp.var() - expected_logamp_var ) < 0.06\n #assert np.abs( phase.var() - expected_phase_var ) < 0.06", "def test_zero_noise_variance(self):\n alpha = numpy.random.rand() * numpy.pi\n beta = numpy.random.rand() * numpy.pi / 2\n rho = self.obj.simulate_qaoa(params=(alpha, beta))\n var_unmitigated = self.obj.unmitigated_variance(rho)\n rho_out = self.obj.simulate_virtual_distillation(rho)\n var_mitigated = self.obj.mitigated_variance(rho_out)\n self.assertAlmostEqual(round(var_mitigated),\n round(var_unmitigated / 2))", "def test_squeeze_variance_frontend(self, setup_eng, hbar, tol):\n eng, prog = setup_eng(1)\n\n with prog.context as q:\n ops.Sgate(R) | q\n ops.MeasureX | q\n\n res = np.empty(0)\n\n for i in range(N_MEAS):\n eng.run(prog)\n res = np.append(res, q[0].val)\n eng.reset()\n\n assert np.allclose(\n np.var(res), np.exp(-2 * R) * hbar / 2, atol=STD_10 + tol, rtol=0\n )", "def _test_variance(self, weights: Optional[numpy.ndarray]):\n try:\n closed = self.instance.variance(num_candidates=self.num_candidates, weights=weights)\n except NoClosedFormError as error:\n raise SkipTest(\"no implementation of closed-form variance\") from error\n\n # variances are non-negative\n self.assertLessEqual(0, closed)\n\n generator = numpy.random.default_rng(seed=0)\n low, simulated, high = self.instance.numeric_variance_with_ci(\n num_candidates=self.num_candidates,\n num_samples=self.num_samples,\n generator=generator,\n weights=weights,\n )\n self.assertLessEqual(low, closed)\n self.assertLessEqual(closed, high)", "def variance(self):\n return self.stats().variance()", "def true_loss_variance(self, config=None):\r\n raise NotImplementedError()", "def test_polyxp(self):\n self.logTestName()\n\n a = 0.54321\n nbar = 0.5234\n\n hbar = 2\n dev = qml.device('strawberryfields.gaussian', wires=1, hbar=hbar)\n Q = np.array([0, 1, 0]) # x expectation\n\n @qml.qnode(dev)\n def circuit(x):\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.PolyXP(Q, 0))\n\n # test X expectation\n self.assertAlmostEqual(circuit(a), hbar*a)\n\n Q = np.diag([-0.5, 1/(2*hbar), 1/(2*hbar)]) # mean photon number\n\n @qml.qnode(dev)\n def circuit(x):\n qml.ThermalState(nbar, wires=0)\n qml.Displacement(x, 0, wires=0)\n return qml.expval(qml.PolyXP(Q, 0))\n\n # test X expectation\n self.assertAlmostEqual(circuit(a), nbar+np.abs(a)**2)", "def test_gaussian_random_field_variance(plot=False):\n\n sigma = 1.3\n beta = 5\n k0 = 150\n P = lambda k: np.piecewise(k, k != 0, [lambda k: k/(1+k/k0)**(beta+1), sigma])\n \n n_r = 100\n n_grid = 400\n L = 1.0\n \n P_data = []\n for i in range(n_r):\n d = grftools.onedee.random_fields.create_gaussian_random_field(P=P, n_grid=n_grid, L=L)\n tmp, k_data = grftools.onedee.random_fields.pseudo_Pofk(d, d, L)\n P_data.append(tmp)\n \n P_data = np.array(P_data)\n \n tolerance_mean = 0.05\n assert np.abs(np.mean(np.var(P_data, axis=0)/P(k_data)**2-1)) < tolerance_mean\n \n if plot:\n import matplotlib.pyplot as plt\n\n plt.figure()\n plt.subplots_adjust(hspace=0)\n plt.suptitle(\"Var[P(k)]\")\n\n plt.subplot(211)\n plt.loglog(k_data, np.var(P_data, axis=0), label=\"Data\")\n plt.loglog(k_data, P(k_data)**2, label=\"Prediction\")\n plt.ylabel(\"Var[P(k)]\")\n plt.legend()\n\n plt.subplot(212)\n plt.semilogx(k_data, np.var(P_data, axis=0)/P(k_data)**2-1)\n plt.ylabel(\"Fractional difference\")\n plt.ylim(-0.5, 0.5)\n plt.xlabel(\"k\")\n plt.savefig(\"plots/1d_grf_pofk_variance_test.png\")", "def variance_lecun(node_input):\n\n return 1.0 / 
node_input" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for api_v1_authenticate_identity_redirect_url_get
def test_api_v1_authenticate_identity_redirect_url_get(self): pass
[ "def test_login_url(self):\n request = self.create_request()\n response = self.middleware.process_request(request)\n self.assert_redirect_url(response, '/login/?next=url/')", "def get_authorization_url(self, callback_url, **kwargs):", "def get_authorization_url(self):\n (status, token, error) = self._get_request_token()\n if not status:\n return (False, None, error)\n\n data = {\n 'url': authenticate_url(token[0]),\n 'access_token': token[0],\n 'access_token_secret': token[1]\n }\n\n return (True, data, None)", "def test_get_redirect(ini, count):\n logging.info(\"Count: {}\".format(count))\n resp = requests.get(str(get_url(ini) + '/redirect/' + str(count)))\n logging.info(\"Response: {}\".format(resp.text))\n assert resp.status_code == 200, \"Wrong status code of response.\"\n assert len(resp.history) == int(count), \"Wrong redirection number.\"", "def get_redirect_uri(self):\n return url_for(\n '.authorized',\n provider=self.provider_name,\n _external=True)", "def get_yext_redirect_uri():\n return url_for('handle_yext_auth_callback', _external=True)", "async def test_login_redirect_header(setup: SetupTest) -> None:\n setup.configure(\"oidc\")\n token = setup.create_upstream_oidc_token(groups=[\"admin\"])\n setup.set_oidc_token_response(\"some-code\", token)\n setup.set_oidc_configuration_response(setup.config.issuer.keypair)\n return_url = \"https://example.com/foo?a=bar&b=baz\"\n\n r = await setup.client.get(\n \"/login\",\n headers={\"X-Auth-Request-Redirect\": return_url},\n allow_redirects=False,\n )\n assert r.status_code == 307\n url = urlparse(r.headers[\"Location\"])\n query = parse_qs(url.query)\n\n # Simulate the return from the OpenID Connect provider.\n r = await setup.client.get(\n \"/login\",\n params={\"code\": \"some-code\", \"state\": query[\"state\"][0]},\n allow_redirects=False,\n )\n assert r.status_code == 307\n assert r.headers[\"Location\"] == return_url", "async def test_gen_auth_url(hass: HomeAssistant, mock_logi_circle) -> None:\n config_flow.register_flow_implementation(\n hass,\n \"test-auth-url\",\n client_id=\"id\",\n client_secret=\"secret\",\n api_key=\"123\",\n redirect_uri=\"http://example.com\",\n sensors=None,\n )\n flow = config_flow.LogiCircleFlowHandler()\n flow.hass = hass\n flow.flow_impl = \"test-auth-url\"\n await async_setup_component(hass, \"http\", {})\n\n result = flow._get_authorization_url()\n assert result == \"http://authorize.url\"", "def test_login_named_url(self):\n request = self.create_request()\n response = self.middleware.process_request(request)\n self.assert_redirect_url(response, '/login/?next=url/')", "def login_redirect_url(request, next_url=None):\n # Check for local login enabled\n if apps.is_installed(\"dbmi_client.login\"):\n\n # Use local login URL\n login_url = furl(request.build_absolute_uri(reverse(\"dbmi_login:authorize\")))\n\n else:\n\n # Build the URL using DBMI-AuthN\n login_url = furl(dbmi_settings.AUTHN_URL)\n login_url.path.segments.extend([\"login\", \"auth\"])\n\n # If no next URL, determine where to dump them after logout\n if not next_url:\n if dbmi_settings.LOGIN_REDIRECT_URL:\n next_url = request.build_absolute_uri(dbmi_settings.LOGIN_REDIRECT_URL)\n\n else:\n next_url = request.build_absolute_uri()\n\n # Add next url\n login_url.query.params.add(dbmi_settings.LOGIN_REDIRECT_KEY, next_url)\n logger.debug(f\"Login next URL: {next_url}\")\n\n # Add the default client ID\n client_id = next(iter(dbmi_settings.AUTH_CLIENTS.keys()))\n login_url.query.params.add(\"client_id\", client_id)\n logger.debug(f\"Auth 
client ID: {client_id}\")\n\n # Check for branding\n if dbmi_settings.AUTHN_TITLE or dbmi_settings.AUTHN_ICON_URL:\n\n # Add the included parameters\n branding = {}\n if dbmi_settings.AUTHN_TITLE:\n branding[\"title\"] = dbmi_settings.AUTHN_TITLE\n\n if dbmi_settings.AUTHN_TITLE:\n branding[\"icon_url\"] = dbmi_settings.AUTHN_ICON_URL\n\n if dbmi_settings.AUTHN_COLOR:\n branding[\"color\"] = dbmi_settings.AUTHN_COLOR\n\n if dbmi_settings.AUTHN_BACKGROUND:\n branding[\"background\"] = dbmi_settings.AUTHN_BACKGROUND\n\n # Encode it and pass it along\n branding_param = base64.urlsafe_b64encode(json.dumps(branding).encode(\"utf-8\")).decode(\"utf-8\")\n login_url.query.params.add(\"branding\", branding_param)\n\n logger.debug(\"Login URL: {}\".format(login_url.url))\n return login_url.url", "def get_signin_url(self):\n params = {'client_id': self.CLIENT_ID,\n 'redirect_uri': self.REDIRECT_URL,\n 'response_type': 'code',\n 'scope': ' '.join(str(i) for i in self.scopes)\n }\n signin_url = self.authorize_url.format(urlencode(params))\n return signin_url", "def test_redirect_uri():\n\n expected_hostname = \"localhost\"\n expected_port = 42424\n expected_message = \"test_redirect_uri\"\n server = Mock(side_effect=Exception(expected_message)) # exception prevents this test actually authenticating\n credential = InteractiveBrowserCredential(\n redirect_uri=\"htps://{}:{}\".format(expected_hostname, expected_port), _server_class=server\n )\n with pytest.raises(ClientAuthenticationError) as ex:\n credential.get_token(\"scope\")\n\n assert expected_message in ex.value.message\n server.assert_called_once_with(expected_hostname, expected_port, timeout=ANY)", "def test_redirect_to_sign_in(self):\n response = self.client.get(reverse('ad-rep-downline-recruits'), \n follow=True)\n self.assertEqual(response.redirect_chain[0][1], 302)\n self.assert_sign_in_page(response)\n self.assertEqual(response.request['QUERY_STRING'], \n 'next=%2Fad-rep%2Fdownline-recruits%2F')", "def create_login_url():\n\n params = {\n 'response_type' : 'code',\n 'redirect_uri' : constants.OAUTH_CALLBACK_URL,\n 'client_id' : constants.CLIENT_ID,\n 'scope' : 'read:user',\n 'district_id' : constants.DISTRICT_ID\n }\n\n return redirect(constants.AUTHORIZE_REQUEST_URL +\n '?' 
+ urllib.urlencode(params))", "def check_redirect(client, url, expected_redirect_regex):\n resp = client.get(url, follow_redirects=False)\n assert resp.status_code == HTTP_REDIRECT, \\\n \"Call to %s returned: %d, not the expected %d\" % (url, resp.status_code,\n HTTP_REDIRECT)\n regex = re.compile(expected_redirect_regex)\n assert regex.search(resp.location), \\\n \"Call to %s redirects to: %s, not matching the expected regex %s\" \\\n % (url, resp.location, expected_redirect_regex)\n return resp", "def test_signin_view_success(self):\n response = self.client.post(reverse('baph_signin'),\n data={'identification': 'john@example.com',\n 'password': 'blowfish'})\n\n #self.assertRedirects(response, reverse('baph_profile_detail',\n # kwargs={'username': 'john'}))\n\n # Redirect to supplied ``next`` value.\n response = self.client.post(reverse('baph_signin'),\n data={'identification': 'john@example.com',\n 'password': 'blowfish',\n 'next': settings.LOGIN_REDIRECT_URL})\n self.assertRedirects(response, settings.LOGIN_REDIRECT_URL)", "def test_get_landing_page_url(self):\n pass", "def check_redirect(client, url, expected_redirect_regex):\n resp = client.get(url, follow_redirects=False)\n assert resp.status_code == 302, \\\n \"Call to %s returned: %d, not the expected %d\"%(url, resp.status_code, 302)\n regex = re.compile(expected_redirect_regex)\n assert regex.search(resp.location), \\\n \"Call to %s redirects to: %s, not matching the expected regex %s\" \\\n % (url, resp.location, expected_redirect_regex)\n return resp", "async def test_oauth2_callback(setup: SetupTest) -> None:\n setup.configure(\"oidc\")\n token = setup.create_upstream_oidc_token(groups=[\"admin\"])\n setup.set_oidc_token_response(\"some-code\", token)\n setup.set_oidc_configuration_response(setup.config.issuer.keypair)\n assert setup.config.oidc\n return_url = \"https://example.com/foo\"\n\n r = await setup.client.get(\n \"/login\", params={\"rd\": return_url}, allow_redirects=False\n )\n assert r.status_code == 307\n url = urlparse(r.headers[\"Location\"])\n query = parse_qs(url.query)\n assert query[\"redirect_uri\"][0] == setup.config.oidc.redirect_url\n\n # Simulate the return from the OpenID Connect provider.\n r = await setup.client.get(\n \"/oauth2/callback\",\n params={\"code\": \"some-code\", \"state\": query[\"state\"][0]},\n allow_redirects=False,\n )\n assert r.status_code == 307\n assert r.headers[\"Location\"] == return_url" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for api_v1_authenticate_post
def test_api_v1_authenticate_post(self): pass
[ "def test_authentication_challenge_authenticate_post(self):\n pass", "def test_authentication_challenge_get_post(self):\n pass", "def test_post_authentication_duo_verify_success_with_passcode(self):\n\n url = reverse('authentication_duo_verify')\n\n data = {\n 'token': self.token,\n 'duo_token': '123456'\n }\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_post_authentication_duo_verify_success_without_passcode(self):\n\n url = reverse('authentication_duo_verify')\n\n data = {\n 'token': self.token,\n }\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_obtain_auth_token(self):\n\t\turl = reverse('api-token-auth')\n\t\tdata = {\n\t\t\t'username': self.user.username,\n\t\t\t'password': 'testpass',\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.data['token'], self.token.key)", "def test_create_account_using_post(self):\n pass", "def post(self):\n test_user = api.payload\n if 'username' in test_user:\n username = test_user['username']\n if 'password' in test_user:\n password = test_user['password']\n res = User.login(username=username, password=password, registered_users=registered_users)\n return res", "def test_login_endpoint(self):\n res = self.app.post('/api/v1/auth/signin', data=json.dumps(self.user_login), content_type='application/json')\n self.assertEqual(res.status_code, 200)\n data = json.loads(res.data)\n self.assertTrue('token' in data)", "def test_post_success(self):\n tmp_data = tmp_user_id(self.user.id)\n\n data = {\n 'tmp_user_id': tmp_data['otp'],\n 'otp_code': self.user.otp.get_otp_code()\n }\n response = self.client.post(\n reverse('login_otp'), data=data, format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue('token' in response.data)", "def post_authorization(self):\n pass", "def test_create_token_exchange_using_post(self):\n pass", "def test_auth(self):\n pass", "def test_authenticate_view_set_post_case_insensitive_2_accounts(self):\n # Create users\n user = account_models.User.objects\\\n .create_user(email='mrtest@mypapaya.io', password='WhoWantsToBeAMillionaire?', username='aov1')\n\n account_models.User.objects \\\n .create_user(email='MRtest@mypapaya.io', password='WhosAMillionaire', username='aov2')\n\n # Log user in\n client = APIClient()\n\n payload = {\n 'email': 'MRtest@mypapaya.io',\n 'password': 'WhoWantsToBeAMillionaire?'\n }\n\n request = client.post('/api/auth', data=payload, format='json')\n response = request.data\n token = response['token']\n\n self.assertIsNotNone(token)\n\n # Get user data\n client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n me_request = client.get('/api/me', format='json')\n me_result = me_request.data\n\n self.assertEquals(me_result['id'], user.id)", "def test_login_account(self):\n\n data = {\n 'username': tester_data[\"username\"],\n 'password': tester_data[\"password\"],\n }\n\n res = self.c.post('/api/rest/loginAccount/', data)\n self.assertEqual(200, res.status_code)", "def test_authentication_challenge_cancel_post(self):\n pass", "def test_post_authentication_duo_verify_error_with_status_message(self):\n\n url = 
reverse('authentication_duo_verify')\n\n data = {\n 'token': self.token,\n 'duo_token': '123456'\n }\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def post(self):\n\n cont = self.request_continue_url()\n\n # Authenticate via username or email + password\n identifier = self.request_string('identifier')\n password = self.request_string('password')\n if not identifier or not password:\n errors = {}\n if not identifier: errors['noemail'] = True\n if not password: errors['nopassword'] = True\n self.render_json({'errors': errors})\n return\n\n user_data = UserData.get_from_username_or_email(identifier.strip())\n if not user_data or not user_data.validate_password(password):\n errors = {}\n errors['badlogin'] = True\n # TODO(benkomalo): IP-based throttling of failed logins?\n self.render_json({'errors': errors})\n return\n\n # Successful login\n Login.return_login_json(self, user_data, cont)", "def auth(client, email, password): # pragma: no cover\n user = User.authenticate(client, email, password)\n click.echo(user.meta.api_token)", "def test_api_v1_users_post(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for api_v1_authenticate_renew_get
def test_api_v1_authenticate_renew_get(self): pass
[ "def renew(self):\n \n self.check_auth()\n response = self.oauth.get(self.renew_token_url)\n response.raise_for_status()\n return True", "def _renew_token(self):\n self.token = self._api_auth()", "def test_token_refresh_retry(self, requests_mock):\n first_request = True\n\n def generate_response(*_, **__):\n nonlocal first_request\n if first_request:\n first_request = False\n return MockedTokenResponse(status_code=401)\n else:\n return MockedTokenResponse(status_code=200)\n requests_mock.post.side_effect = generate_response\n\n message = MessageBrokerRecord(messageDest='vibrent')\n message_broker = MessageBrokerFactory.create(message)\n\n # create a auth info record with expired token\n expired_at = clock.CLOCK.now()\n self._create_auth_info_record('vibrent', 'current_token', expired_at)\n\n self.assertEqual('new_token', message_broker.get_access_token())", "def test_token_expire_after_renewal(self):\n self.token.created = self.token.created - datetime.timedelta(days=40)\n self.token.save()\n response = self.csrf_client.post(\n '/auth-token/', {'username': self.username,\n 'password': self.password}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotEqual(response.data['token'], self.key)", "def reauthenticate(self) -> dict:\n\n # Make the request.\n content = self.session.make_request(\n method='post',\n endpoint='/api/iserver/reauthenticate'\n )\n\n return content", "def test_authenticated_patron_can_authenticate_with_expired_credentials(self):\n one_year_ago = datetime.datetime.utcnow() - datetime.timedelta(days=365)\n with self.request_context_with_library(\"/\"):\n patron = self.controller.authenticated_patron(\n self.valid_credentials\n )\n patron.expires = one_year_ago\n\n patron = self.controller.authenticated_patron(\n self.valid_credentials\n )\n eq_(one_year_ago, patron.expires)", "def test_password_reset_expires_token(populate_users, authenticated_client):\n rv = authenticated_client.get('/api/auth/me',\n content_type='application/json')\n assert rv.status_code == 200\n time.sleep(2)\n admin = populate_users[0]\n user_management.set_password(admin)\n admin.save()\n rv = authenticated_client.get('/api/auth/protected/',\n content_type='application/json')\n assert rv.status_code == 401", "def test_get_new_access_token(self):\n # get new access token and confirm side effects\n self.authorizer._get_new_access_token()\n self.on_refresh.assert_called_once()\n self.assertNotEqual(self.access_token, self.authorizer.access_token)\n\n # confirm AuthClient is still usable with new token\n get_res = self.tc.get_endpoint(GO_EP1_ID)\n self.assertEqual(get_res[\"id\"], GO_EP1_ID)", "def refresh_token():\n\n enc_token = jwt_helper.get_token_from_cookie(cookies=request.cookies, key='refToken')\n __, jwt_content = jwt_helper.decode(token=enc_token, token_type='refresh')\n\n # check_jti()\n subject = jwt_content['sub']\n refresh_token, access_token = jwt_helper.gen_tokens(subject)\n resp = jwt_helper.make_token_response(access_token, refresh_token)\n return resp", "def test_authenticate_expired_token(self):\n data = {\n 'username': self.user.username,\n 'password': 'Test123!'\n }\n\n response = self.client.post(reverse('token_api'), data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n token = TemporaryToken.objects.get(\n user__username=self.user.username,\n )\n token.expire()\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n\n # This could be any url and any method. 
It is only used to test the\n # token authentication.\n response = self.client.delete(\n reverse(\n 'authentication-detail',\n kwargs={'pk': 'invalid_token'},\n ),\n )\n\n content = {'detail': 'Token has expired'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_api_v1_authenticate_post(self):\n pass", "def test_user_can_revoke_api_key(self):\n self.client.login(username=\"alice@example.org\", password=\"password\")\n api_key = self.alice.profile.api_key\n self.assertEqual(api_key, 'abc') # Assert that api key created\n\n form = {\"revoke_api_key\": \"\"}\n self.client.post(\"/accounts/profile/\", form) # revoke the api key\n self.alice.profile.refresh_from_db()\n api_key = self.alice.profile.api_key\n self.assertEqual(\"\", api_key)", "def renew_token(self) -> None:\n logging.debug('QivivoAPI: Asking for token')\n send_data = urllib.parse.urlencode({'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n })\n\n send_data = send_data.encode('ascii')\n try:\n r = json.load(urllib.request.urlopen(self.oauth_url, send_data))\n except urllib.error.HTTPError as e:\n logging.error(\"QivivoAPI: urllib error: \" + e.reason)\n logging.error(\"QivivoAPI; API error: \" + e.read())\n self.token = r['access_token']\n self.token_date = datetime.now()\n logging.info('QivivoAPI: token initialised with: ' + self.token)\n return", "def _refresh(self):\n # Request and set a new API token.\n new_token = self.authenticate(self._username, self._password)\n self._token = new_token\n logger.info('New API token received: \"{}\".'.format(new_token))\n return self._token", "def test_task_renew_redirect(self):\n\n # login testuser\n self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')\n # get object\n taskname_1 = Taskname.objects.get(taskname_name='taskname_1')\n # get object\n task_1 = Task.objects.get(taskname=taskname_1)\n # create url\n destination = urllib.parse.quote('/task/' + str(task_1.task_id) + '/', safe='/')\n # get response\n response = self.client.get('/task/' + str(task_1.task_id) + '/renew/', follow=True)\n # compare\n self.assertRedirects(response, destination, status_code=302, target_status_code=200)", "def renew_token(self, token):\n api_token_data = {}\n self.log.in_token_renewal = True # pause API logging\n\n # log token information\n try:\n params = {'expiredToken': token}\n url = '{}/appAuth'.format(self.token_url)\n r = self.session.get(url, params=params, verify=self.verify)\n\n if not r.ok:\n err_reason = r.text or r.reason\n err_msg = (\n 'Token Retry Error. 
API status code: {}, API message: {}, '\n 'Token: {}.'.format(r.status_code, err_reason, self.printable_token(token))\n )\n self.log.error(err_msg)\n raise RuntimeError(1042, err_msg)\n except exceptions.SSLError: # pragma: no cover\n raise RuntimeError('Token renewal failed with an SSL Error.')\n\n # process response for token\n try:\n api_token_data = r.json()\n except (AttributeError, ValueError) as e: # pragma: no cover\n raise RuntimeError('Token renewal failed ({}).'.format(e))\n finally:\n self.log.in_token_renewal = False\n\n return api_token_data", "def _renew(self, data):\n self.created_at = datetime.utcnow()\n if data is None:\n return\n \n self.access_token = data['access_token']\n self.refresh_token = data.get('refresh_token', '')\n self.expires_in = data['expires_in']\n scopes = self.scopes\n scopes.clear()\n for scope in data['scope'].split():\n try:\n scopes.add(SCOPES[scope])\n except KeyError:\n pass", "def test_init_v3_token_auth_ref_v2(self):\n\n expected_auth_ref = {'token': {'id': 'ctx_token', 'expires': '123'},\n 'version': 'v2.0'}\n self._stubs_v3(method='auth_ref', auth_ref=expected_auth_ref)\n self.m.ReplayAll()\n\n ctx = utils.dummy_context()\n ctx.username = None\n ctx.password = None\n ctx.trust_id = None\n ctx.auth_token = 'ctx_token'\n ctx.auth_token_info = {'access': {\n 'token': {'id': 'abcd1234', 'expires': '123'}}}\n heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)\n heat_ks_client.client\n self.assertIsNotNone(heat_ks_client._client)", "def __init__(self, username: str, password: str,\r\n write_refresh: Callable[[str], None], refresh_token=None) -> None:\r\n\r\n self.client = ExtendedClient()\r\n \r\n try:\r\n print(\"Attempting login with refresh token: \", end=\"\")\r\n self.client.authenticate(refresh_token)\r\n print(\"Sucess\")\r\n except LoginError:\r\n print(\"Login Failed using username and password.\")\r\n try:\r\n self.client.login(username, password)\r\n new_token = self.client.refresh_token\r\n write_refresh(new_token) #save token\r\n except LoginError:\r\n raise Exception(\"Authentication Error\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract trending topics from Twitter response.
def extract_twitter_trends(resp): trend_list = [trend['name'] for trend in resp[0]['trends']] return trend_list
[ "def trendingTweets():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for tweet in trending_topics:\n util.safe_print(tweet.GetText())", "def trendingTopics():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for topic in trending_topics:\n util.safe_print(topic.name)", "def hot():\n try:\n listing = feedparser.parse(TRENDING_URL)['entries']\n trends = [item['title'] for item in listing]\n return trends\n except Exception as e:\n print('ERR hot terms failed!', str(e))\n return None", "def fetchTweets(queryTopic,twitter):\r\n \r\n raw_data = twitter.search(q=str(queryTopic), count= 10, lang='en')\r\n\r\n tweets = []\r\n\r\n #search through JSON data and extract the tweets only.\r\n for tweet in raw_data['statuses']:\r\n tweets.append((tweet['text']).encode('ascii', 'ignore'))\r\n \r\n \r\n for i in range(0,len(tweets)):\r\n #removing all links, because really its just gonna mess up topic modeling\r\n tweets[i] =re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', tweets[i])\r\n #removing #'s, '\\n''s, and 'RT'\r\n tweets[i] = tweets[i].replace(\"#\",\"\")\r\n tweets[i] = tweets[i].replace(\"\\n\",\"\")\r\n if tweets[i][:2] == \"RT\":\r\n while(tweets[i][:2] != ': '):\r\n tweets[i] = tweets[i][1:]\r\n tweets[i] = tweets[i][2:]\r\n \r\n \r\n tweets = filter(lambda x: len(x) > 3, tweets)\r\n \r\n return tweets", "def display_trends():\n #setting the input to the list returned from GetTrendsCurrent()\n trends = api.GetTrendsWoeid(woeid=23424977, exclude=None)\n #for the list of objects trends, provide the name and url attribute to the\n top_tweets = []\n for trend in trends:\n top_tweets.append((trend.name, trend.url))\n top_tweets = top_tweets[:5]\n return top_tweets", "def parse_rummager_topics(results):\n pages = []\n\n for result in results:\n pages.append(\n Topic(\n name=result['title'],\n base_path=result['slug'],\n document_type=DocumentType[result['format']]\n )\n )\n\n return pages", "def crawl_tweets(topic_string,num_of_calls,date_string):\n empty_list = []\n for i in range(num_of_calls):\n empty_list.append(twitter.search(q=topic_string,lang = 'en',count=100,until=date_string))\n \n return empty_list", "def google_trends(term: str) -> dict:\n pytrend = TrendReq()\n pytrend.build_payload(kw_list=[term])\n region_wise = pytrend.interest_by_region()\n top10 = region_wise.sort_values(by=term, ascending=False).head(10)\n# top10 = pd.DataFrame.to_dict(top10)\n top10 = st.bar_chart(top10)\n related_queries = pytrend.related_queries()\n related_queries = pd.DataFrame(related_queries[term]['rising'].sort_values(by=\"value\", ascending=False))\n# related_queries = st.bar_chart(related_queries)\n# stopwords = stoplists.gtrends_stop_words\n# remove_words = [word for word in related_queries['query'] if word in stopwords]\n# related_queries = related_queries[~related_queries['query'].isin(remove_words)]\n# related_queries = pd.DataFrame.to_dict(related_queries)\n return (top10,related_queries)", "def parse_watson_result(result):\n if \"results\" not in result:\n raise ValueError(\"Result does not have \\\"results\\\" attribute\")\n ret_list = []\n for sen_item in result[\"results\"]:\n if len(sen_item) == 0:\n raise ValueError(\"No results\")\n # Watson by default return the first sentence\n sentence_dict = 
sen_item[\"alternatives\"][0]\n timestamps = sentence_dict[\"timestamps\"]\n confidence = sentence_dict[\"confidence\"]\n text = sentence_dict[\"transcript\"].strip()\n start_time = timestamps[0][1]\n end_time = timestamps[-1][2]\n sentence = {\"text\": text,\n \"confidence\": confidence,\n \"start_time\": start_time,\n \"end_time\": end_time\n }\n ret_list.append(sentence)\n return ret_list", "def analyze_tweet(tweet, results):\n\n ######################################\n # fields that are relevant for user-level and tweet-level analysis\n # count the number of valid Tweets here\n # if it doesn't have at least a body and an actor, it's not a tweet\n try: \n body = tweet[\"body\"]\n userid = tweet[\"actor\"][\"id\"].split(\":\")[-1]\n results[\"tweet_count\"] += 1\n except (ValueError, KeyError):\n if \"non-tweet_lines\" in results:\n results[\"non-tweet_lines\"] += 1\n return\n\n # count the number of tweets from each user\n if \"tweets_per_user\" in results:\n results[\"tweets_per_user\"][tweet[\"actor\"][\"id\"][15:]] += 1\n \n #######################################\n # fields that are relevant for the tweet-level analysis\n # ------------------> term counts\n # Tweet body term count\n if \"body_term_count\" in results:\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # count the occurences of different hashtags\n if \"hashtags\" in results:\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in tweet[\"twitter_entities\"][\"hashtags\"]:\n results[\"hashtags\"][h[\"text\"].lower()] += 1\n \n try:\n # count the occurences of different top-level domains\n if (\"urls\" in results) and (\"urls\" in tweet[\"gnip\"]):\n for url in tweet[\"gnip\"][\"urls\"]:\n try:\n results[\"urls\"][url[\"expanded_url\"].split(\"/\")[2]] += 1\n except (KeyError,IndexError,AttributeError):\n pass\n # and the number of links total\n if (\"number_of_links\" in results) and (\"urls\" in tweet[\"gnip\"]):\n results[\"number_of_links\"] += len(tweet[\"gnip\"][\"urls\"])\n except KeyError:\n pass\n \n # -----------> timelines\n # make a timeline of UTC day of Tweets posted\n if \"utc_timeline\" in results:\n date = tweet[\"postedTime\"][0:10]\n results[\"utc_timeline\"][date] += 1\n\n # make a timeline in normalized local time (poster's time) of all of the Tweets\n if \"local_timeline\" in results:\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n \n # ------------> mention results\n # which users are @mentioned in the Tweet\n if \"at_mentions\" in results:\n for u in tweet[\"twitter_entities\"][\"user_mentions\"]:\n # update the mentions with weight + 1 and \n # list all of the screennames (in case a name changes)\n if u[\"id_str\"] is not None:\n results[\"at_mentions\"][u[\"id_str\"]][\"weight\"] += 1 \n results[\"at_mentions\"][u[\"id_str\"]][\"screennames\"].update([u[\"screen_name\"].lower()])\n \n # count the number of times each user gets replies\n if (\"in_reply_to\" in results) and (\"inReplyTo\" in tweet):\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n # --------------> RTs and quote Tweet\n # count share actions (RTs and quote-Tweets)\n # don't count self-quotes or self-RTs, because that's allowed now\n if ((\"quote_of_user\" in results) or (\"RT_of_user\" in results)) and 
(tweet[\"verb\"] == \"share\"):\n # if it's a quote tweet\n if (\"quote_of_user\" in results) and (\"twitter_quoted_status\" in tweet[\"object\"]):\n quoted_id = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"id\"][15:]\n quoted_name = tweet[\"object\"][\"twitter_quoted_status\"][\"actor\"][\"preferredUsername\"]\n if quoted_id != tweet[\"actor\"][\"id\"]:\n results[\"quote_of_user\"][quoted_id][\"weight\"] += 1 \n results[\"quote_of_user\"][quoted_id][\"screennames\"].update([quoted_name])\n # if it's a RT\n elif (\"RT_of_user\" in results):\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n if rt_of_id != tweet[\"actor\"][\"id\"]:\n results[\"RT_of_user\"][rt_of_id][\"weight\"] += 1 \n results[\"RT_of_user\"][rt_of_id][\"screennames\"].update([rt_of_name])\n\n # Tweet expended url content term count\n if \"url_content\" in results:\n try:\n urls = tweet[\"gnip\"][\"urls\"]\n except KeyError:\n urls = []\n url_content = \"\"\n for url in urls:\n try:\n expanded_url_title = url[\"expanded_url_title\"]\n if expanded_url_title is None:\n expanded_url_title = \"\"\n except KeyError:\n expanded_url_title = \"\"\n try:\n expanded_url_description = url[\"expanded_url_description\"]\n if expanded_url_description is None:\n expanded_url_description = \"\"\n except KeyError:\n expanded_url_description = \"\"\n url_content = url_content + \" \" + expanded_url_title + \" \" + expanded_url_description\n results[\"url_content\"].add(url_content)\n \n ############################################\n # actor-property qualities\n # ------------> bio terms\n if \"bio_term_count\" in results:\n if tweet[\"actor\"][\"id\"][:15] not in results[\"tweets_per_user\"]:\n try:\n if tweet[\"actor\"][\"summary\"] is not None:\n results[\"bio_term_count\"].add(tweet[\"actor\"][\"summary\"])\n except KeyError:\n pass\n \n # ---------> profile locations\n if \"profile_locations_regions\" in results:\n # if possible, get the user's address\n try:\n address = tweet[\"gnip\"][\"profileLocations\"][0][\"address\"]\n country_key = address.get(\"country\", \"no country available\")\n region_key = address.get(\"region\", \"no region available\")\n except KeyError:\n country_key = \"no country available\"\n region_key = \"no region available\"\n results[\"profile_locations_regions\"][country_key + \" , \" + region_key] += 1", "def get_trending_list(num: int) -> list:\n\n articles = []\n url_articles = \"https://seekingalpha.com/news/trending_news\"\n response = requests.get(url_articles, headers={\"User-Agent\": get_user_agent()})\n\n # Check that the API response was successful\n if response.status_code != 200:\n print(\"Invalid response\\n\")\n else:\n for item in response.json():\n article_url = item[\"uri\"]\n if not article_url.startswith(\"/news/\"):\n continue\n article_id = article_url.split(\"/\")[2].split(\"-\")[0]\n articles.append(\n {\n \"title\": item[\"title\"],\n \"publishedAt\": item[\"publish_on\"][: item[\"publish_on\"].rfind(\".\")],\n \"url\": \"https://seekingalpha.com\" + article_url,\n \"id\": article_id,\n }\n )\n\n return articles[:num]", "def get_trends(path):\r\n with open(path, 'r', encoding='UTF-8') as f:\r\n data = f.read()[14:-4]\r\n trends = data.split('}, {')\r\n return trends", "def process_tweets(tweets, classify_tweet_type=True, extract_tweet_entities=True):\n\n for line in tweets:\n\n if classify_tweet_type is True:\n # classify tweet as retweet/mention/tweet\n if \"retweeted_status\" in line:\n 
line[\"TWEET_TYPE\"] = \"retweet\"\n elif len(line[\"entities\"][\"user_mentions\"]) > 0:\n line[\"TWEET_TYPE\"] = \"mention\"\n else:\n line[\"TWEET_TYPE\"] = \"tweet\"\n\n if extract_tweet_entities is True:\n # check if line contains a menetion, and if so, extract all users mentione\n tweeties = []\n line[\"TWEETIES\"] = \"\"\n if len(line[\"entities\"][\"user_mentions\"]) > 0:\n tweeties.extend(line[\"entities\"][\"user_mentions\"])\n line[\"TWEETIES\"] = \" \".join([user[\"screen_name\"] for user in tweeties])\n\n # check if line contains a hashtag, and if so, extact all hashtags\n hashtags = []\n line[\"HASHTAGS\"] = \"\"\n if len(line[\"entities\"][\"hashtags\"]) > 0:\n hashtags.extend(line[\"entities\"][\"hashtags\"])\n line[\"HASHTAGS\"] = \" \".join([tag[\"text\"] for tag in hashtags])\n\n # check if line contains a URL, and if so, extract all expanded URLS\n expanded_urls = []\n line[\"EXPANDED_URLS\"] = \"\"\n if len(line[\"entities\"][\"urls\"]) > 0:\n expanded_urls.extend(line[\"entities\"][\"urls\"])\n line[\"EXPANDED_URLS\"] = \" \".join(\n [url[\"expanded_url\"] for url in expanded_urls]\n )\n\n # check if line has lat/long, and if so, extract lat/long\n line[\"LATITUDE\"] = \"\"\n line[\"LONGITUDE\"] = \"\"\n if line[\"geo\"] is not None:\n line[\"LATITUDE\"] = line[\"geo\"][\"coordinates\"][0]\n line[\"LONGITUDE\"] = line[\"geo\"][\"coordinates\"][1]\n\n return tweets", "def trending(words,startYr,endYr):\n startLst=[]\n endLst=[]\n WordTrendLst=[]\n lst=[]\n for i in words:\n for j in words[i]:\n if j.year==startYr:\n if j.count>=1000:\n startLst.append(i)\n for i in startLst:\n for j in words[i]:\n if j.year==endYr:\n if j.count>=1000:\n endLst.append(i)\n for i in endLst:\n for j in words[i]:\n if j.year==startYr:\n trendValue=j.count\n if j.year==endYr:\n trendValue=j.count/trendValue\n WordTrendLst.append(WordTrend(i,trendValue))\n trendValue=0\n return(sorted(WordTrendLst,key=lambda WordTrend:WordTrend.trend))[::-1]", "def forecast(response):\n\n soup = BeautifulSoup(response, \"lxml\")\n hourly = ForecastHourlyExtractor.extract(soup)\n twoday = ForecastTwodayExtractor.extract(soup)\n tenday = ForecastTendayExtractor.extract(soup)\n return (hourly, twoday, tenday)", "def extractData(parsedTweet):\n\n #extract hashtags as a list\n hashtags = [x['text'] for x in parsedTweet['entities']['hashtags']]\n\n #extract created_at and convert into an integer of seconds since epoch\n timestamp = int(time.mktime(time.strptime(parsedTweet['created_at'][0:20] +\\\n parsedTweet['created_at'][26:],\n '%a %b %d %H:%M:%S %Y')))\n return hashtags, timestamp", "def news_trending(subscription_key):\n client = NewsSearchClient(\n endpoint=\"https://api.cognitive.microsoft.com\",\n credentials=CognitiveServicesCredentials(subscription_key)\n )\n\n try:\n trending_topics = client.news.trending(market=\"en-us\")\n print(\"Search news trending topics in Bing\")\n\n if trending_topics.value:\n first_topic = trending_topics.value[0]\n print(\"News result count: {}\".format(len(trending_topics.value)))\n print(\"First topic name: {}\".format(first_topic.name))\n print(\"First topic query: {}\".format(first_topic.query.text))\n print(\"First topic image url: {}\".format(first_topic.image.url))\n print(\"First topic webSearchUrl: {}\".format(\n first_topic.web_search_url))\n print(\"First topic newsSearchUrl: {}\".format(\n first_topic.news_search_url))\n else:\n print(\"Didn't see any topics result data..\")\n\n except Exception as err:\n print(\"Encountered exception. 
{}\".format(err))", "def get_tweets(self):\n\t\treturn self.tweets", "def test_trends():\n # resp = list(client.trends(max=5))\n # assert len(resp) > 0\n # TODO(milesmcc): reactivate this test when GETTR puts posts back on its homepage" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates the next logical pk based on the current contents of the DataSet
def next_pk(self): pk = 0 while True: while pk in [obj.pk for obj in self.dset]: pk += 1 yield pk
[ "def next_primary_key(cls):\n tb_name = cls._meta.db_table\n cls_db = cls._meta.database\n cursor = cls_db.execute_sql(\"SELECT `AUTO_INCREMENT` AS `next` \"\n \"FROM information_schema.`TABLES` \"\n \"WHERE TABLE_SCHEMA = %s\"\n \"AND TABLE_NAME = %s\",\n (cls_db.database, tb_name,))\n row = cursor.fetchone()\n cursor.close()\n return row[0]", "def generate_next_id(cls):\n group_id = cls.__auto_next_id\n cls.__auto_next_id = group_id+(7<<8)\n return group_id", "def getNextDatasetRec(self):\n if self.__dataset__:\n self.__rec_no__ = min(len(self.__dataset__) - 1,\n self.__rec_no__ + 1)\n return self.__dataset__[self.__rec_no__]\n return None", "def ds_gen_id(self,ds_name,act_cmd):\n row_cfg = self.df_cfg[self.df_cfg['ds_name']==ds_name]\n col_id = row_cfg['col_id'].values[0]\n col_name = row_cfg['col_name'].values[0]\n col_key = row_cfg['col_key'].values[0]\n \n df = self.load_ds(ds_name)\n if col_key !=\"\":\n keys = col_key.split(\",\")\n df.sort_values(by=keys)\n \n lst = df[col_name].values.tolist()\n if act_cmd=='N':\n #new\n format_str = \"N|%i||%s\"\n for i in range(len(lst)):\n id_str = format_str %(i+1,lst[i])\n print(id_str)\n if act_cmd=='M': #insert missing\n pass", "def get_dataset_first_record_id(self):\n return self.get_dataset_record_by_limit_offset_order(limit=1, offset=0, order=\"record_id\")", "def autogenerate_pk(self):\n self.pk = uuid.uuid4().hex", "def nextUniqueName(self):\n \n pass", "def first_key(self):\n raise NotImplementedError", "def get_dataset_id(self, dataset):\n raise NotImplementedError", "def get_next_row_id(self):\n if len(self.deleted_row_ids) > 0:\n # is there an existing deleted row?\n rowid = self.deleted_row_ids.pop()\n else:\n rowid = len(self.tuples)\n # append an empty row\n self.tuples.append([])\n return rowid", "def get_next_available_key(self) -> str:\n\n last_key = self._get_last_project_key()\n assert last_key.startswith(self.initials)\n key_number = int(last_key[len(self.initials) :])\n key_number += 1\n next_available_key = f\"{self.initials}{key_number:05d}\"\n return next_available_key", "def generate_id(self, portal_type, batch_size=None):", "def FNextKeyId(self, *args):\n return _snap.TIntStrH_FNextKeyId(self, *args)", "def _get_next_transaction_id(self):\r\n transaction_id = self._transaction_id\r\n self._transaction_id += 1\r\n if self._transaction_id > 8388607:\r\n self._transaction_id = 2\r\n return transaction_id", "def _generateId(self):\n while True:\n if self._v_nextid is None:\n self._v_nextid = random.randrange(0, 2**31)\n uid = self._v_nextid\n self._v_nextid += 1\n if uid not in self._tagid_to_obj:\n return uid\n #self._v_nextid = None", "def FNextKeyId(self, *args):\n return _snap.TIntIntH_FNextKeyId(self, *args)", "def _get_next(self, prev_job_id, state):\n plan_key = Job.encode_plan_key(prev_job_id, state)\n job_id = self.plan.get(plan_key)\n return job_id", "def get_next_project_id(self, public_key):\n last_id = list(self.execute(\"SELECT MAX(id) FROM projects WHERE public_key = ?\", (database_blob(public_key),)))[0][0]\n if not last_id:\n last_id = 0\n return last_id + 1", "def _pk(self, obj):\n pk_values = tuple(getattr(obj, c.name)\n for c in obj.__mapper__.primary_key)\n if len(pk_values) == 1:\n return pk_values[0]\n return pk_values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new DataObj to a DataSet. To use, pass each attribute and its desired value as kwargs
def create(self, pk=None, **kwargs): pk = pk or next(self.pk_gen) obj = self.cls(**kwargs) dobj = self.DataObj(obj, pk) self.dset.add(dobj)
[ "def add(self, **kwargs: dict):\n\n # all keys are mandatory for references\n reference_keys = set(['from_object_uuid', 'from_object_class_name', 'from_property_name',\\\n 'to_object_uuid'])\n\n if kwargs.keys() == reference_keys: \n with self._commit_lock:\n self._last_update = time.time()\n self._reference_batch.add(**kwargs)\n self._update_batch_if_necessary()\n return\n\n # only mandatory keys\n object_keys = set(['data_object', 'class_name'])\n all_object_keys = set(['data_object', 'class_name', 'uuid', 'vector'])\n\n if (not object_keys - kwargs.keys()) and set(kwargs).issubset(all_object_keys):\n with self._commit_lock:\n self._last_update = time.time()\n self._objects_batch.add(**kwargs)\n self._update_batch_if_necessary()\n return\n \n raise TypeError(\"Wrong arguments for adding data to batcher!\\n\"\n f\"Accepted arguments for references: {reference_keys}\\n\"\n f\"Accepted arguments for objects: {all_object_keys}! 'uuid' and 'vector' - optional\\n\")", "def add_dataset(self, **kwargs) -> None:\n dataset = XLDataset(**kwargs)\n\n if dataset.split == \"training\":\n self.training.append(dataset)\n elif dataset.split == \"validation\":\n self.validation.append(dataset)\n elif dataset.split == \"test\":\n self.test.append(dataset)\n else:\n raise ValueError(f\"Unknown value for 'split' in \"\n \"{dataset.pxid}.\")", "def __add__(self, dataset):\n for attr in ['extent', 'crs', 'sensor', 'acquisition_mode', 'proc_steps', 'outname_base']:\n if getattr(self, attr) != getattr(dataset, attr):\n raise ValueError('value mismatch: {}'.format(attr))\n # self.filename.append(dataset.filename)\n for key in dataset.measurements.keys():\n if key in self.measurements.keys():\n raise RuntimeError('only different measurements can be combined to one dataset')\n self.measurements.update(dataset.measurements)\n return self", "def attach_data_to(self, obj, data):\n obj.attach_data(self.key, data)", "def store_dataset(group, name, obj):\n dset = group.create_dataset(name, **obj.kwds)\n update_attrs(dset, obj.attrs)", "def add_perfdata(self, *args, **kwargs) -> None:\n self._perfdata.append(Perfdata(*args, **kwargs))", "def addedDataObject(ob, event):\n log.info('Added data object')\n ob.index_object()", "def _AddObject(self, _id, obj):\n if self._vbo is None:\n return\n index = -1\n if len(self._empty_indices) > 0:\n index = self._empty_indices.pop()\n elif self._max_index < self._data_size - 1:\n self._max_index += 1\n index = self._max_index\n if index > 0:\n values = self.__descToArray(obj)\n num_values = len(values)\n self._vbo[\n index * num_values:(index + 1) * num_values] = narray(values, \"f\")\n self._indices[_id] = index\n else:\n self._vbo = None\n # trigger _BuildData for next draw", "def add_additional_info_dataset(cls, dataset_id, additional_info):\n dataset = cls.get_by_id(int(dataset_id))\n if not dataset:\n return\n\n logging.error(additional_info)\n for key, value in additional_info.items():\n dataset.additional_data[key.lower()] = value\n if value:\n tag = create_indexed_tag(key, value)\n dataset.indexed_data.append(tag)\n\n dataset.indexed_data = uniquify(dataset.indexed_data)\n dataset.put()\n\n return dataset", "def createAndAdd(data):", "def add(self, dataset):\n if not isinstance(dataset, Dataset):\n raise TypeError('input must be of type pyroSAR.datacube.Dataset')\n self.check_integrity(dataset, allow_new_measurements=True)\n \n # set the general product definition attributes if they are None\n for attr in self.__fixture_metadata + self.__fixture_storage:\n if getattr(self, 
attr) is None:\n setattr(self, attr, getattr(dataset, attr))\n \n # if it is not yet present, add the dataset measurement definition to that of the product\n for measurement, content in dataset.measurements.items():\n if measurement not in self.measurements.keys():\n self.__add_measurement(dtype=content['dtype'],\n name=content['name'],\n nodata=content['nodata'],\n units=content['units'])", "def add(self, key, value):\n self.__dataset[key] = value", "def add_data(self, data_type, host, *args, **kwargs):\n self._perform_data_action(self.FN_ADD, data_type.name,\n host, *args, **kwargs)", "def create_dataset(self, name, *args, **kwargs):\n with h5file(self.file.filename, 'a', **self.file.params) as f:\n g = f[self.name]\n attrs = kwargs.pop('attrs') if 'attrs' in kwargs else dict()\n ds = g.create_dataset(name, *args, **kwargs)\n ds.attrs.update(attrs)", "def create_data_object(self, **kws):\n # if we have been passed an id, it's because we're performing\n # a modification\n existing_id = kws.get('id', None)\n if existing_id:\n obj = getattr(self.aq_explicit, existing_id)\n else:\n obj = self.data_object_factory()\n \n for defn in self.data_definition:\n indexName = defn.indexName\n dataType = defn.propertyDataType\n defn.setup_catalog(self)\n if defn.automatic:\n if defn.setDefault:\n if callable(defn.default):\n default = defn.default()\n else:\n default = defn.default\n obj.add_change_property(indexName, default, dataType)\n else:\n # TODO: we need to handle the _non_ default case!\n pass\n elif kws.has_key(indexName):\n val = kws.get(indexName)\n obj.add_change_property(indexName, val, dataType)\n # it's also possible that this object has other names,\n # for instance, dc_title is also known as title\n for altIndex in getattr(defn, 'alternativeIndexNames', []):\n obj.add_change_property(altIndex, val, dataType)\n \n # find and insert the data, if it exists\n for key in kws:\n if key == 'data':\n if type(kws[key]) == types.FileType:\n obj.set_content(file.read())\n else:\n obj.set_content(kws[key])\n \n # don't create the object again if the object already exists!\n if not existing_id:\n self.aq_explicit._setObject(obj.getId(), obj)\n new_object = getattr(self.aq_explicit, obj.getId())\n \n # look for any security management requests in the form\n for sec in self.security_management:\n # it is up to the validation framework to determine if we can\n # get this far\n vals = kws.get(sec.id, [])\n if type(vals) == types.StringType:\n vals = [vals]\n sec.set_permissions(new_object, vals)\n new_object.reindexObjectSecurity()\n \n return True", "def add(self, dict):\n self.data.update(dict)", "def add_data(self, *elems):\n d = {}\n for e in elems:\n if isinstance(e, DataElement):\n d[e.uuid()] = e\n else:\n raise ValueError(\"Invalid element '%s'\" % e)\n self._kvstore.add_many(d)", "def append(self, dataset, identifier):\n\n if isinstance(dataset, str):\n dataset = self._dataset_class(dataset_path=dataset)\n\n if not isinstance(dataset, self._dataset_class):\n raise CompatibilityException('Incompatible dataset. 
'\n 'You can only add instances of '\n 'type {}'.format(self._dataset_class))\n\n if len(dataset.description)>0:\n identifier = dataset.description\n\n if not self._is_init:\n self._ids = set(dataset.samplet_ids)\n self.targets = dataset.targets\n self._target_sizes = dataset.target_sizes\n\n self.num_samplets = len(self._ids)\n self._modalities[identifier] = dataset.data\n self.feature_names[identifier] = dataset.feature_names\n self.num_features.append(dataset.num_features)\n\n # maintaining a no-data pyradigm Dataset internally to reuse its methods\n self._dataset = copy(dataset)\n # replacing its data with zeros\n self._dataset.data = {id_: np.zeros(1) for id_ in self._ids}\n\n if hasattr(dataset, 'attr'):\n self._common_attr = dataset.attr\n self._common_attr_dtype = dataset.attr_dtype\n else:\n self._common_attr = dict()\n self._common_attr_dtype = dict()\n\n self._attr = dict()\n\n self._is_init = True\n else:\n # this also checks for the size (num_samplets)\n if set(dataset.samplet_ids) != self._ids:\n raise CompatibilityException(\n 'Differing set of IDs in two datasets.'\n ' Unable to add this dataset to the MultiDataset.')\n\n if dataset.targets != self.targets:\n raise CompatibilityException(\n 'Targets for some IDs differ in the two datasets.'\n ' Unable to add this dataset to the MultiDataset.')\n\n if identifier not in self._modalities:\n self._modalities[identifier] = dataset.data\n self.feature_names[identifier] = dataset.feature_names\n self.num_features.append(dataset.num_features)\n else:\n raise KeyError('{} already exists in MultiDataset'\n ''.format(identifier))\n\n if hasattr(dataset, 'attr'):\n if len(self._common_attr) < 1:\n # no attributes were set at all - simple copy sufficient\n self._common_attr = dataset.attr.copy()\n self._common_attr_dtype = dataset.attr_dtype.copy()\n else:\n for a_name in dataset.attr:\n if a_name not in self._common_attr:\n self._common_attr[a_name] = dataset.attr[a_name]\n self._common_attr_dtype[a_name] = \\\n dataset.attr_dtype[a_name]\n elif self._common_attr[a_name] != dataset.attr[a_name]:\n raise ValueError(\n 'Values and/or IDs differ for attribute {}. '\n 'Ensure all datasets have common attributes '\n 'with the same values'.format(a_name))\n\n\n # each addition should be counted, if successful\n self.modality_count += 1", "def add_column(self,name,data):\n self.__dict__[name] = data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the median of two sorted arrays a and b.
def findMedianSortedArrays(self, a, b):
    n = len(a) + len(b)
    if n % 2 == 0:
        # If the total length is even, take the average of the two medians.
        return (self._findKth(a, 0, b, 0, n // 2) +
                self._findKth(a, 0, b, 0, n // 2 + 1)) / 2.0
    else:
        return self._findKth(a, 0, b, 0, n // 2 + 1)
[ "def findMedianSortedArrays(self, nums1, nums2):\n pass", "def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n n1 = len(nums1)\n n2 = len(nums2)\n if n1 > n2: #this is required because partition is one extra index and if we are running bsearch on larger array, then for shorter array r2 will go out of bounds eg [1,3] [2], party = 1\n return self.findMedianSortedArrays(nums2, nums1)\n low,high = 0 ,n1 #as opposed to n1 - 1\n while low <=high:\n partx = (low + high) // 2\n\n #setting the partition y index with respect to both the arrays\n party = ((n1 + n2+1) // 2) - partx\n\n #l1 points to highest element in first half of partitionx\n l1 = float(\"-inf\") if partx == 0 else nums1[partx-1]\n\n #r1 points to lowest element in second half of partitionx\n r1 = float(\"inf\") if partx == n1 else nums1[partx]\n\n #l2 points to highest element in first half of partitiony\n l2 = float(\"-inf\") if party == 0 else nums2[party-1]\n\n #r2 points to lowest element in first half of partitiony\n r2 = float(\"inf\") if party == n2 else nums2[party]\n \n if l1 <= r2 and l2 <= r1: #correct partition\n #compute the median based on odd and even\n n = n1 + n2\n if n % 2:\n return max(l1,l2)\n else:\n return (max(l1,l2) + min(r1,r2))/2\n elif l2 > r1: #moving the mid to right so that partitiony pointer moves to left so that chance of elements having smaller values on left greater than right is more (since l2 > r1)\n low = partx + 1\n else:\n #moving the mid to left so that partitiony pointer moves to right and as we move towards increasing order of elements, getting higher element in left from right(since l1 > r2) is beneficial to get the correct partition\n high = partx - 1\n return -1", "def median2(*args):\n return _seb.median2(*args)", "def mathematical_median(a: float, b: float, c: float) -> float:\n total = a + b + c\n smallest, largest = min(a, b, c), max(a,b,c)\n return total - smallest - largest", "def test_lex_median():\n keys1 = [\"e\", \"b\", \"b\", \"c\", \"d\", \"e\", \"e\", 'a']\n keys2 = [\"b\", \"b\", \"b\", \"d\", \"e\", \"e\", 'e', 'e']\n## keys3 = np.random.randint(0,2,(8,2))\n values = [1.2, 4.5, 4.3, 2.0, 5.6, 8.8, 9.1, 1]\n\n unique, median = group_by((keys1, keys2)).median(values)\n for i in zip(zip(*unique), median):\n print i", "def median(a, b, c):\r\n if b<=a<=c or c<=a<=b:\r\n return a\r\n elif a<=b<=c or c<=b<=a:\r\n return b\r\n else:\r\n return c", "def simple_median(a: float, b: float, c: float) -> float:\n if a > b and b > c:\n return b\n elif c > b and b > a:\n return b\n elif c > a and a > b:\n return a\n elif b > a and a > c:\n return a\n elif a > c and c > b:\n return c\n elif b > c and c > a:\n return c", "def median(numbers):\n assert len(numbers) > 3, 'Length of Numbers Must be more than 3'\n assert isinstance(numbers, collections.Iterable), 'Is Iterable'\n numbers = sorted(numbers)\n center = len(numbers) // 2\n if len(numbers) % 2 == 0:\n return sum(numbers[center - 1:center + 1]) // 2\n else:\n return numbers[center]", "def median(images):\n return np.median(np.dstack(images), axis=2).astype(np.uint8)", "def test_median_modulo():\n\n assert median([2048, 4096, 49152, 64738]) == 26624.0", "def findMedianSortedArray(self, A):\n S = len(A)\n m = S//2\n if S % 2 == 1:\n return A[m]\n return 0.5*(A[m] + A[m-1])", "def median_filter(x, y, num_bins, bin_width=None, x_min=None, x_max=None):\n if num_bins < 2:\n raise ValueError(\"num_bins must be at least 2. 
Got: %d\" % num_bins)\n\n # Validate the lengths of x and y.\n x_len = len(x)\n if x_len < 2:\n raise ValueError(\"len(x) must be at least 2. Got: %s\" % x_len)\n if x_len != len(y):\n raise ValueError(\"len(x) (got: %d) must equal len(y) (got: %d)\" % (x_len,\n len(y)))\n\n # Validate x_min and x_max.\n x_min = x_min if x_min is not None else x[0]\n x_max = x_max if x_max is not None else x[-1]\n if x_min >= x_max:\n raise ValueError(\"x_min (got: %d) must be less than x_max (got: %d)\" %\n (x_min, x_max))\n if x_min > x[-1]:\n raise ValueError(\n \"x_min (got: %d) must be less than or equal to the largest value of x \"\n \"(got: %d)\" % (x_min, x[-1]))\n\n # Validate bin_width.\n bin_width = bin_width if bin_width is not None else (x_max - x_min) / num_bins\n if bin_width <= 0:\n raise ValueError(\"bin_width must be positive. Got: %d\" % bin_width)\n if bin_width >= x_max - x_min:\n raise ValueError(\n \"bin_width (got: %d) must be less than x_max - x_min (got: %d)\" %\n (bin_width, x_max - x_min))\n\n bin_spacing = (x_max - x_min - bin_width) / (num_bins - 1)\n\n # Bins with no y-values will fall back to the global median.\n result = np.repeat(np.median(y), num_bins)\n\n # Find the first element of x >= x_min. This loop is guaranteed to produce\n # a valid index because we know that x_min <= x[-1].\n x_start = 0\n while x[x_start] < x_min:\n x_start += 1\n\n # The bin at index i is the median of all elements y[j] such that\n # bin_min <= x[j] < bin_max, where bin_min and bin_max are the endpoints of\n # bin i.\n bin_min = x_min # Left endpoint of the current bin.\n bin_max = x_min + bin_width # Right endpoint of the current bin.\n j_start = x_start # Inclusive left index of the current bin.\n j_end = x_start # Exclusive end index of the current bin.\n\n for i in range(num_bins):\n # Move j_start to the first index of x >= bin_min.\n while j_start < x_len and x[j_start] < bin_min:\n j_start += 1\n\n # Move j_end to the first index of x >= bin_max (exclusive end index).\n while j_end < x_len and x[j_end] < bin_max:\n j_end += 1\n\n if j_end > j_start:\n # Compute and insert the median bin value.\n result[i] = np.median(y[j_start:j_end])\n\n # Advance the bin.\n bin_min += bin_spacing\n bin_max += bin_spacing\n\n return result", "def test_median_real():\n\n assert median([2048, 4096, 49152]) == 4096", "def test_scenario_b(self):\n median_filter = MedianFilter(5, 5)\n\n for scan, expected_res in zip(self.scans_b, self.res_b):\n median_filter.add_measurement(scan)\n median_filter.update()\n assert np.allclose(expected_res, median_filter.get_measurement()), \"Error, incorrect median found\"", "def getMedianInfo(self, nums, start_pos, end_pos):\n half_position = float(start_pos + end_pos) / 2\n left_position = int(math.floor(half_position))\n if half_position - left_position < 0.1:\n median = nums[left_position]\n else:\n median = float(nums[left_position] + nums[left_position + 1]) / 2\n\n length_from_start_to_median = left_position - start_pos\n\n return median, length_from_start_to_median", "def median(args):\n total_nums = len(args)\n middle = total_nums/2 \n avg_middle = (int(args[middle]) + int(args[middle - 1]))/float(2)\n if total_nums % 2 == 0:\n print avg_middle\n else:\n print int(args[middle])", "def median(data_set):\n data_set_length = len(data_set)\n sorted_data_set = sorted(data_set)\n midpoint = data_set_length // 2\n if data_set_length % 2:\n return sorted_data_set[midpoint]\n else:\n hi = sorted_data_set[midpoint]\n lo = sorted_data_set[midpoint - 1]\n return (hi + lo) / 2", 
"def median(data):\n try:\n data = sorted(list(data))\n n = len(data)\n if n%2==0:\n return (data[(n//2)-1]+data[n//2])/2\n else:\n return data[n//2]\n except IndexError:\n raise TypeError(\"needs at least one argument\")", "def move_median(a, window, min_count=None, axis=-1): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a JSON file and returns a Dict representing the file
def read_json_file(file_path: str) -> Dict:
    with open(file_path, 'r') as file:
        data = file.read()
        return json.loads(data)
[ "def get_json_dict(json_file_name: str) -> dict:\n with open(json_file_name, 'r') as JSON:\n return json.load(JSON)", "def read_json_file(path_):\n with open(path_, \"r\") as f:\n return json.loads(f.read(), object_pairs_hook=OrderedDict)", "def get_json_dict(filepath):\n with open(filepath, encoding=\"utf8\") as infile:\n return json.load(infile)", "def load_json(json_file):\n \n with open(json_file, \"r\") as file:\n dictionary = json.load(file)\n return dictionary", "def load_json(filepath):\n data = dict()\n with open(filepath) as data_file: \n data = json.load(data_file)\n return data", "def import_json(file_path: str) -> dict:\n with open(file_path, \"r\", encoding=\"utf8\") as json_file:\n return json.load(json_file)", "def open_json(path: str, mode: str = 'r') -> dict:\n data = {}\n with open(path, mode) as json_file:\n data = json.loads(json_file.read())\n return data", "def _read_json(self,fname):\n\n with open(fname) as f:\n data = json.load(f)\n\n return data", "def get_json_dict(json_file: str):\n with open(json_file, 'r', encoding='utf-8') as file:\n data = json.load(file)\n if type(data) is list:\n return data[0]\n else:\n return data", "def read_config(self, json_file):\n config = {}\n with open(json_file, 'r') as f:\n config = json.load(f)\n\n return config", "def open_json(self, filename: str) -> dict | None:\n json_path = os.path.join(self.directory, filename)\n try:\n with open(json_path, \"r\") as json_file:\n return json.load(json_file)\n except FileNotFoundError:\n print(f\"Couldn't find {filename}. (path: {json_path}) file.\")\n return None", "def load_data(filepath):\n with open(filepath, \"r\") as input_file:\n json_data = json.load(input_file)\n return json_data", "def load_json(filepath, **kwargs):\n with open(filepath, 'r') as fp:\n return json.load(fp, **kwargs)", "def read_json_data(storage_file: str):\n with open(storage_file, 'r') as f:\n data = json.load(f)\n return data", "def read_json_files():\n\n jsons = dict()\n with open('json_files/config.json') as file:\n data_conf = json.load(file)\n jsons['base_url'] = data_conf['base_url']\n jsons['implicit_wait'] = data_conf['implicit_wait']\n jsons['os'] = data_conf['os']\n jsons['is_headless'] = (data_conf['headless'] == 'True')\n\n with open('json_files/state.json') as file:\n data_states = json.load(file)\n jsons['list_states'] = data_states['states']\n\n with open('json_files/district.json') as file:\n jsons['dict_districts'] = json.load(file)\n\n with open('json_files/sub_district.json') as file:\n jsons['dict_sub_districts'] = json.load(file)\n\n with open('json_files/gram_panchayat.json') as file:\n jsons['dict_gram_panchayats'] = json.load(file)\n\n with open('json_files/village.json') as file:\n jsons['dict_villages'] = json.load(file)\n\n return jsons", "def __read(path):\n with open(path, \"r\") as f:\n data = json.load(f)\n logger = __logger()\n logger.info(\"Read the whole json, from path: {}.\".format(path))\n return data", "def read_json_file(var_path, filename):\n vars_fh = open(filename, 'rb')\n json_vars = json.load(vars_fh)\n if not isinstance(json_vars, dict):\n raise Exception(\"JSON file needs to be a dictionary\")\n\n vars_dict = {}\n for (k, v) in json_vars.iteritems():\n vars_dict[\"{}_{}\".format(var_path, k)] = v\n return vars_dict", "def _load_json_from_path(json_path: str) -> Dict:\n with open(json_path, \"r\", encoding='utf-8') as json_file:\n if os.stat(json_path).st_size != 0: # If the file is not empty:\n return json.load(json_file)", "def open_json_file(filepath):\n with 
open(filepath, 'r') as json_file:\n return json.load(json_file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets dict of team names and team Id numbers from league page.
def _getTeamDict(self):
    teamIds = self.html.xpath('//ul[@id="games-tabs1"]/li/a/@href')
    teamIds = [re.findall('teamId=(\d+)', i)[0] for i in teamIds]
    teamNames = self.html.xpath('//ul[@id="games-tabs1"]/li/a/text()')
    teamNames = [name.strip().upper().replace(' ', ' ') for name in teamNames]
    teamDict = dict(zip(teamIds, teamNames))
    return teamDict
[ "def team_ids():\n response = json_response('https://fantasy.premierleague.com/drf/teams/')\n teams = {}\n for team in response:\n teams[team['code']] = team['name']\n return teams", "def parse(self, html):\n team = dict()\n soup = BeautifulSoup(html)\n\n if soup.find(text='No team found.') is not None:\n logging.error('FIRST lacks team.')\n return None\n\n for tr in soup.findAll('tr'):\n tds = tr.findAll('td')\n if len(tds) > 1:\n field = str(tds[0].string)\n if field == \"Team Number\":\n team[\"team_number\"] = int(tds[1].b.string)\n if field == \"Team Name\":\n team[\"name\"] = unicode(tds[1].string)\n if field == \"Team Location\":\n #TODO: Filter out &nbsp;'s and stuff -greg 5/21/2010\n team[\"address\"] = unicode(tds[1].string)\n if field == \"Rookie Season\":\n team[\"rookie_year\"] = int(tds[1].string)\n if field == \"Team Nickname\":\n team[\"nickname\"] = unicode(tds[1].string)\n if field == \"Team Website\":\n try:\n website_str = re.sub(r'^/|/$', '', unicode(tds[1].a[\"href\"])) # strip starting and trailing slashes\n if not website_str.startswith('http://') and not website_str.startswith('https://'):\n website_str = 'http://%s' % website_str\n team['website'] = db.Link(website_str)\n except Exception, details:\n logging.info(\"Team website is invalid for team %s.\" % team['team_number'])\n logging.info(details)\n\n self._html_unescape_items(team)\n\n return team, False", "def output_team_info(session, league_id, team_id):\n response = session.get(tm.url('nba', league_id, team_id))\n league = tm.league(response.text)\n team = tm.team(response.text)\n print(\"Success!\")\n print('League Name: %s \\nTeam Name: %s\\n' % (league, team))", "def get_team_info(data_tag):\n name_span, = data_tag.findAll('span', {'class': 'name'})\n team_name = name_span.text\n slot_id = int(data_tag[SLOT_KEY])\n # NOTE: Assumes the team ID is 1 more than the slot ID.\n team_id = slot_id + 1\n return team_id, team_name", "def get_teams():\n api.get_teams()", "def scrape_teams():\n teams = []\n\n response = requests.get('http://www.basketball-reference.com/leagues/NBA_2015.html')\n soup = bs4.BeautifulSoup(response.content)\n team_soup = soup.find(id='all_standings').find(class_=\"valign_top\")\n\n eastern_conference_soup = team_soup.tbody.contents\n for index in range(3,len(eastern_conference_soup),2): \n if index > 11 and index < 15:\n pass\n elif index > 23 and index < 27:\n pass\n elif index > 35:\n pass\n else:\n if index <= 11:\n division = 'Atlantic'\n elif index > 12 and index <= 23:\n division = 'Central'\n elif index > 24 and index <35:\n division = 'Southeast'\n name = eastern_conference_soup[index].td.a.string \n team_url = eastern_conference_soup[index].td.a['href']\n teams.append(Team(str(name),team_url,division,'Eastern'))\n\n\n western_conference_soup = team_soup.contents[3].tbody.contents\n for index in range(3,len(western_conference_soup),2):\n if index > 11 and index < 15:\n pass\n elif index > 23 and index < 27:\n pass\n elif index > 35:\n pass\n else:\n if index <= 11:\n division = 'Northwest'\n elif index > 12 and index <= 23:\n division = 'Pacific'\n elif index > 24 and index <35:\n division = 'Southwest'\n name = western_conference_soup[index].td.a.string \n team_url = western_conference_soup[index].td.a['href']\n teams.append(Team(str(name),team_url,division,'Western'))\n\n return teams", "def info_team(teams):\n Dict_teams = {}\n for team in teams:\n id = team[\"id\"]\n name = team[\"name\"]\n Dict_teams[id]={}\n Dict_teams[id][\"name\"] = name\n 
Dict_teams[id][\"competition\"] = {\"1\": \"No\", \"2\": \"No\", \"3\": \"No\"}\n Dict_teams[id][\"users\"] = {}\n Dict_teams[id][\"devices\"] = {}\n Dict_teams[id][\"devices\"][\"All\"] = [] # information from the entire working time range\n Dict_teams[id][\"devices\"][\"edition\"] = {\"1\": [], \"2\": [], \"3\": []}\n Dict_teams[id][\"users\"][\"All\"] = []#information from the entire working time range\n Dict_teams[id][\"users\"][\"edition\"] = {\"1\": [], \"2\": [], \"3\": []}\n\n return Dict_teams", "def parse_plays(game_id, league='nba'):\n league = league.lower()\n espn = 'http://scores.espn.go.com/' + league + '/playbyplay?' +\\\n game_id + '&period=0'\n url = urllib2.urlopen(espn)\n print url.geturl()\n\n soup = bs(url.read(), ['fast', 'lxml'])\n table = soup.find('table', {'class': 'mod-data'})\n thead = [thead.extract() for thead in table.findAll('thead')] \n rows = (list(tr(text=True)) for tr in table.findAll('tr'))\n game = adjust_game(rows, league)\n teams = thead[0].findChildren('th', {'width':'40%'})\n away_team, home_team = [team.string.title() for team in teams]\n print len(game), away_team, home_team\n return away_team, home_team, game", "def scrape_team_stats(self):\n response = requests.get(root_url + self.team_url)\n soup = bs4.BeautifulSoup(response.content)\n roster = soup.find(id='per_game').tbody\n\n for player_number in range(1,len(roster),2):\n playerStatTable = roster.contents[player_number].contents\n perGameStats = []\n for stat in range(1,len(playerStatTable),2):\n perGameStats.append(playerStatTable[stat].string)\n self.players.append(Player(perGameStats))", "def get_team(team_id):\n request_url = str(API_ENDPOINT) + str('/team/')+str(team_id)\n\n response = requests.get(request_url)\n if response.status_code != 200:\n print(\"Fehler GET get_team\")\n print(response.content)\n return \"error\", \"error\", \"error\"\n\n response_data = response.json()\n team_name = response_data['name']\n isis_name = response_data['isisName']\n game_type = response_data['type']\n\n print(\"name: \"+str(team_name))\n print(\"isisName: \"+str(isis_name))\n print(\"type: \"+str(game_type))\n return team_name, isis_name, game_type", "def get_user_team_id(self):\n self.info_json = (json.loads(str(self.yahoo_query.get_league_info())))\n \n for team in self.info_json[\"standings\"][\"teams\"]:\n if (team[\"team\"][\"name\"] == self.user_team_name):\n return (team[\"team\"][\"team_id\"])\n \n raise Exception(\"Team name not found in league info, please check spelling of team name.\")\n return", "def player_ids():\n players = player_list()\n players_id = {}\n players_teamid = {}\n for player in players:\n players_id[player['id']] = player['web_name']\n players_teamid[player['web_name']] = player['team_code']\n return players_id, players_teamid", "def _get_team_info(self, TEAM_ABBREV):\n\n with open('./teams.json') as incoming:\n teams = json.load(incoming)\n\n return teams[TEAM_ABBREV]", "def get_team_id_maps_for_date(date):\n response_json = get_scoreboard_response_json_for_date(date)\n games = make_array_of_dicts_from_response_json(response_json, 0)\n team_id_game_id_map = {}\n team_id_opponent_id_map = {}\n for game in games:\n team_id_game_id_map[game['HOME_TEAM_ID']] = game['GAME_ID']\n team_id_game_id_map[game['VISITOR_TEAM_ID']] = game['GAME_ID']\n team_id_opponent_id_map[game['HOME_TEAM_ID']] = game['VISITOR_TEAM_ID']\n team_id_opponent_id_map[game['VISITOR_TEAM_ID']] = game['HOME_TEAM_ID']\n return team_id_game_id_map, team_id_opponent_id_map", "def _get_teams(match):\n 
teams = [{\n 'name': team['Name'],\n 'id': team['id']\n } for team in match.find_all('Tm')]\n if len(teams) != 2:\n return None\n return teams", "def teams(self):\n teams = dict()\n for player in self.players:\n team = player['team']\n if team not in teams:\n teams[team] = []\n teams[team].append(player['login'])\n return teams", "def my_team():\n\n return [(9941835, 'Darren', 'Gebler'), (9983601, 'Davide', 'Dolcetti')]", "def get_team_info(self, team_id):\n team_key = self.get_league_key() + \".t.\" + str(team_id)\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/team/\" + str(team_key) +\n \";out=metadata,stats,standings,roster,draftresults,matchups\", [\"team\"], Team)", "def get_teams_in_league(self, league, season = None):\n try:\n params = {}\n if season:\n params['season'] = season['startDate'][:4]\n\n http_query = 'competitions/{league_id}/teams'.format(league_id=league['id'])\n req = self._get(http_query, params)\n league_teams = req.json()\n if len(league_teams[\"teams\"]) == 0:\n return\n else:\n return league_teams\n except APIErrorException as e:\n click.secho(e.args[0],\n fg=\"red\", bold=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format html draft table string to pandas dataframe.
def _formatDraftTable(self, html):
    # The original snippet used `df` without defining it; reading the first
    # table out of the passed html string is assumed here.
    df = pd.read_html(html)[0]
    rnd = df[0].ix[0].replace('ROUND ', '')
    df.drop([0], inplace=True)
    df['ROUND'] = rnd
    df['PICK'] = pd.to_numeric(df[0])
    df['MANAGER'] = df[2]
    df = self._formatAuctionDraftTable(df)
    df = df[['ROUND', 'PICK', 'MANAGER', 'PLAYER', 'TEAM', 'POS',
             'KEEPER']]
    return df
[ "def parse_table(table_el):\n table_dict = {\"header\": [], \"value\": []}\n for tr in table_el.find_all(\"tr\"):\n th = None\n td = None\n if tr.find(\"th\"):\n th = tr.th.text\n if tr.find(\"td\"):\n td = tr.td.text\n\n table_dict[\"header\"].append(th)\n table_dict[\"value\"].append(td)\n return pd.DataFrame(table_dict)", "def _formatTransactionTable(self, htmlStr, tds):\n df = pd.read_html(htmlStr, header=1)[0]\n dates = [' '.join(i.itertext()) for i in tds[::4]]\n df['DATE'] = dates\n details = [' '.join(i.itertext()).replace(' ', ' ').replace(' ,', ',')\n for i in tds[2::4]]\n df['DETAIL'] = details\n addDropKey = u'Transaction\\xa0\\xa0Add/Drop'\n addDropStr = '(\\w+) dropped (.+?), \\w+ \\w+ to (Waivers|Free Agency)'\\\n '|(\\w+) added (.+?), \\w+ \\w+ from (Waivers|Free Agency)'\n addDrop = pd.Series(df[df['TYPE'].str.match(addDropKey)]['DETAIL'].str.\n findall(addDropStr))\n addDrop = addDrop.apply(lambda x: [x[0][:3], x[1][:3:-1]])\n addKey = u'Transaction\\xa0\\xa0Add'\n addStr = '(\\w+) added (.+?), \\w+ \\w+ from (Waivers|Free Agency)'\n add = pd.Series(df[df['TYPE'].str.match(addKey)]['DETAIL'].str.\n findall(addStr))\n add = add.apply(lambda x: [x[0][::-1]])\n dropKey = u'Transaction\\xa0\\xa0Drop'\n dropStr = '(\\w+) dropped (.+?), \\w+ \\w+ to (Waivers|Free Agency)'\n drop = pd.Series(df[df['TYPE'].str.match(dropKey)]['DETAIL'].str.\n findall(dropStr))\n tradeKey = u'Transaction\\xa0\\xa0Trade Processed'\n tradeStr = '(\\w+) traded (.+?), \\w+ \\w+ to (\\w+)'\n trade = pd.Series(df[df['TYPE'].str.match(tradeKey)]['DETAIL'].str.\n findall(tradeStr))\n transactions = pd.concat([addDrop, add, drop, trade])\n transactions.name = 'TRANSACTION'\n df = df.join(transactions)\n return df", "def extract_table(htmlstr):\n match = re.search(r'<table.*?/table>', htmlstr, re.DOTALL)\n tablehtml = match.group()\n tableList = re.findall(r'<tr>.*?</tr>', tablehtml, re.DOTALL)\n table = []\n for row in tableList:\n cell = re.findall('<td>(.*?)</td>', row, re.DOTALL)\n table.append(cell)\n return table", "def get_report(html_uri):\n page = requests.get(html_uri)\n soup = BeautifulSoup(page.text, \"lxml\")\n table = soup.find('tbody')\n trs = [tr for tr in table.find_all('tr')]\n report_table = []\n for i, tr in enumerate(trs):\n if i == 0:\n columns = [th.text.strip() for th in tr.find_all('th')]\n else:\n report_table.append([th.text.strip() for th in tr.find_all('td')])\n report_df = pd.DataFrame(report_table, columns=columns)\n return report_df", "def _fix_html_tables_old_pandoc(self, htmlstring):\n result = []\n pos = 0\n re_tables = re.compile(r\"<table.*</table>\", re.DOTALL)\n re_tbody = re.compile(r\"<tbody.*</tbody>\", re.DOTALL)\n tables = re_tables.finditer(htmlstring)\n for table in tables:\n # process the html before the match\n result.append(htmlstring[pos:table.start()])\n # now the table itself\n table_html = htmlstring[table.start():table.end()]\n tbody = re_tbody.search(table_html)\n if not tbody is None:\n result.append(table_html[0:tbody.start()])\n tbody_html = table_html[tbody.start():tbody.end()]\n tbody_html = tbody_html.replace(\"<th\",\"<td\")\n tbody_html = tbody_html.replace(\"</th>\", \"</td>\")\n result.append(tbody_html)\n result.append(table_html[tbody.end():])\n else:\n result.append(table_html)\n pos = table.end()\n result.append(htmlstring[pos:])\n\n return \"\".join(result)", "def pandas_df(markdown_table: str) -> pd.DataFrame:\n table = make_table(markdown_table)\n column_names, types = get_column_names_types(table)\n table_data = 
get_data_from_table(table)\n output_table = []\n for row in table_data:\n output_table.append(tuple(map(get_python_type, zip(row, types))))\n\n return pd.DataFrame(output_table, columns=column_names)", "def pandas_df_to_markdown_table(df: pd.DataFrame) -> str:\n\n fmt = ['---' for i in range(len(df.columns))]\n df_fmt = pd.DataFrame([fmt], columns=df.columns)\n df_formatted = pd.concat([df_fmt, df])\n return Markdown(df_formatted.to_csv(sep=\"|\", index=False)).data", "def from_html(html_code, **kwargs):\r\n\r\n parser = TableHandler(**kwargs)\r\n parser.feed(html_code)\r\n return parser.tables", "def parse_dataframe(data: pd.DataFrame, rows: int = 5) -> str:\n return f\"{data.head(rows).to_markdown()}\\n\\n\"", "def html_table_to_dict(html):\n soup = BeautifulSoup(html, 'html.parser')\n tables = soup.find_all('table')\n results = []\n for table in tables:\n table_headers = [header.text for header in table.find('thead').find_all('th')]\n table_body = []\n for row in table.find('tbody').find_all('tr'):\n row_dict = {}\n for i, cell in enumerate(row.find_all('td')):\n row_dict[table_headers[i]] = cell.text\n table_body.append(row_dict)\n results.append(table_body)\n return results", "def get_tables(parsed_html):\n table_els = parsed_html.find_all(\"table\")\n table_dfs = []\n for table_el in table_els:\n table_df = parse_table(table_el)\n table_dfs.append(table_df)\n return table_dfs", "def fetch_table(url):\n \n \n # pretend to be the chrome brower to solve the forbidden problem\n header = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n html_text = requests.get(url, headers=header)\n \n # to solve garbled text\n html_text.encoding = html_text.apparent_encoding\n pd_table = pd.read_html(html_text.text)[0]\n \n return pd_table", "def table(data):\n return pd.DataFrame(json_normalize(data))", "def fixMalformedHTML(self, backup=False, restored=False):\n html = self.driver.page_source\n html = re.sub('<td>\\s+<td valign=\"middle\">', '<td valign=\"middle\">', html, flags=re.I)\n html = re.sub('</td>\\s+<td>', '</td>', html, flags=re.I)\n # Parse the (hopefully) not-busted HTML\n soup = BeautifulSoup(html, \"html5lib\")\n # Extract info from table rows..\n rows = soup.table.table.tbody.find_all('tr', recursive=False)\n \n if backup:\n self.createDictData(rows)\n elif restored:\n self.createDictDataRestoredFile(rows) # some new function here for doing \n else:\n return None", "def get_data_fr_htmlsrc(self, page_source):\n dom_object = DOM(page_source)\n # get date\n date_data = self.get_date_fr_src(dom_object)\n \n data_df = pd.read_html(dom_object('div#tots')[0].content, index_col =0)[0]\n data_df = self.modify_sgx_main_data_df(data_df)\n\n data_df['Date'] = date_data\n data_df['Date'] = pd.to_datetime(data_df['Date'])\n\n return data_df", "def return_html( self ):\n\n htmltbl = []\n\n ts = self.__start_table()\n \n htmltbl.append( ts )\n\n for row in range( self.maxRow ):\n\n tr = self.__start_row( row )\n trc = self.__end_row ( )\n\n htmltbl.append( tr )\n\n for col in range( self.maxCol ):\n\n td = self.__resCell( row,col )\n\n if td: #Spanned cells return None\n htmltbl.append( td )\n\n htmltbl.append( trc + \"\\n\" )\n\n htmltbl.append( self.__end_table() + \"\\n\\n\" ) \n\n return string.join( htmltbl, '' )", "def get_formatted_tracklist_table_html(track_data: pd.DataFrame):\n if track_data.empty:\n print('A list of tracks is required.')\n return\n 
pd.set_option('display.max_colwidth', None)\n keys = ['name', 'album_image_url_small', 'artist_name', 'album_name', 'share_url']\n new_keys = ['Song Title', 'Cover Art', 'Artist', 'Album', 'Share URL']\n track_data = track_data[keys].rename(columns=dict(zip(keys, new_keys)))\n\n def image_formatter(im):\n return f'<img src=\"{im}\" />'\n\n formatters = {\n 'Cover Art': image_formatter\n }\n playlist_table = track_data.to_html(formatters=formatters, escape=False, index=False, render_links=True)\n playlist_table = playlist_table.replace('style=\"text-align: right;\"', '')\n playlist_table = playlist_table.replace('<tr>', '<tr style=\"border: solid 1px #CCC;\">')\n playlist_table = playlist_table.replace(\n '<table border=\"1\" class=\"dataframe\">',\n '<table style=\"border-collapse: collapse; border: solid 1px #CCC;\">'\n )\n return playlist_table", "def generate_table(df, max_rows=10):\r\n return html.Table([\r\n html.Thead(\r\n html.Tr([html.Th(col) for col in df.columns])\r\n ),\r\n html.Tbody([\r\n html.Tr([\r\n html.Td(df.iloc[i][col]) for col in df.columns\r\n ]) for i in range(min(len(df), max_rows))\r\n ])\r\n ])", "def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '&#x27e8;' and '&#x27e9;'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '&#x27e8;'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'&#x27e9;'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n 
assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format active stats html table to data frame.
def _formatActiveStatsTable(self, df):
    df.drop(df.shape[0]-1, inplace=True)
    if df.iloc[:, 2].dtype == 'object':
        rows = df[df.iloc[:, 2] == '--'].index
        df.iloc[rows] = df.iloc[rows].replace(to_replace='--',
                                              value=np.nan)
        df = df.apply(pd.to_numeric, errors='ignore')
    reStr = '^(?P<PLAYER>.+?), (?P<TEAM>\w+)\xa0(?P<POS>.+?)' \
            '(?P<DTD>$|\xa0\xa0DTD$)'
    df = df.join(df['PLAYER, TEAM POS'].str.extract(reStr, expand=True))
    df.drop('PLAYER, TEAM POS', axis=1, inplace=True)
    df['POS'] = df['POS'].apply(lambda x: x.split(', '))
    # Drop extra columns
    df = df.select(lambda x: not re.search('Unnamed: \d+', x), axis=1)
    return df
[ "def _formatDraftTable(self, html):\n rnd = df[0].ix[0].replace('ROUND ', '')\n df.drop([0], inplace=True)\n df['ROUND'] = rnd\n df['PICK'] = pd.to_numeric(df[0])\n df['MANAGER'] = df[2]\n df = self._formatAuctionDraftTable(df)\n df = df[['ROUND', 'PICK', 'MANAGER', 'PLAYER', 'TEAM', 'POS',\n 'KEEPER']]\n return df", "def table(data):\n return pd.DataFrame(json_normalize(data))", "def summary_html_table():\n df = pd.DataFrame.from_csv('summarytable.csv')\n df = df[['Accession','Location','Protein Product','Gene Name']]\n df['Accession'] = df['Accession'].apply(\n lambda x: '<a href=\\\"http://student.cryst.bbk.ac.uk/cgi-bin/cgiwrap/em001/cgi-script.py?type={0}&input={1}\\\">{1}</a>'.format('Gene_ID',x))\n pd.set_option('display.max_colwidth', 1000)\n with open('indexhead.html') as f:\n html = f.read() + df.to_html(escape=False,index=False)\n with open('index.html','w') as g:\n g.write(html)", "def data_frame_to_html(data_frame: DataFrame) -> str:\n return data_frame.to_html(float_format=\"%.2f\", index=False,\n classes=[\"table table-striped table-sm\"])", "def toHtml(self):\n tableRow = ht.TR()\n tableRow.append(ht.TD(self.name))\n tableRow.append(ht.TD(self.version))\n ## FIXME: want to use CSS classes and not define color explicitly\n status = ht.FONT(self.status, color=self.statusColor)\n tableRow.append(ht.TD(ht.A(status, href=self.outputUrl)))\n return tableRow", "def df_stats_expanded(self) -> pd.DataFrame:\n df = self.df_stats.copy(deep=True)\n for bt in self.beat_types:\n df[f\"beat_{bt}\"] = df[\"beat_type_num\"].apply(lambda d: d.get(bt, 0))\n for rt in self.rhythm_types:\n df[f\"rhythm_{rt}\"] = df[\"rhythm_len\"].apply(lambda d: d.get(rt, 0))\n return df.drop(columns=[\"beat_num\", \"beat_type_num\", \"rhythm_len\"])", "def to_html(self,fn='tableone.html'):\n tablefmt = 'html'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def _formatTransactionTable(self, htmlStr, tds):\n df = pd.read_html(htmlStr, header=1)[0]\n dates = [' '.join(i.itertext()) for i in tds[::4]]\n df['DATE'] = dates\n details = [' '.join(i.itertext()).replace(' ', ' ').replace(' ,', ',')\n for i in tds[2::4]]\n df['DETAIL'] = details\n addDropKey = u'Transaction\\xa0\\xa0Add/Drop'\n addDropStr = '(\\w+) dropped (.+?), \\w+ \\w+ to (Waivers|Free Agency)'\\\n '|(\\w+) added (.+?), \\w+ \\w+ from (Waivers|Free Agency)'\n addDrop = pd.Series(df[df['TYPE'].str.match(addDropKey)]['DETAIL'].str.\n findall(addDropStr))\n addDrop = addDrop.apply(lambda x: [x[0][:3], x[1][:3:-1]])\n addKey = u'Transaction\\xa0\\xa0Add'\n addStr = '(\\w+) added (.+?), \\w+ \\w+ from (Waivers|Free Agency)'\n add = pd.Series(df[df['TYPE'].str.match(addKey)]['DETAIL'].str.\n findall(addStr))\n add = add.apply(lambda x: [x[0][::-1]])\n dropKey = u'Transaction\\xa0\\xa0Drop'\n dropStr = '(\\w+) dropped (.+?), \\w+ \\w+ to (Waivers|Free Agency)'\n drop = pd.Series(df[df['TYPE'].str.match(dropKey)]['DETAIL'].str.\n findall(dropStr))\n tradeKey = u'Transaction\\xa0\\xa0Trade Processed'\n tradeStr = '(\\w+) traded (.+?), \\w+ \\w+ to (\\w+)'\n trade = pd.Series(df[df['TYPE'].str.match(tradeKey)]['DETAIL'].str.\n findall(tradeStr))\n transactions = pd.concat([addDrop, add, drop, trade])\n transactions.name = 'TRANSACTION'\n df = df.join(transactions)\n return df", "def render_html(table, data):\n return render(renderers.HtmlRenderer, table, data)", "def get_report(html_uri):\n page = requests.get(html_uri)\n soup = BeautifulSoup(page.text, \"lxml\")\n table = soup.find('tbody')\n trs = [tr for tr in 
table.find_all('tr')]\n report_table = []\n for i, tr in enumerate(trs):\n if i == 0:\n columns = [th.text.strip() for th in tr.find_all('th')]\n else:\n report_table.append([th.text.strip() for th in tr.find_all('td')])\n report_df = pd.DataFrame(report_table, columns=columns)\n return report_df", "def shooting_func(team, url):\n html = requests.get(url).text\n data = bs(html, 'html5')\n table = data.find('table')\n columns = []\n\n for header in table.find_all('th'):\n columns.append(header.string)\n columns = columns[5:29] #gets necessary column headers\n\n\n #display(columns)\n rows = [] #initliaze list to store all rows of data\n for rownum, row in enumerate(table.find_all('tr')): #find all rows in table\n if len(row.find_all('td')) > 0: \n #if rownum % 2 == 0: #uses every other row, there is an unxplained extra row for each match\n rowdata = [] #initiliaze list of row data\n for i in range(len(row.find_all('td'))): #get all column values for row\n rowdata.append(row.find_all('td')[i].text)\n rows.append(rowdata)\n\n df = pd.DataFrame(rows, columns=columns)\n df = df[:-1]\n df.drop('Match Report', axis=1, inplace=True)\n df['Team'] = team[:3] \n return df", "def perf_metrics(self):\n self.performance_metrics = pd.DataFrame()\n for model_name in self.reporters:\n current_metrics = self.reporters[model_name].evaluator.get_metrics()\n current_metrics.index = [model_name]\n self.performance_metrics = pd.concat(\n [self.performance_metrics, current_metrics], axis=0\n )\n self.performance_metrics.columns = self.performance_metrics.columns.droplevel(\n level=1\n ) # no train test\n from tigerml.core.reports.html import HTMLTable, preset_styles\n\n table = HTMLTable(self.performance_metrics)\n bad_metrics = [\"MAPE\", \"WMAPE\", \"MAE\", \"RMSE\"]\n table.apply_conditional_format(\n cols=[\n x\n for x in self.performance_metrics.columns\n if all([col not in x for col in bad_metrics])\n ],\n style=preset_styles.more_is_good_2colors,\n )\n table.apply_conditional_format(\n cols=[\n x\n for x in self.performance_metrics.columns\n if any([col in x for col in bad_metrics])\n ],\n style=preset_styles.less_is_good_2colors,\n )\n\n return table", "def parse_table(table_el):\n table_dict = {\"header\": [], \"value\": []}\n for tr in table_el.find_all(\"tr\"):\n th = None\n td = None\n if tr.find(\"th\"):\n th = tr.th.text\n if tr.find(\"td\"):\n td = tr.td.text\n\n table_dict[\"header\"].append(th)\n table_dict[\"value\"].append(td)\n return pd.DataFrame(table_dict)", "def get_formatted_tracklist_table_html(track_data: pd.DataFrame):\n if track_data.empty:\n print('A list of tracks is required.')\n return\n pd.set_option('display.max_colwidth', None)\n keys = ['name', 'album_image_url_small', 'artist_name', 'album_name', 'share_url']\n new_keys = ['Song Title', 'Cover Art', 'Artist', 'Album', 'Share URL']\n track_data = track_data[keys].rename(columns=dict(zip(keys, new_keys)))\n\n def image_formatter(im):\n return f'<img src=\"{im}\" />'\n\n formatters = {\n 'Cover Art': image_formatter\n }\n playlist_table = track_data.to_html(formatters=formatters, escape=False, index=False, render_links=True)\n playlist_table = playlist_table.replace('style=\"text-align: right;\"', '')\n playlist_table = playlist_table.replace('<tr>', '<tr style=\"border: solid 1px #CCC;\">')\n playlist_table = playlist_table.replace(\n '<table border=\"1\" class=\"dataframe\">',\n '<table style=\"border-collapse: collapse; border: solid 1px #CCC;\">'\n )\n return playlist_table", "def get_mars_facts_table_html(self):\n 
mars_planet_profile = self.get_mars_facts_table_df()\n\n mars_planet_profile_html = mars_planet_profile.to_html(index=False, header=False, classes=[\"table\"])\n mars_planet_profile_html = mars_planet_profile_html.replace('border=\"1\"', \"\")\n\n return mars_planet_profile_html", "def fetch_table(url):\n \n \n # pretend to be the chrome brower to solve the forbidden problem\n header = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n html_text = requests.get(url, headers=header)\n \n # to solve garbled text\n html_text.encoding = html_text.apparent_encoding\n pd_table = pd.read_html(html_text.text)[0]\n \n return pd_table", "def tableToHTML( self ):\n output = ''\n output += '<div class=\"myTable\">'\n\n output += '<div class=\"myTableHeader\">'\n output += '<ul>'\n for col in self.columns:\n output += '<li>%s</li>' % col\n output += '</ul>'\n output += '</div>'\n\n for row in range( 0, len( self.tableData ) ):\n output += '<div class=\"myTableRow%d\">' % ( ( row % 2 ) + 1 )\n output += '<ul>'\n for col in self.tableData[row]:\n output += '<li>%s</li>' % col\n output += '</ul>'\n output += '</div>'\n\n output += '</div>'\n\n return output", "def format_table(row):\n shelter_name = row[\"FacilityName\"]\n last_report = row[\"timestamp_local\"]\n district = integrify(row[\"CouncilDistrict\"])\n occupied_beds = integrify(row[\"occupied_beds_computed\"])\n aval_beds = integrify(row[\"open_beds_computed\"])\n male_tot = integrify(row[\"Total Men Currently at Site\"])\n female_total = integrify(row[\"Total Women Currently at Site\"])\n pets = integrify(row[\"Number of Pets Currently at Site\"])\n ems_calls = integrify(row[\"Number of EMS Calls\"])\n ems_transport = integrify(row[\"Number of EMS Transports\"])\n num_quar = integrify(row[\"Clients currently quarantined\"])\n trail_open = integrify(row[\"Number of Open Trailers\"])\n trail_occupied_women = integrify(row[\"Total Women Currently in Trailer\"])\n trail_occupied_men = integrify(row[\"Total Men Currently in Trailer\"])\n trail_occupied_pets = integrify(row[\"Total Pets Currently in Trailer\"])\n\n shelter = f\"\"\"<b>{shelter_name}</b><br>\n <i>Council District {district}</i><br>\n <i>Report Time: {last_report}</i><br>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Occupied Beds: {occupied_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Available Beds: {aval_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Male: {male_tot}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Female: {female_total}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Pets: {pets}</p><br>\n <i>Trailer Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Trailer Open Beds: {trail_open}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Men: {trail_occupied_men}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Women: {trail_occupied_women}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Pets: {trail_occupied_pets}\n </p><br>\n <i>Health Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Number of EMS Calls: {ems_calls}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of EMS Transports: {ems_transport}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of currently quarantined clients: {num_quar}\n </p>\n\n\n \"\"\"\n return shelter.strip()", "def 
get_data_fr_htmlsrc(self, page_source):\n dom_object = DOM(page_source)\n # get date\n date_data = self.get_date_fr_src(dom_object)\n \n data_df = pd.read_html(dom_object('div#tots')[0].content, index_col =0)[0]\n data_df = self.modify_sgx_main_data_df(data_df)\n\n data_df['Date'] = date_data\n data_df['Date'] = pd.to_datetime(data_df['Date'])\n\n return data_df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download team "Active Stats" page.
def _downloadActiveStatsTable(self, teamId, batter=True):
    assert str(teamId) in self.teamDict
    teamName = self.teamDict[teamId]
    teamId = ('teamId', teamId)
    activeStatsUrl = ('http://games.espn.com/flb/activestats?' +
                      urlencode((self.leagueId, self.seasonId, teamId)))
    if batter:
        html = self._getHTML(activeStatsUrl, login=self.login)
    else:
        html = self._getHTML(activeStatsUrl + '&filter=2',
                             login=self.login)
    htmlStr = (tostring(html.xpath(
        '//table[@class="playerTableTable tableBody"]')[0]))
    dfTable = pd.read_html(htmlStr, header=1)[0]
    df = self._formatActiveStatsTable(dfTable)
    df['MANAGER'] = teamName
    cols = df.columns.tolist()
    return df[[cols[-1]] + cols[-5:-1] + cols[:-5]]
[ "def output_team_info(session, league_id, team_id):\n response = session.get(tm.url('nba', league_id, team_id))\n league = tm.league(response.text)\n team = tm.team(response.text)\n print(\"Success!\")\n print('League Name: %s \\nTeam Name: %s\\n' % (league, team))", "def _get_page(player_name):\r\n formatted_name = _format_name(player_name)\r\n url = \"https://www.foxsports.com/nba/\" + formatted_name + \"-player-stats\"\r\n page = requests.get(url)\r\n return page", "def get_nrlstats_team_stats(div, date, teams):\n table_divs = div.find_all('div')\n for table_div in table_divs:\n if 'id' not in table_div.attrs.keys():\n continue\n print(table_div['id'])\n\n if table_div['id'] == \"tab-tsHalf-0-data\":\n # Total game stats.\n file = open(\"team_stats_total.csv\", 'w')\n elif table_div['id'] == \"tab-tsHalf-1-data\":\n # First half game stats.\n file = open(\"team_stats_first_half.csv\", 'w')\n elif table_div['id'] == \"tab-tsHalf-2-data\":\n # Second half game stats.\n file = open(\"team_stats_second_half.csv\", 'w')\n else:\n continue\n \n rows = table_div.find_all('tr')\n \n # Each row (except the heading row) contains a stat.\n for row in rows[0:]:\n print(row)\n \n stat, val_1, val_2 = get_nrlstats_row_values(row)\n \n if stat == None:\n continue\n \n string = stat + \", \" + val_1 + \", \" + val_2 + \"\\n\"\n file.write(string);\n \n file.close()", "def _fetch_player_stats(self, player_id):\r\n\r\n stats_url = self.player_url + \\\r\n '{}/player/stats'.format(player_id)\r\n\r\n overview_url = self.player_url + \\\r\n '{}/player/overview'.format(player_id)\r\n \r\n time.sleep(1) # Prevent accidently bombarding PL with requests\r\n stats_request = requests.get(stats_url)\r\n stats_page = stats_request.text\r\n status_code_stats = stats_request.status_code\r\n\r\n time.sleep(1) # Prevent accidently bombarding PL with requests \r\n overview_request = requests.get(overview_url) \r\n overview_page = overview_request.text\r\n status_code_overview = overview_requests.status_code\r\n\r\n if stats_request != 200 or overview_request != 200:\r\n input('{}: {} (stats) & {} (overview)'.format(player_id, \r\n stats_request, \r\n overview_request) )\r\n\r\n\r\n return {'stats': stats_page, 'overview': overview_page}", "def _get_offense_stats(self, team):\n pass", "def get_team_stats(self, team_id):\n team_key = self.get_league_key() + \".t.\" + str(team_id)\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/team/\" + str(team_key) + \"/stats\",\n [\"team\", [\"team_points\", \"team_stats\"]])", "def scrape_team_stats(self):\n response = requests.get(root_url + self.team_url)\n soup = bs4.BeautifulSoup(response.content)\n roster = soup.find(id='per_game').tbody\n\n for player_number in range(1,len(roster),2):\n playerStatTable = roster.contents[player_number].contents\n perGameStats = []\n for stat in range(1,len(playerStatTable),2):\n perGameStats.append(playerStatTable[stat].string)\n self.players.append(Player(perGameStats))", "def overall_tournament_results(self, web_support):\n html_file = create_html_page(\"Overall Tournament Results\")\n\n results = tournament.playerStandings(0, self.database, self.cursor)\n print_html_standings(html_file, results, 0)\n\n html_file.write(\"</div>\\n</body>\\n</html>\\n\")\n html_file.close()\n url = os.path.abspath(html_file.name)\n\n if web_support == \"True\":\n webbrowser.open('file://' + url, new=2) # open in a new tab, if possible", "def getLeagueActiveStatsTable(self, batter=True):\n activeTable = pd.DataFrame()\n for teamId in 
self.teamDict:\n df = self._downloadActiveStatsTable(teamId, batter=batter)\n activeTable = pd.concat([activeTable, df])\n return activeTable", "def check_for_team_updates(self):\n self._check_year()\n url = self._get_url(next_update=True)\n \n print(\"Checking for team update at \" + url + \"...\")\n r = requests.get(url)\n\n if r.status_code == 200:\n self.last_team_update[1] += 1\n return url\n else:\n return None", "def coaching(league_id, week, debug):\n if not debug:\n oauth_logger = logging.getLogger('yahoo_oauth')\n oauth_logger.disabled = True\n\n oauth = OAuth1(None, None, from_file='config.json')\n\n if not oauth.token_is_valid():\n oauth.refresh_access_token()\n\n\n url = \"http://fantasysports.yahooapis.com/fantasy/v2/team/{0}.t.3/roster/players;week={1}\".format(league_id, week)\n response = oauth.session.get(url)\n\n if response.status_code == 200:\n print(response.content)\n else:\n print(\"Error contacting Yahoo API: \", response.status_code, response.reason)", "def get_team_home_games(team):\n discontinued_teams = [\"express\",\"revolution\"]\n if team in discontinued_teams:\n return\n print(\" \", team)\n new_games = []\n teams = pandas.read_csv(\"2016_audl_teams.csv\")\n #Code to pull from web\n #response = requests.get(\"http://theaudl.com/teams/\" + team + \"/schedule/2016\")\n #content = response.content\n #Updated for saved pages of 2017 teams historical(2016) results\n with open(\"team-pages/\" + team + \".html\", errors = 'ignore') as content:\n parser = BeautifulSoup(content, 'html.parser')\n\n\n score_table = parser.find_all(\"table\")[0]\n\n\n is_playoffs = 0\n\n rows = score_table.find_all(\"tr\")\n rows = rows[1:] #drop header\n for row in rows:\n print(row)\n print(row.text)\n if 'PLAYOFFS' in row.text:\n is_playoffs = 1\n continue\n cols = row.find_all(\"td\")\n\n #find home team and only continue if it matches team we are getting games for\n #also include if the home team is a discontinued team\n home_team_href = get_href(cols[1].find_all('a')[0].get('href'))\n if home_team_href != team and home_team_href not in discontinued_teams:\n continue\n #Get team abbreviation\n home_team = teams[teams['href'] == home_team_href]['abbr'].iloc[0]\n\n #get date and format correctly for our table\n date_string = cols[0].text\n dt = datetime.datetime.strptime(date_string + \" 2016\",\"%B %d %Y\").date()\n str_date = dt.strftime(\"%m/%d/%Y\")\n\n #Get away team and translate to abbreviation\n away_team_href = get_href(cols[3].find_all('a')[0].get('href'))\n away_team = teams[teams['href'] == away_team_href]['abbr'].iloc[0]\n\n score_line = cols[2].text\n score_regex = r\"(\\d+)\\s*\\-\\s*(\\d+)\"\n scores = re.match(score_regex,score_line)\n if scores == None:\n home_score = score_line\n away_score = score_line\n else:\n home_score = scores.group(1)\n away_score = scores.group(2)\n new_games.append([str_date,home_team,home_score,away_team,away_score,is_playoffs])\n return new_games", "def download():\n sort = __get_sort_query_param()\n name = __get_name_query_param()\n\n filepath, base_filepath = __get_file_paths(sort, name)\n\n if not os.path.exists(base_filepath):\n players = __get_base_query(name, sort).all()\n with open(base_filepath, 'w') as f:\n writer = csv.writer(f)\n writer.writerow([c.display for c in columns.values()])\n writer.writerows([player.to_table_data() for player in players])\n return send_file(filepath, as_attachment=True, max_age=-1)", "def get_teams():\n api.get_teams()", "def get_stats(self):\n \"\"\"\n Function to submit GET request to stats 
endpoint.\n\n Args:\n\n Returns:\n res (obj): Response object from GET request\n \"\"\"\n endpoint = '/stats'\n response = requests.get(baseUrl + endpoint)\n\n return response", "def fetch_goalies(link):\n url = '{0}{1}'.format(NHL_API_URL_BASE, link)\n response = requests.get(url)\n stuff = response.json()\n try:\n home_goalie = stuff['liveData']['boxscore']['teams']['home']['goalies']\n away_goalie = stuff['liveData']['boxscore']['teams']['away']['goalies']\n except requests.exceptions.RequestException:\n print(\"Error encountered getting live stats\")\n return home_goalie, away_goalie", "def get_nrlstats_game_stats(div, date, teams):\n table_divs = div.find_all('div')\n for table_div in table_divs:\n if 'id' not in table_div.attrs.keys():\n continue\n print(table_div['id'])\n\n if table_div['id'] == \"tab-mdHalf-0-data\":\n # Total game stats.\n file = open(\"game_stats_total.csv\", 'w')\n elif table_div['id'] == \"tab-mdHalf-1-data\":\n # First half game stats.\n file = open(\"game_stats_first_half.csv\", 'w')\n elif table_div['id'] == \"tab-mdHalf-2-data\":\n # Second half game stats.\n file = open(\"game_stats_second_half.csv\", 'w')\n else:\n continue\n \n rows = table_div.find_all('tr')\n \n # Each row (except the heading row) contains a stat.\n for row in rows[0:]:\n print(row)\n \n stat, val_1, val_2 = get_nrlstats_row_values(row)\n \n if stat == None:\n continue\n \n string = stat + \", \" + val_1 + \", \" + val_2 + \"\\n\"\n file.write(string);\n \n file.close()", "def get_team_info(self, team_id):\n team_key = self.get_league_key() + \".t.\" + str(team_id)\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/team/\" + str(team_key) +\n \";out=metadata,stats,standings,roster,draftresults,matchups\", [\"team\"], Team)", "def player_info(): \r\n \"\"\" Name, DOB, Club, squad number etc \"\"\"\r\n all_teams = []\r\n for x in player_urls:\r\n teams = []\r\n for i in x:\r\n squad_r = requests.get(i)\r\n now = datetime.datetime.now()\r\n player_soup = BeautifulSoup(squad_r.text, 'html.parser')\r\n premier_soup1 = player_soup.find('div', {'class': 'row-table details -bp30'})\r\n\r\n # Exact match for class name\r\n divs = player_soup.select( 'div[class=\"col\"]')\r\n\r\n team = []\r\n pnew = [] \r\n club = player_soup.find(\"span\", itemprop=\"affiliation\")\r\n\r\n for div in divs:\r\n ps = div.find_all('p')\r\n pnew = [p.text.split(\":\")[1] for p in ps]\r\n pnew.append(club.text)\r\n for item in pnew: \r\n team.append(item)\r\n teams.append(team)\r\n all_teams.append(teams) \r\n# return(all_teams)\r\n player_dates(all_teams)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format standings table to dataframe
def _formatStandingsTable(self, df, columns):
    df.columns = columns
    df.drop(df[df.iloc[:, 0].isnull()].index, inplace=True)
    df = df.select(lambda x: not re.search('1\d', str(x)), axis=1)
    return df
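The `DataFrame.select` call above was removed in pandas 1.0; a minimal standalone sketch of the same cleanup steps (assign headers, drop blank spacer rows, drop rank-style columns) on a toy frame with made-up column names, using an explicit label filter that works on current pandas:

import re
import pandas as pd

# Toy table shaped like the raw ESPN standings output: a blank spacer row
# and a rank-style column ("10") that should be dropped.
raw = pd.DataFrame([["Team A", 10, 5], [None, None, None], ["Team B", 8, 7]])
raw.columns = ["TEAM", "W", "10"]
raw = raw.drop(raw[raw.iloc[:, 0].isnull()].index)              # drop blank rows
keep = [c for c in raw.columns if not re.search(r"1\d", str(c))]
raw = raw[keep]                                                  # drop rank-like columns
print(raw)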
[ "def table(data):\n return pd.DataFrame(json_normalize(data))", "def __format(self, df):\n df = self.__numerics_to_strings(df)\n df = self.__roads_to_columns(df)\n return df", "def _formatDraftTable(self, html):\n rnd = df[0].ix[0].replace('ROUND ', '')\n df.drop([0], inplace=True)\n df['ROUND'] = rnd\n df['PICK'] = pd.to_numeric(df[0])\n df['MANAGER'] = df[2]\n df = self._formatAuctionDraftTable(df)\n df = df[['ROUND', 'PICK', 'MANAGER', 'PLAYER', 'TEAM', 'POS',\n 'KEEPER']]\n return df", "def createDataFrame():\n\n df = pd.DataFrame(data = {\"Calories\": None, \"Water / g\":None, \"Fat / g\": None, \"Protein / g\": None, \"Cholesterol / mg\":None}, index = DFmanager.getTimeIndex(), dtype = \"float64\")\n df.dropna(inplace = True)\n return df", "def _formatActiveStatsTable(self, df):\n df.drop(df.shape[0]-1, inplace=True)\n if df.iloc[:, 2].dtype == 'object':\n rows = df[df.iloc[:, 2] == '--'].index\n df.iloc[rows] = df.iloc[rows].replace(to_replace='--',\n value=np.nan)\n df = df.apply(pd.to_numeric, errors='ignore')\n reStr = '^(?P<PLAYER>.+?), (?P<TEAM>\\w+)\\xa0(?P<POS>.+?)' \\\n '(?P<DTD>$|\\xa0\\xa0DTD$)'\n df = df.join(df['PLAYER, TEAM POS'].str.extract(reStr, expand=True))\n df.drop('PLAYER, TEAM POS', axis=1, inplace=True)\n df['POS'] = df['POS'].apply(lambda x: x.split(', '))\n # Drop extra columns\n df = df.select(lambda x: not re.search('Unnamed: \\d+', x), axis=1)\n return df", "def _downloadStandingsTable(self):\n standingsUrl = ('http://games.espn.com/flb/standings?view=official&' +\n urlencode((self.leagueId, self.seasonId)))\n html = self._getHTML(standingsUrl, login=self.login)\n tables = html.xpath('//table[@class=\"tableBody\"]')\n dfs = []\n for table in tables:\n head, columns = self._parseHeaders(table)\n df = pd.read_html(tostring(table), skiprows=2)[0]\n df.name = head\n dfs.append(self._formatStandingsTable(df, columns))\n return dfs", "def create_dataframe(self):\n self.df = pd.DataFrame.from_records(self.all_residues)\n # add code to give meaningful columns names, including the one base on win_size, here\n # TODO\n window_size = self.half_window_size\n new_columns = [\"center\"]\n # For negative values\n neg_val = -1*window_size\n for i in range(neg_val,0):\n new_columns.append(str(i))\n\n # For positive values\n for i in range(1,window_size+1):\n new_columns.append(str(i))\n\n new_columns.append(\"state\")\n self.df.columns = new_columns", "def format_medical_table(self):\n self.format_medical_table_headers()\n self.format_medical_table_values()", "def format_table(row):\n shelter_name = row[\"FacilityName\"]\n last_report = row[\"timestamp_local\"]\n district = integrify(row[\"CouncilDistrict\"])\n occupied_beds = integrify(row[\"occupied_beds_computed\"])\n aval_beds = integrify(row[\"open_beds_computed\"])\n male_tot = integrify(row[\"Total Men Currently at Site\"])\n female_total = integrify(row[\"Total Women Currently at Site\"])\n pets = integrify(row[\"Number of Pets Currently at Site\"])\n ems_calls = integrify(row[\"Number of EMS Calls\"])\n ems_transport = integrify(row[\"Number of EMS Transports\"])\n num_quar = integrify(row[\"Clients currently quarantined\"])\n trail_open = integrify(row[\"Number of Open Trailers\"])\n trail_occupied_women = integrify(row[\"Total Women Currently in Trailer\"])\n trail_occupied_men = integrify(row[\"Total Men Currently in Trailer\"])\n trail_occupied_pets = integrify(row[\"Total Pets Currently in Trailer\"])\n\n shelter = f\"\"\"<b>{shelter_name}</b><br>\n <i>Council District {district}</i><br>\n <i>Report Time: 
{last_report}</i><br>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Occupied Beds: {occupied_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Available Beds: {aval_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Male: {male_tot}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Female: {female_total}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Pets: {pets}</p><br>\n <i>Trailer Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Trailer Open Beds: {trail_open}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Men: {trail_occupied_men}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Women: {trail_occupied_women}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Pets: {trail_occupied_pets}\n </p><br>\n <i>Health Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Number of EMS Calls: {ems_calls}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of EMS Transports: {ems_transport}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of currently quarantined clients: {num_quar}\n </p>\n\n\n \"\"\"\n return shelter.strip()", "def print_stepdb(df) -> pd.DataFrame:\n\n res = pd.DataFrame()\n\n if len(df) == 0: return res\n\n res['dur_mean'] = [df.duration.mean()]\n res['dur_std'] = [df.duration.std()]\n res['dur_25q'] = [df.duration.quantile(.25)]\n res['dur_50q'] = [df.duration.quantile(.50)]\n res['dur_75q'] = [df.duration.quantile(.75)]\n res['dur_skew'] = [df.duration.skew()]\n res['dur_kurtosis'] = [df.duration.kurtosis()]\n res['dur_mean_l'] = [df.duration.loc[df.LR == 'green'].mean()]\n res['dur_std_l'] = [df.duration.loc[df.LR == 'green'].std()]\n res['dur_mean_r'] = [df.duration.loc[df.LR == 'red'].mean()]\n res['dur_std_r'] = [df.duration.loc[df.LR == 'red'].std()]\n res['ap_mean'] = [df.ap_ptp.mean()]\n res['v_mean'] = [df.v_ptp.mean()]\n res['ml_mean'] = [df.ml_ptp.mean()]\n\n return res", "def to_frame(self):\n return self._merged_table", "def dataframe(self) -> pd.DataFrame:\n data = []\n columns = [\"lection\", 'season', 'week', 'day']\n for lection_membership in self.lections_in_system():\n if type(lection_membership.day) != MovableDay:\n raise NotImplementedError(f\"Cannot yet export for days of type {type(lection_membership.day)}.\")\n data.append(\n [\n lection_membership.lection.description, \n lection_membership.day.get_season_display(), \n lection_membership.day.week, \n lection_membership.day.get_day_of_week_display(), \n ]\n )\n df = pd.DataFrame(data, columns=columns)\n return df", "def _from_table(t):\n table = copy.deepcopy(t)\n # Default the time index to the first column\n index_name = table.colnames[0]\n # Check if another column is defined as the index/primary_key\n if table.primary_key:\n # Check there is only one primary_key/index column\n if len(table.primary_key) == 1:\n table.primary_key[0]\n else:\n raise ValueError(\"Invalid input Table, TimeSeries doesn't support conversion\"\n \" of tables with more then one index column.\")\n\n # Extract, convert and remove the index column from the input table\n index = table[index_name]\n # Convert if the index is given as an astropy Time object\n if isinstance(index, Time):\n index = index.datetime\n index = pd.to_datetime(index)\n table.remove_column(index_name)\n\n # Extract the column values from the table\n data = {}\n units = {}\n for colname in table.colnames:\n data[colname] = table[colname]\n units[colname] = table[colname].unit\n\n # 
Create a dataframe with this and return\n df = pd.DataFrame(data=data, index=index)\n return df, MetaDict(table.meta), units", "def df_stats_expanded(self) -> pd.DataFrame:\n df = self.df_stats.copy(deep=True)\n for bt in self.beat_types:\n df[f\"beat_{bt}\"] = df[\"beat_type_num\"].apply(lambda d: d.get(bt, 0))\n for rt in self.rhythm_types:\n df[f\"rhythm_{rt}\"] = df[\"rhythm_len\"].apply(lambda d: d.get(rt, 0))\n return df.drop(columns=[\"beat_num\", \"beat_type_num\", \"rhythm_len\"])", "def normalize_table(self):\n pass", "def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot", "def _generate_table(df_iree, df_shark, df_baseline, title):\n summary = pd.DataFrame(\n columns=[\n _MODEL,\n _BASELINE,\n _DATA_TYPE,\n _DIALECT,\n _DEVICE,\n _BASELINE_LATENCY,\n _IREE_LATENCY,\n _SHARK_LATENCY,\n _IREE_VS_BASELINE,\n _SHARK_VS_BASELINE,\n _IREE_VS_SHARK,\n _BASELINE_MEMORY,\n _IREE_MEMORY,\n _SHARK_MEMORY,\n ]\n )\n\n models = df_iree.model.unique()\n for model in models:\n iree_results_per_model = df_iree.loc[df_iree.model == model]\n dialects = iree_results_per_model.dialect.unique()\n for dialect in dialects:\n iree_results_per_dialect = iree_results_per_model.loc[\n iree_results_per_model.dialect == dialect\n ]\n data_types = iree_results_per_dialect.data_type.unique()\n for data_type in data_types:\n iree_results_per_datatype = iree_results_per_dialect.loc[\n iree_results_per_dialect.data_type == data_type\n ]\n device_types = iree_results_per_datatype.device.unique()\n for device in device_types:\n iree_results = iree_results_per_datatype.loc[\n iree_results_per_datatype.device == device\n ]\n if len(iree_results) != 3:\n print(\n f\"Warning! Expected number of results to be 3. Got\"\n f\" {len(iree_results)}\"\n )\n print(iree_results)\n continue\n\n baseline_results = df_baseline.loc[\n (df_baseline.model == model)\n & (df_baseline.dialect == dialect)\n & (df_baseline.data_type == data_type)\n & (df_baseline.device == device)\n ]\n\n if baseline_results.empty:\n # We use snapshots of latencies for baseline. If it is a new\n # benchmark that is not included in the snapshot yet, emit a\n # warning.\n print(\n f\"Warning: No baseline results found for {model}, {dialect},\"\n f\" {data_type}, {device}. Using IREE version as baseline. 
Please\"\n f\" update baseline csv.\"\n )\n engine = iree_results.engine.iloc[0]\n baseline_df = iree_results.loc[iree_results.engine == engine]\n baseline_latency = baseline_df.iloc[0][\"ms/iter\"]\n baseline_device_mb = baseline_df.iloc[0][\"device_memory_mb\"]\n else:\n engine = baseline_results.engine.iloc[0]\n baseline_df = baseline_results.loc[\n baseline_results.engine == engine\n ]\n baseline_latency = baseline_df.iloc[0][\"ms/iter\"]\n baseline_device_mb = baseline_df.iloc[0][\"device_memory_mb\"]\n\n iree_df = iree_results.loc[iree_results.engine == \"shark_iree_c\"]\n iree_latency = iree_df.iloc[0][\"ms/iter\"]\n iree_device_mb = iree_df.iloc[0][\"device_memory_mb\"]\n iree_vs_baseline = html_utils.format_latency_comparison(\n iree_latency, baseline_latency\n )\n\n if df_shark is not None:\n shark_results = df_shark.loc[\n (df_shark.model == model)\n & (df_shark.dialect == dialect)\n & (df_shark.data_type == data_type)\n & (df_shark.device == device)\n ]\n if shark_results.empty:\n print(\n f\"Warning: No SHARK results for {model}, {dialect}, {data_type}, {device}.\"\n )\n continue\n\n shark_df = shark_results.loc[\n shark_results.engine == \"shark_iree_c\"\n ]\n shark_latency = shark_df.iloc[0][\"ms/iter\"]\n shark_device_mb = shark_df.iloc[0][\"device_memory_mb\"]\n shark_vs_baseline = html_utils.format_latency_comparison(\n shark_latency, baseline_latency\n )\n iree_vs_shark = html_utils.format_latency_comparison(\n iree_latency, shark_latency\n )\n else:\n # If there are no SHARK benchmarks available, use default values.\n # These columns will be hidden later.\n shark_latency = 0\n shark_vs_baseline = \"<missing_comparison>\"\n iree_vs_shark = \"<missing_comparison>\"\n\n summary.loc[len(summary)] = [\n model,\n engine,\n data_type,\n dialect,\n device,\n f\"{baseline_latency:.1f}\",\n f\"{iree_latency:.1f}\",\n f\"{shark_latency:.1f}\",\n iree_vs_baseline,\n shark_vs_baseline,\n iree_vs_shark,\n f\"{baseline_device_mb:.3f}\",\n f\"{iree_device_mb:.3f}\",\n f\"{shark_device_mb:.3f}\",\n ]\n\n summary = summary.round(2)\n\n st = summary.style.set_table_styles(html_utils.get_table_css())\n st = st.hide(axis=\"index\")\n if df_shark is None:\n st = st.hide_columns(\n subset=[_SHARK_LATENCY, _SHARK_VS_BASELINE, _IREE_VS_SHARK]\n )\n st = st.set_caption(title)\n st = st.applymap(html_utils.style_performance, subset=_PERF_COLUMNS)\n st = st.set_properties(\n subset=[_MODEL],\n **{\n \"width\": \"300px\",\n \"text-align\": \"left\",\n },\n )\n st = st.set_properties(\n subset=[_BASELINE],\n **{\n \"width\": \"140\",\n \"text-align\": \"center\",\n },\n )\n st = st.set_properties(\n subset=[_DIALECT, _DATA_TYPE, _DEVICE],\n **{\n \"width\": \"100\",\n \"text-align\": \"center\",\n },\n )\n st = st.set_properties(\n subset=_LATENCY_COLUMNS,\n **{\n \"width\": \"100\",\n \"text-align\": \"right\",\n },\n )\n st = st.set_properties(\n subset=_PERF_COLUMNS,\n **{\"width\": \"150px\", \"text-align\": \"right\", \"color\": \"#ffffff\"},\n )\n st = st.set_properties(\n subset=_MEMORY_COLUMNS,\n **{\n \"width\": \"100\",\n \"text-align\": \"right\",\n },\n )\n\n return st.to_html() + \"<br/>\"", "def createSummaryTable(self):\n\n\n # create an empty dataframe with the column headings (must create a dummy row)\n summary = pd.DataFrame(\n [['Dummy', '00-00-00', np.nan, np.nan,np.nan ,np.nan, np.nan, np.nan, np.nan, np.nan]],\n columns=['Asset ID', 'Purchase date', 'Purchase price', 'Volume','Acquisition', 'Close', 'Market',\n 'Est Profit', '% Est Profit', 'Annual Return'])\n\n # add the 
perfprmance vector of each asset to the newly created dataframe\n for asset in self.assets:\n summary = pd.concat([summary, asset.perfVector])\n\n # remove the dummy row\n summary = summary[1:]\n\n\n # remove date indexes\n summary = pd.DataFrame(summary.values, columns=summary.columns)\n\n # create a dataframe with the sum of some of the performace indicators\n total = pd.DataFrame([['Total', '', '', '', summary['Acquisition'].sum(), '', summary['Market'].sum(), summary['Est Profit'].sum(), '', '']],\n columns=['Asset ID', 'Purchase date', 'Purchase price', 'Volume', 'Acquisition', 'Close', 'Market',\n 'Est Profit', '% Est Profit', 'Annual Return'])\n\n # add the sum dataframe to the summary table\n summary = pd.concat([summary, total])\n\n\n self.summary = summary", "def makeFormat(self):\n if not self.columns:\n #log.error(\"Table columns not set\")\n #return\n raise TableError(\"Table columns not set.\")\n out = \"%%-%ss\" % (self.columns[0]-1)\n for i in self.columns[1:-1]:\n out += \" | %%-%ss\" % (i-2)\n if len(self.columns)>1:\n out += \" | %%-%ss\" % (self.columns[-1]-1)\n self.format_str = out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download league official "Standings" table. There are two tables within the page, roto and season stats.
def _downloadStandingsTable(self):
    standingsUrl = ('http://games.espn.com/flb/standings?view=official&' +
                    urlencode((self.leagueId, self.seasonId)))
    html = self._getHTML(standingsUrl, login=self.login)
    tables = html.xpath('//table[@class="tableBody"]')
    dfs = []
    for table in tables:
        head, columns = self._parseHeaders(table)
        df = pd.read_html(tostring(table), skiprows=2)[0]
        df.name = head
        dfs.append(self._formatStandingsTable(df, columns))
    return dfs
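A self-contained sketch of the same parse pattern — XPath to locate each table element, then pandas.read_html on the serialized markup; the inline HTML, class name, and rows are made-up stand-ins for the real standings page:

from io import StringIO

import pandas as pd
from lxml import etree

# Stand-in for the downloaded standings page: two small "tableBody" tables.
page = """
<html><body>
<table class="tableBody">
  <tr><td>EAST</td><td></td></tr><tr><td>TEAM</td><td>W</td></tr>
  <tr><td>Team A</td><td>10</td></tr>
</table>
<table class="tableBody">
  <tr><td>WEST</td><td></td></tr><tr><td>TEAM</td><td>W</td></tr>
  <tr><td>Team B</td><td>7</td></tr>
</table>
</body></html>
"""

html = etree.HTML(page)
dfs = []
for table in html.xpath('//table[@class="tableBody"]'):
    markup = etree.tostring(table, encoding="unicode")
    dfs.append(pd.read_html(StringIO(markup), skiprows=2)[0])  # skip the two header rows
print(len(dfs), "tables parsed")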
[ "def get_standings(self, season_id, wnba_season):\n path = \"wnba/trial/v4/en/seasons/{season_id}/{wnba_season}/standings\".format(\n season_id=season_id, wnba_season=wnba_season)\n print(path)\n return self._make_request(path)", "def get_league_standings(self):\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/league/\" + self.get_league_key() + \"/standings\",\n [\"league\", \"standings\"], Standings)", "def fetch_standings():\n # check if the data needs to be fetched // or stored json\n try:\n with open('app/data/gw_standings/standings_current.json', 'r') as file:\n data = json.loads(file.read())\n except:\n return get_live_result()\n\n updated = data['updated']\n try:\n status = data['status']\n except KeyError:\n status = \"ongoing\"\n gameweek = data['gameweek']\n\n if status == 'completed' and gameweek == find_current_gw():\n return data\n\n current = calendar.timegm(time.gmtime())\n\n if current - updated < 500:\n return data\n return get_live_result()", "def get_east_leagues_nw_division(url, division, season):\n existing_teams = DivisionResult.objects.league_table(\n season=season, division=division)\n soup = parse_url(url)\n divisionName = division.name.upper()\n divisionElement = soup.find(text=divisionName)\n currentRow = divisionElement.find_next('tr')\n nextDivisionElement = divisionElement.find_next('strong')\n blankRow = divisionElement.find_next(text=u'\\xa0')\n bottomRow = nextDivisionElement.find_parent('tr') if nextDivisionElement != None else blankRow.find_parent('tr')\n teams = []\n pos = 0\n while currentRow != bottomRow:\n columns = currentRow('td')\n pos += 1\n team = DivisionResult()\n team.division = division\n team.season = season\n team.position = pos\n name = columns[0].text.strip()\n if '---' not in name and name != '' and name is not None:\n set_team(team, name, division)\n # The 2nd column is not used!\n team.played = int(columns[2].text) if columns[2].text else 0\n team.won = int(columns[3].text) if columns[3].text else 0\n team.drawn = int(columns[4].text) if columns[4].text else 0\n team.lost = int(columns[5].text) if columns[5].text else 0\n team.goals_for = int(columns[6].text) if columns[6].text else 0\n team.goals_against = int(columns[7].text) if columns[7].text else 0\n team.goal_difference = int(columns[8].text) if columns[8].text else 0\n # Some league tables display percentage win instead. 
In this case calculate the total\n if columns[9].text.endswith('%'):\n team.points = team.won * Match.POINTS_FOR_WIN + team.drawn * Match.POINTS_FOR_DRAW\n else:\n team.points = int(columns[9].text) if columns[9].text else 0\n # The 11th column is not used!\n team.notes = columns[11].text\n team.save()\n teams.append(team)\n LOG.debug(\"Parsed team: {}\".format(team))\n try:\n currentRow = currentRow.find_next('tr')\n except:\n break\n\n # Only replace existing entries if we've got at least as many entries\n if len(teams) >= len(existing_teams):\n existing_teams.delete()\n for dr in teams:\n dr.save()\n else:\n LOG.debug(\"Did not save division results for {}: Only {} teams parsed ({} teams before)\".format(\n url, len(teams), len(existing_teams)))\n\n return teams", "def get_scoreboard(date):\n day = date.split('-')\n url = 'https://stats.ncaa.org/season_divisions/' + str(seasons.loc[seasons['season'] == int(day[2]),'id'].item()) + '/scoreboards?utf8=%E2%9C%93&game_date='+ day[0] +'%2F'+ day[1] + '%2F' + day[2]\n # url = \"https://stats.ncaa.org/season_divisions/17126/scoreboards?utf8=%E2%9C%93&season_division_id=&game_date=02%2F25%2F2020&conference_id=0&tournament_id=&commit=Submit\"\n page = requests.get(url)\n doc = lh.fromstring(page.content)\n matchups = []\n game = []\n dates = []\n away = []\n home = []\n links = []\n\n #get elements in td index 3 (away team names and home final scores)\n a_teams = doc.xpath(\"//div[@id='contentarea']/table/tbody/tr/td[3]\")\n\n #\n for a in range(len(a_teams)):\n if not 'totalcol' in [x for x in a_teams[a].classes]:\n away.append(a_teams[a][0].text if not len(a_teams[a]) < 1 else a_teams[a].text.replace('\\n', '').replace(' ', '').replace(' ', ''))\n\n #get elements in td index 2 (away team logos, home team names and blank element below attendance)\n h_teams = doc.xpath(\"//div[@id='contentarea']/table/tbody/tr/td[2]\")\n for h in range(len(h_teams)):\n if not 'img' in [a.tag for a in h_teams[h]]:\n if not len([a.text for a in h_teams[h]]) > 0:\n test = h_teams[h].text\n if not test is None:\n team = h_teams[h].text.replace('\\n', '').replace(' ', '').replace(' ', '')\n if not team == '':\n home.append(team)\n else:\n home.append(h_teams[h][0].text)\n\n l = doc.xpath(\"//div[@id='contentarea']/table/tbody/tr/td[1]\")\n na = []\n for i in range(round(len(l)/3)):\n e = l[(i)*3+2]\n if len(e) == 0:\n na.append(i)\n else:\n links.append(e[0].attrib['href'])\n\n deleted = 0\n for i in na:\n del away[i-deleted]\n del home[i-deleted]\n deleted += 1\n\n # Remove rankings and leading spaces\n for i in range(0,len(away)):\n if '#' in away[i]:\n away[i] = away[i][away[i].index(' ')+1:]\n else:\n away[i] = away[i][1:]\n\n if '#' in home[i]:\n home[i] = home[i][home[i].index(' ')+1:]\n else:\n home[i] = home[i][1:]\n # Check for doubleheaders\n m = away[i] + ' ' + home[i]\n if m in matchups:\n game[matchups.index(m)] = 1\n game.append(2)\n else:\n game.append(0)\n matchups.append(m)\n\n for j in range(len(away)):\n # Remove records\n record_check = re.search(r'([0-9]{1,2}-[0-9]{1,2})', home[j])\n if not record_check is None:\n home[j] = home[j].replace(' (' + home[j].split(' (')[-1], '')\n away[j] = away[j].replace(' (' + away[j].split(' (')[-1], '')\n dates.append(day[2] + day[0] + day[1])\n\n return pd.DataFrame({'away': away, 'home': home, 'game': game, 'link': links, 'date': dates})", "def get_east_leagues_division(url, division, season):\n existing_teams = DivisionResult.objects.league_table(\n season=season, division=division)\n\n soup = parse_url(url)\n 
division_name = division.name.upper()\n division_element = soup.find(text=division_name)\n current_row = division_element.find_next('tr')\n next_division_element = division_element.find_next('strong')\n blank_row = division_element.find_next(text=u'\\xa0')\n bottom_row = next_division_element.find_parent(\n 'tr') if next_division_element != None else blank_row.find_parent('tr')\n teams = []\n pos = 0\n while current_row != bottom_row:\n columns = current_row('td')\n pos += 1\n team = DivisionResult()\n team.division = division\n team.season = season\n team.position = pos\n name = columns[0].text.strip()\n if '---' not in name and name != '' and name is not None:\n set_team(team, name, division)\n # The 2nd column is not used!\n team.played = int(columns[2].text) if columns[2].text else 0\n team.won = int(columns[3].text) if columns[3].text else 0\n team.drawn = int(columns[4].text) if columns[4].text else 0\n team.lost = int(columns[5].text) if columns[5].text else 0\n team.goals_for = int(columns[6].text) if columns[6].text else 0\n team.goals_against = int(columns[7].text) if columns[7].text else 0\n team.goal_difference = int(\n columns[8].text) if columns[8].text else 0\n # Some league tables display percentage win instead. In this case calculate the total\n if columns[9].text.endswith('%'):\n team.points = team.won * Match.POINTS_FOR_WIN + team.drawn * Match.POINTS_FOR_DRAW\n else:\n team.points = int(columns[9].text) if columns[9].text else 0\n # The 11th column is not used!\n team.notes = columns[11].text\n teams.append(team)\n LOG.debug(\"Parsed team: {}\".format(team))\n try:\n current_row = current_row.find_next('tr')\n except:\n break\n\n # Only replace existing entries if we've got at least as many entries\n if len(teams) >= len(existing_teams):\n existing_teams.delete()\n for t in teams:\n t.save()\n else:\n LOG.debug(\"Did not save division results for {}: Only {} teams parsed ({} teams before)\".format(\n url, len(teams), len(existing_teams)))\n return teams", "def get_team_standings(self, team_id):\n team_key = self.get_league_key() + \".t.\" + str(team_id)\n return self.query(\n \"https://fantasysports.yahooapis.com/fantasy/v2/team/\" + str(team_key) + \"/standings\",\n [\"team\", \"team_standings\"], TeamStandings)", "def league_table(league_id, league_type):\n ls_page = 0\n managers = []\n if league_type == 'classic':\n suburl = 'leagues-classic-standings/'\n elif league_type == 'h2h':\n suburl = 'leagues-h2h-standings/'\n else:\n print('Please choose \\'classic\\' or \\'h2h\\' for league_type')\n return\n while True:\n ls_page += 1\n league_url = 'https://fantasy.premierleague.com/drf/' + suburl + str(league_id) + '?phase=1&le-page=1&ls-page=' + str(ls_page)\n response = json_response(league_url)\n for player in response['standings'][\"results\"]:\n managers.append(player)\n if response['standings']['has_next'] is False:\n break\n return managers", "def scrape_complete_season(season):\n # scrape full season and save to\n ns.scrape_season(season,\n data_format='csv',\n data_dir='/Users/chrisfeller/Desktop/Play_by_Play_Scraper/data/')", "def get_nrlstats_game_stats(div, date, teams):\n table_divs = div.find_all('div')\n for table_div in table_divs:\n if 'id' not in table_div.attrs.keys():\n continue\n print(table_div['id'])\n\n if table_div['id'] == \"tab-mdHalf-0-data\":\n # Total game stats.\n file = open(\"game_stats_total.csv\", 'w')\n elif table_div['id'] == \"tab-mdHalf-1-data\":\n # First half game stats.\n file = open(\"game_stats_first_half.csv\", 'w')\n elif 
table_div['id'] == \"tab-mdHalf-2-data\":\n # Second half game stats.\n file = open(\"game_stats_second_half.csv\", 'w')\n else:\n continue\n \n rows = table_div.find_all('tr')\n \n # Each row (except the heading row) contains a stat.\n for row in rows[0:]:\n print(row)\n \n stat, val_1, val_2 = get_nrlstats_row_values(row)\n \n if stat == None:\n continue\n \n string = stat + \", \" + val_1 + \", \" + val_2 + \"\\n\"\n file.write(string);\n \n file.close()", "def getClubsHtml(self):\n\n\t\t# Get the clubs index\n\t\tclubSection = self.getClubsSectionIndex()\n\n\t\t# Build the url for the HTML body of the section.\n\t\turl = self.builder.buildSectionUrl(self.nfl_page_id, clubSection, 'text')\n\t\t\n\t\t# Get the response\n\t\tnodes = self.__getRequest__(url)\n\n\t\t# Extract only the HTML string\n\t\thtml = nodes.xpath('/api/parse/text/text()')\n\n\t\t# Find the table that has the teams listed. \n\t\ttables = etree.HTML(html[0]).xpath('//table[@class=\"navbox plainrowheaders wikitable\"]')\n\t\treturn tables[0]", "def overall_tournament_results(self, web_support):\n html_file = create_html_page(\"Overall Tournament Results\")\n\n results = tournament.playerStandings(0, self.database, self.cursor)\n print_html_standings(html_file, results, 0)\n\n html_file.write(\"</div>\\n</body>\\n</html>\\n\")\n html_file.close()\n url = os.path.abspath(html_file.name)\n\n if web_support == \"True\":\n webbrowser.open('file://' + url, new=2) # open in a new tab, if possible", "def fetch_rosters(link):\n url = '{0}{1}'.format(NHL_API_URL_BASE, link)\n response = requests.get(url)\n stuff = response.json()\n try:\n home_roster = stuff['liveData']['boxscore']['teams']['home']['players']\n away_roster = stuff['liveData']['boxscore']['teams']['away']['players']\n except requests.exceptions.RequestException:\n print(\"Error encountered getting live stats\")\n return home_roster, away_roster", "def get_nrlstats_team_stats(div, date, teams):\n table_divs = div.find_all('div')\n for table_div in table_divs:\n if 'id' not in table_div.attrs.keys():\n continue\n print(table_div['id'])\n\n if table_div['id'] == \"tab-tsHalf-0-data\":\n # Total game stats.\n file = open(\"team_stats_total.csv\", 'w')\n elif table_div['id'] == \"tab-tsHalf-1-data\":\n # First half game stats.\n file = open(\"team_stats_first_half.csv\", 'w')\n elif table_div['id'] == \"tab-tsHalf-2-data\":\n # Second half game stats.\n file = open(\"team_stats_second_half.csv\", 'w')\n else:\n continue\n \n rows = table_div.find_all('tr')\n \n # Each row (except the heading row) contains a stat.\n for row in rows[0:]:\n print(row)\n \n stat, val_1, val_2 = get_nrlstats_row_values(row)\n \n if stat == None:\n continue\n \n string = stat + \", \" + val_1 + \", \" + val_2 + \"\\n\"\n file.write(string);\n \n file.close()", "def download_province_table():\n # Download table\n url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vTfinng5SDBH9RSJMHJk28dUlW3VVSuvqaBSGzU-fYRTVLCzOkw1MnY17L2tWsSOppHB96fr21Ykbyv/pub#'\n print('Downloading Argentinian provinces table from google drive ({})'.format(url))\n TEMPORAL_HTML_FILE = 'fromdrive_per_province.html'\n r = requests.get(url)\n assert r.status_code == 200, 'Wrong status code at dowloading provinces table'\n with open(TEMPORAL_HTML_FILE, 'w') as out_f:\n out_f.write(r.content.decode(\"utf-8\") )\n # Preprocess it\n dfs = pd.read_html(TEMPORAL_HTML_FILE)\n # Get first page table\n df = dfs[0]\n # Get the headers from first row\n df = df.rename(columns={ col: real_col for col,real_col in df.loc[0].iteritems() 
})\n # Rename the province column\n df = df.rename(columns=_set_provincia_name)\n # Erase 'trash' columns\n relevant_cols = ['PROVINCIA'] + [ col for col in df.columns if _is_date(col) ]\n df = df[relevant_cols]\n # Erase 'trash' rows\n df = df[df['PROVINCIA'].apply(_valid_provincia)]\n df = df.fillna(0)\n # Set indexes by type (confirmados, muertos, recuperados, activos) and province\n df['TYPE'] = df['PROVINCIA'].apply(_get_type)\n df['PROVINCIA'] = df['PROVINCIA'].apply(_get_provincia)\n df = df.set_index(['TYPE','PROVINCIA']).sort_index()\n for c in df.columns:\n df[c] = pd.to_numeric(df[c])\n return df", "def fetch_historical_standings(gw):\n\n try:\n with open(f'app/data/gw_standings/standings_{gw}.json', 'r') as file:\n return json.loads(file.read())\n except:\n return []", "def scrapeHistory():\n\n payload_array = [['day', 'game_date', 'event', 'year', 'time', 'home', 'home_score', 'away', 'away_score', 'tier']]\n\n # GET SCHOOLS LIST\n url = 'https://home.gotsoccer.com/rankings/team_async.aspx?TeamID=862573&pagesize=100&mode=History'\n #requests\n url_r = requests.get(url)\n #run the requests through soup\n url_soup = BeautifulSoup(url_r.content, \"html.parser\")\n\n dates = url_soup.findAll(\"p\",{\"class\":\"font-weight-bold\"})\n events = url_soup.findAll(\"p\",{\"class\":\"text-smaller\"})\n gameTables = url_soup.findAll(\"table\",{\"class\":\"game-table\"})\n\n for index, game in enumerate(gameTables): # default is zero\n\n date_list = dates[index].text.split(', ')\n datetime_object = datetime.strptime(dates[index].text, '%A, %B %d, %Y')\n game_day = date_list[0].lstrip()\n game_date = datetime_object\n game_year = date_list[2]\n event = events[index].text\n\n games = gameTables[index].findAll('tr')\n for gm in games:\n tds = gm.findAll('td')\n game_time = tds[0].text\n home_team = tds[1].text\n away_team = tds[3].text\n score_list = tds[2].find('span').text.split(' - ')\n home_score = score_list[0]\n away_score = score_list[1]\n game_tier = tds[4].text\n payload_array.append([game_day, game_date, event, game_year, game_time, home_team, home_score, away_team, away_score, game_tier])\n\n si = StringIO()\n cw = csv.writer(si)\n for row in payload_array:\n cw.writerow(row)\n\n # DEPLOY\n history_data = si.getvalue()\n return history_data", "def todays_games():\n\turl_str = \"http://mlb.mlb.com/gdcross/components/game/mlb\" \\\n \t\t\t+ \"/year_\" + fix_digit(date.today().year) \\\n \t\t\t+ \"/month_\" + fix_digit(date.today().month) \\\n \t\t\t+ \"/day_\" + fix_digit(date.today().day) \\\n \t\t\t+ \"/miniscoreboard.json\"\n\n\twith urllib.request.urlopen(url_str) as url:\n\t\tdata = json.loads(url.read().decode())\n\t\tgames = data[\"data\"][\"games\"][\"game\"]\n\t\thome = []\n\t\thome_score = []\n\t\taway = []\n\t\taway_score = []\n\t\tinning = []\n\t\touts = []\n\t\tstatus = []\n\t\tfor game in games:\n\t\t\thome.append(game.get(\"home_team_name\", 0))\n\t\t\thome_score.append(game.get(\"home_team_runs\", 0))\n\t\t\taway.append(game.get(\"away_team_name\", 0))\n\t\t\taway_score.append(game.get(\"away_team_runs\", 0))\n\t\t\tinning.append(game.get(\"inning\", 0))\n\t\t\touts.append(game.get(\"outs\",0))\n\t\t\tstatus.append(game.get(\"status\", 0))\n\t\tout = pd.DataFrame.from_items([(\"Home\", home), \\\n\t\t\t\t\t\t\t(\"Score\", home_score), \\\n\t\t\t\t\t\t\t(\"Away\", away), \\\n\t\t\t\t\t\t\t(\"Score\", away_score), \\\n\t\t\t\t\t\t\t(\"Inning\", inning), \\\n\t\t\t\t\t\t\t(\"Outs\", outs), \\\n\t\t\t\t\t\t\t(\"Status\", status)])\t\n\t\tprint(out)", "def 
get_team_home_games(team):\n discontinued_teams = [\"express\",\"revolution\"]\n if team in discontinued_teams:\n return\n print(\" \", team)\n new_games = []\n teams = pandas.read_csv(\"2016_audl_teams.csv\")\n #Code to pull from web\n #response = requests.get(\"http://theaudl.com/teams/\" + team + \"/schedule/2016\")\n #content = response.content\n #Updated for saved pages of 2017 teams historical(2016) results\n with open(\"team-pages/\" + team + \".html\", errors = 'ignore') as content:\n parser = BeautifulSoup(content, 'html.parser')\n\n\n score_table = parser.find_all(\"table\")[0]\n\n\n is_playoffs = 0\n\n rows = score_table.find_all(\"tr\")\n rows = rows[1:] #drop header\n for row in rows:\n print(row)\n print(row.text)\n if 'PLAYOFFS' in row.text:\n is_playoffs = 1\n continue\n cols = row.find_all(\"td\")\n\n #find home team and only continue if it matches team we are getting games for\n #also include if the home team is a discontinued team\n home_team_href = get_href(cols[1].find_all('a')[0].get('href'))\n if home_team_href != team and home_team_href not in discontinued_teams:\n continue\n #Get team abbreviation\n home_team = teams[teams['href'] == home_team_href]['abbr'].iloc[0]\n\n #get date and format correctly for our table\n date_string = cols[0].text\n dt = datetime.datetime.strptime(date_string + \" 2016\",\"%B %d %Y\").date()\n str_date = dt.strftime(\"%m/%d/%Y\")\n\n #Get away team and translate to abbreviation\n away_team_href = get_href(cols[3].find_all('a')[0].get('href'))\n away_team = teams[teams['href'] == away_team_href]['abbr'].iloc[0]\n\n score_line = cols[2].text\n score_regex = r\"(\\d+)\\s*\\-\\s*(\\d+)\"\n scores = re.match(score_regex,score_line)\n if scores == None:\n home_score = score_line\n away_score = score_line\n else:\n home_score = scores.group(1)\n away_score = scores.group(2)\n new_games.append([str_date,home_team,home_score,away_team,away_score,is_playoffs])\n return new_games" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return league active stats dataframe
def getLeagueActiveStatsTable(self, batter=True):
    activeTable = pd.DataFrame()
    for teamId in self.teamDict:
        df = self._downloadActiveStatsTable(teamId, batter=batter)
        activeTable = pd.concat([activeTable, df])
    return activeTable
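A minimal sketch of the same accumulation pattern with a fake stand-in for the per-team download; collecting the frames in a list and concatenating once is the cheaper, more idiomatic form of the loop above:

import pandas as pd

def fake_team_stats(team_id):
    # Stand-in for a per-team stats download such as _downloadActiveStatsTable.
    return pd.DataFrame({"TEAM_ID": [team_id], "HR": [team_id * 3]})

team_ids = [1, 2, 3]
active_table = pd.concat([fake_team_stats(t) for t in team_ids], ignore_index=True)
print(active_table)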
[ "def get_home_advantage_vars(games_stats):\n # write query to create df containing teams, and wins by location per game\n game_location_data = sqldf(\"\"\"\n SELECT h.game_id,\n h.team AS home_team,\n a.team AS away_team,\n h.PTS AS home_points,\n a.PTS AS away_points,\n (h.POSS + a.POSS) / 2 AS POSS,\n CASE WHEN h.PTS > a.PTS THEN 1 ELSE 0 END AS home_win,\n CASE WHEN h.PTS < a.PTS THEN 1 ELSE 0 END AS away_win\n FROM (SELECT * FROM games_stats WHERE location='home_team') AS h\n LEFT JOIN (SELECT * FROM games_stats WHERE location='away_team') AS a\n ON h.game_id = a.game_id\n AND h.team != a.team\n \"\"\")\n\n # create a df summarising wins per team by location\n wins_by_location = sqldf(\"\"\"\n WITH home_wins AS(\n SELECT home_team,\n SUM(home_win) AS home_wins,\n SUM(away_win) AS home_losses,\n COUNT(home_win) AS home_games,\n 100 * SUM(home_win) / COUNT(home_win) AS home_win_pct\n FROM game_location_data\n GROUP BY home_team\n ),\n away_wins AS (\n SELECT away_team,\n SUM(home_win) AS away_losses,\n SUM(away_win) AS away_wins,\n COUNT(away_win) AS away_games,\n 100 * SUM(away_win) / COUNT(away_win) AS away_win_pct\n FROM game_location_data\n GROUP BY away_team\n )\n\n SELECT hw.home_team AS team,\n hw.home_win_pct,\n aw.away_win_pct,\n 100 * (hw.home_wins + aw.away_wins) / (hw.home_games + aw.away_games) AS win_pct,\n hw.home_win_pct - aw.away_win_pct AS home_win_advantage\n FROM home_wins AS hw\n JOIN away_wins AS aw\n ON hw.home_team = aw.away_team\n \"\"\")\n\n # create a df summarising net rating per team by location\n rating_by_location = sqldf(\"\"\"\n WITH home_team_ratings AS(\n SELECT home_team,\n 100 * SUM(home_points) / SUM(POSS) AS home_ORtg,\n 100 * SUM(away_points) / SUM(POSS) AS home_DRtg,\n 100 * (SUM(home_points) / SUM(POSS)) - (SUM(away_points) / SUM(POSS)) AS home_NETRtg\n FROM game_location_data\n GROUP BY home_team\n ),\n away_team_ratings AS(\n SELECT away_team,\n 100 * SUM(away_points) / SUM(POSS) AS away_ORtg,\n 100 * SUM(home_points) / SUM(POSS) AS away_DRtg,\n 100 * (SUM(away_points) / SUM(POSS)) - (SUM(home_points) / SUM(POSS)) AS away_NETRtg\n FROM game_location_data\n GROUP BY away_team\n )\n\n SELECT htr.home_team AS team,\n htr.home_ORtg,\n htr.home_DRtg,\n htr.home_NETRtg,\n atr.away_ORtg,\n atr.away_DRtg,\n atr.away_NETRtg,\n htr.home_NETRtg - atr.away_NETRtg AS home_NETRtg_advantage,\n htr.home_ORtg - atr.away_ORtg AS home_ORtg_advantage,\n atr.away_DRtg - htr.home_DRtg AS home_DRtg_advantage\n FROM home_team_ratings AS htr\n JOIN away_team_ratings AS atr\n ON htr.home_team = atr.away_team\n \"\"\")\n\n # join all location related variables into one dataframe\n home_advantage_df = wins_by_location.merge(rating_by_location, on='team')\n\n return home_advantage_df", "def get_team_df(df):\n team_df = None\n # get all team names as list\n teams = df['team'].drop_duplicates().to_list()\n # print(teams)\n\n # create temp df to sort by only that team\n for team in teams:\n temp_team_df = df[(df['team'] == team)]\n dates = temp_team_df['date'].drop_duplicates().to_list()\n\n # for each unique date, create another temp df\n for date in dates:\n # sum up all stats on date, store into team_df\n date_df = temp_team_df[(temp_team_df['date'] == date)]\n # print(date_df.iloc[0])\n d = {key: [date_df[key].sum()] for key in constants.ScatterFilters.team_y_keys}\n temp_series = date_df.iloc[0]\n d['opponent'] = temp_series['opponent']\n d['outcome'] = temp_series['outcome']\n d['location'] = temp_series['location']\n # print(d)\n temp_df = pd.DataFrame(d, 
index=[team])\n temp_df['date'] = [date]\n # temp_player = date_df.iteritems()[0]\n\n if team_df is None:\n team_df = temp_df\n else:\n team_df = pd.concat([temp_df, team_df])\n\n # print(team_df.shape)\n # print(team_df.head(10))\n return team_df", "def _load_teams(self):\n self.teams = list(np.unique(self.input_df[[\"HomeTeam\", \"AwayTeam\"]].values.ravel('F')))\n self.results_df = pd.DataFrame(self.teams, columns=['team'])", "def _load_wins(self):\n self.results_df['wins'] = self.results_df.team.apply(self.team_total_wins)", "def get_pts_game():\n nba_stats_url = \"https://stats.nba.com/stats/leagueLeaders?LeagueID=00&PerMode=PerGame&Scope=S&Season=2017-18&SeasonType=Regular+Season&StatCategory=PTS\"\n pts_game_dict = get(nba_stats_url).json()\n players = [pts_game_dict[\"resultSet\"][\"rowSet\"][i][2] for i in range(len(pts_game_dict[\"resultSet\"][\"rowSet\"]))]\n pts_game = [pts_game_dict[\"resultSet\"][\"rowSet\"][i][22] for i in range(len(pts_game_dict[\"resultSet\"][\"rowSet\"]))]\n df = pd.DataFrame()\n df[\"player\"] = players\n df[\"pts_game\"] = pts_game\n return df", "def active(index=False):\n global _Data\n current_year = params.Season.current_start_year()\n rows = _Data[_Data['to_year'] >= current_year]\n return utils.as_tuples(df=rows, to_tuple=Player, index=index)", "def get_player_stats(self):\n\n\n return pd.concat([player.get_stats() for player in self.players],axis=0,sort=True).reset_index(drop=True)", "def _load_goals(self):\n self.results_df['goals'] = self.results_df.team.apply(self.team_total_goals)", "def _formatActiveStatsTable(self, df):\n df.drop(df.shape[0]-1, inplace=True)\n if df.iloc[:, 2].dtype == 'object':\n rows = df[df.iloc[:, 2] == '--'].index\n df.iloc[rows] = df.iloc[rows].replace(to_replace='--',\n value=np.nan)\n df = df.apply(pd.to_numeric, errors='ignore')\n reStr = '^(?P<PLAYER>.+?), (?P<TEAM>\\w+)\\xa0(?P<POS>.+?)' \\\n '(?P<DTD>$|\\xa0\\xa0DTD$)'\n df = df.join(df['PLAYER, TEAM POS'].str.extract(reStr, expand=True))\n df.drop('PLAYER, TEAM POS', axis=1, inplace=True)\n df['POS'] = df['POS'].apply(lambda x: x.split(', '))\n # Drop extra columns\n df = df.select(lambda x: not re.search('Unnamed: \\d+', x), axis=1)\n return df", "def team_info(self):\n df_team = pd.read_csv(datadir / 'TEAM.csv.gz')\n\n team_cols = {\n 'gid': 'game_id',\n 'tname': 'team',\n #'pts': 'tm_pts',\n 'ry': 'tm_rush_yds',\n 'ra': 'tm_rush_att',\n 'py': 'tm_pass_yds',\n 'pa': 'tm_pass_att',\n 'pc': 'tm_pass_comp',\n 'sk': 'tm_sacks',\n 'sky': 'tm_sack_yds',\n 'ints': 'tm_ints',\n 'iry': 'tm_int_yds',\n 'fum': 'tm_fumbles',\n 'pu': 'tm_punts',\n 'gpy': 'tm_punt_yds',\n 'fgm': 'tm_field_goals',\n 'fgat': 'tm_field_goal_att',\n 'pen': 'tm_penalty_yds',\n 'top': 'tm_possess_time',\n 'tdp': 'tm_pass_tds',\n 'tdr': 'tm_rush_tds',\n 'td': 'tm_tds',\n 'qba': 'tm_qb_rush_att',\n 'qby': 'tm_qb_rush_yds'}\n\n df_team = df_team[team_cols.keys()].rename(team_cols, axis=1)\n\n df_team = df_team.merge(self.quarterback_info, on=['game_id', 'team'])\n\n return df_team", "def _add_team_stats(df_player_stats: pd.DataFrame, df_team_stats: pd.DataFrame) -> pd.DataFrame:\n logging.info('Enriching player stats with team stats...')\n df_team_stats = df_team_stats.rename(columns={\n column: \"team_\" + column for column in df_team_stats if column not in ('team', 'week', 'year')\n })\n return df_player_stats.merge(df_team_stats, how='left', on=['team', 'week', 'year'])", "def retrieve_player_stats(player1,player2,date,r,sur,year):\n\t#COMMON OPPONENTS APPROACH\n\t#print(\"Retrieving data about 
{} with respect to {} for matches before {}...\".format(player1,player2,date))\n\t\n\t#TIME DISCOUNTING\n\t#we try to give higher weight to most recent matches\n\t#to do so, we select the rows of interest AND the difference (in years) from the present date which will serve as weight\n\n\t####\n\t#games played by player1 in the most recent 5 years\n\tg1=df[((df[\"winner_name\"]==player1) | (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g1.loc[(g1.winner_name==player1, 'loser_name')].values[:])\n\tol=list(g1.loc[(g1.loser_name==player1, 'winner_name') ].values[:])\n\to1=set(ow+ol) #player 1 opponents\n\n\t#games played by player2\n\tg2=df[((df[\"winner_name\"]==player2) | (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g2.loc[(df.winner_name==player2, 'loser_name')].values[:])\n\tol=list(g2.loc[(df.loser_name==player2, 'winner_name') ].values[:])\n\to2=set(ow+ol) #player 2 opponents\n\n\t#list of common opponents \n\tco=[x for x in o1 if x in o2]\n\t#print(\"Common opponents in the last 5 years:\")\n\t#print(co)\n\n\tcolumn_names=[\"fs\",\"w1sp\",\"w2sp\",\"wsp\",\"wrp\",\"tpw\",\"aces\",\"df\",\"bpc\",\"bps\",\"bpo\",\"bpw\",\"tmw\",\"data_amount\",\"opponent\",]\n\taverages=pd.DataFrame(columns=column_names) #df to be filled with one row per opponent\n\t\n\tif len(co)>=5:\n\t\t\n\t\tcount=0\n\t\t#now evaluate average statistics of player1 wrt to each common opponent, then we'll do the average\n\t\tfor o in co:\n\t\t\t#print(\"Matches of {} vs {}...\".format(player1,o))\n\t\t\ttot_w=0\n\t\t\ttot_l=0\n\n\t\t\t#select matches of player 1 vs opponent o\n\t\t\tm=df[((((df[\"winner_name\"]==player1) & (df[\"loser_name\"]==o))) | ((df[\"winner_name\"]==o) & (df[\"loser_name\"]==player1))) & \\\n\t\t\t((df[\"tourney_date\"]<date) | ((df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\t\tif m.shape[0] > 0:\n\t\t\t\t#we have min 2 past matches against opponent o\n\t\t\t\t#won matches\n\t\t\t\tw=m[m[\"winner_name\"]==player1].loc[:,['w_fs', 'w_w1s', 'w_w2s', 'w_wsp', 'w_wrp', 'w_tpw', 'w_apg', 'w_dfpg', 'w_bppg', 'w_bps', 'l_bppg', 'l_bps', 'loser_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'w_fs':'fs','w_w1s':'w1s','w_w2s':'w2s','w_wsp':'wsp','w_wrp':'wrp','w_tpw':'tpw','w_apg':'apg','w_dfpg':'dfpg','w_bppg':'bppg',\\\n\t\t\t\t'w_bps':'bps','l_bppg':'bpo','l_bps':'l_bps','loser_name':'opponent', 'tourney_date':'date','surface':'s'})\n\t\t\t\tif w.shape[0]>0:\n\t\t\t\t\tw[\"bpc\"]=w.apply(lambda row: 1-row[\"l_bps\"],axis=1)\n\t\t\t\t\t#set year difference param.\n\t\t\t\t\tw[\"year_diff\"]=w.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_w=w.shape[0]\n\t\t\t\tw=w.drop(\"l_bps\", axis=1)\n\n\t\t\t\t#lost matches\n\t\t\t\tl=m[m[\"loser_name\"]==player1].loc[:,['l_fs', 'l_w1s', 'l_w2s', 'l_wsp', 'l_wrp', 'l_tpw', 'l_apg', 'l_dfpg', 'l_bppg', 'l_bps', 'w_bppg', 'w_bps', 'winner_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'l_fs':'fs','l_w1s':'w1s','l_w2s':'w2s','l_wsp':'wsp','l_wrp':'wrp','l_tpw':'tpw','l_apg':'apg','l_dfpg':'dfpg','l_bppg':'bppg',\\\n\t\t\t\t'l_bps':'bps','w_bppg':'bpo','w_bps':'w_bps','winner_name':'opponent','tourney_date':'date','surface':'s'})\n\t\t\t\tif l.shape[0]>0:\n\t\t\t\t\tl[\"bpc\"]=l.apply(lambda row: 
1-row[\"w_bps\"],axis=1)\n\t\t\t\t\t\n\t\t\t\t\tl[\"year_diff\"]=l.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_l=l.shape[0]\n\t\t\t\t\t\n\t\t\t\tl=l.drop(\"w_bps\", axis=1)\n\n\t\t\t\t#join the two datframes, so that we have all the matches\n\t\t\t\tj = pd.concat([w, l],sort=False)\n\t\t\t\t#weight for surface\n\t\t\t\tj[\"s_ref\"]=j.apply(lambda row: sur,axis=1) #reference surface of match under study\n\t\t\t\tj[\"s_w\"]=j.apply(surface_weighting,axis=1) #surface weight of each previous match\n\t\t\t\tj=j.drop(\"s\", axis=1) #not useful anymore\n\n\t\t\t\t#assign weight which decreases as year_diff is higher\n\t\t\t\tj[\"discounting\"]=j.apply(time_discount,axis=1)\n\t\t\t\t#further multiply time weights by surface weights\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]*row[\"s_w\"],axis=1)\n\t\t\t\tj=j.drop(\"s_ref\", axis=1)\n\t\t\t\tj=j.drop(\"s_w\", axis=1)\n\t\t\t\tj=j.drop(\"year_diff\", axis=1)\n\n\t\t\t\t#print(j)\n\t\t\t\ttot_weights=j[\"discounting\"].sum()\n\t\t\t\t#normalize weights to sum to 1\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]/j[\"discounting\"].sum(),axis=1)\n\t\t\t\t#print(j)\n\t\t\t\t#weight all the matches for the discounting param\n\t\t\t\t#hence, multiply columns 0-11 for column \"discounting\"\n\t\t\t\tj.update(j.iloc[:, 0:11].mul(j.discounting, 0))\n\t\t\t\tj[\"bpc\"]=j.apply(lambda row: row[\"bpc\"]*row[\"discounting\"],axis=1)\n\t\t\t\t#now to have the weghted average of each stat, sum all the column\n\t\t\t\tavg=list(j.sum(axis=0,numeric_only=True)[0:12])\n\t\t\t\tavg.append(tot_w/(tot_w+tot_l)) #append % of matches won against o\n\t\t\t\t#UNCERTAINTY\n\t\t\t\t#print(\"Uncertainty: 1/{}\".format(tot_weights))\n\t\t\t\tavg.append(tot_weights) #add \"data amount\" CHANGED FROM BEFORE!!\n\t\t\t\tavg.append(o)\n\t \t\t\n\t \t\t#NOW we have data for past matches of player1 against common opponent o\n\t\t\t\t#add to dataframe, go to next one\n\t\t\t\taverages.loc[count]=avg\n\t\t\t\tcount+=1\n\n\t\t\t\t#print(j)\n\t\t\t\n\t\t\t\n\t#at the end of the loop, return the dataframe\n\t#in the outer function, compute general uncertainties with data of the two players combined, \n\t#then evaluate average statistics btw all the common opponents for each player - finally, build the ultimate feature vector\n\t#print(averages)\n\treturn averages", "def _get_league_score_on_year(league_name, season): \n # get table with team name along with home goal and away goal.\n query = \"select r3.name as League_name, r.team_long_name as home_team_name1, \\\n r.team_short_name as home_team_name2,r2.team_long_name as away_team_name1, r2.team_short_name as \\\n away_team_name2,l.season,l.home_team_goal,l.away_team_goal from Match as l left join Team as r \\\n on l.home_team_api_id = r.team_api_id \\\n left join Team as r2 \\\n on l.away_team_api_id=r2.team_api_id\\\n left join League as r3\\\n on l.league_id = r3.id;\"\n df = _get_table(query, conn)\n # get all matches in one season for one league.\n res_df = df[(df.League_name == league_name) & (df.season == season)]\n # get all goals scored in home and away team.\n all_goals = [sum(res_df.home_team_goal),sum(res_df.away_team_goal)]\n # get individual teams goal\n teams_goals_df = res_df.groupby(by = \"home_team_name1\").sum()[[\"home_team_goal\",\"away_team_goal\"]]\n teams_goals_df[\"tot_goals\"] = teams_goals_df.home_team_goal + teams_goals_df.away_team_goal\n top_4_home_teams = teams_goals_df.sort_values(by=\"tot_goals\",ascending=False).head(4)\n 
return top_4_home_teams", "def read_games(self):\n\n urlmask = 'http://www.football-data.co.uk/mmz4281/{}/{}.csv'\n filemask = 'MatchHistory_{}_{}.csv'\n col_rename = {\n 'Div': 'league',\n 'Date': 'date',\n 'HomeTeam': 'home_team',\n 'AwayTeam': 'away_team',\n }\n\n df_list = []\n current_season_ends = str(date.today().year)[-2:]\n for lkey, skey in itertools.product(self._selected_leagues.values(),\n self.seasons):\n filepath = Path(datadir(), filemask.format(lkey, skey))\n url = urlmask.format(skey, lkey)\n current_season = skey[-2:] >= current_season_ends\n if current_season or (not filepath.exists()):\n self._download_and_save(url, filepath)\n\n df_list.append(\n pd.read_csv(str(filepath),\n parse_dates=['Date'],\n infer_datetime_format=True,\n dayfirst=True,\n encoding='UTF-8',\n )\n .assign(season=skey)\n )\n\n df = (\n pd.concat(df_list)\n .rename(columns=col_rename)\n .pipe(self._translate_league)\n .replace({'home_team': TEAMNAME_REPLACEMENTS,\n 'away_team': TEAMNAME_REPLACEMENTS})\n .dropna(subset=['home_team', 'away_team'])\n )\n\n df['game_id'] = df.apply(self._make_game_id, axis=1)\n df.set_index(['league', 'season', 'game_id'], inplace=True)\n df.sort_index(inplace=True)\n return df", "def get_active_leagues():\n response = requests.get(url=\"https://www.pathofexile.com/api/trade/data/leagues\")\n response_data = response.json()\n for item in response.headers.items():\n print(item)\n\n return [League(league_data['id'], league_data['text']) for league_data in response_data['result']]", "def get_advanced_stats(df, opponents_stats=True):\n # if opponents_stats do not exist, add them to df\n if not opponents_stats:\n df = get_games_opponents_stats(df)\n\n # get better estimation for possessions\n df['possessions'] = (df['POSS'] + df['OP_POSS']) / 2\n\n # calculate field goal attempts and estimated chances\n df['FGA'] = df['2PA'] + df['3PA']\n df['CHANCES'] = df['FGA'] + (0.44 * df['FTA'])\n df['OP_FGA'] = df['OP_2PA'] + df['OP_3PA']\n df['OP_CHANCES'] = df['OP_FGA'] + (0.44 * df['OP_FTA'])\n\n # calculate percentage stats indicating shooting efficiency\n df['3P%'] = 100 * df['3PM'] / df['3PA']\n df['2P%'] = 100 * df['2PM'] / df['2PA']\n df['FT%'] = 100 * df['FTM'] / df['FTA']\n df['eFG%'] = 100 * (df['2PM'] + (1.5 * df['3PM'])) / df['FGA']\n df['TS%'] = 100 * df['PTS'] / (2 * df['CHANCES'])\n df['OP_3P%'] = 100 * df['OP_3PM'] / df['OP_3PA']\n df['OP_2P%'] = 100 * df['OP_2PM'] / df['OP_2PA']\n df['OP_eFG%'] = 100 * (df['OP_2PM'] + (1.5 * df['OP_3PM'])) / df['OP_FGA']\n df['OP_TS%'] = 100 * df['OP_PTS'] / (2 * df['OP_CHANCES'])\n\n # calculate rebounding efficiency\n df['OREB%'] = 100 * df['OREB'] / (df['OREB'] + df['OP_DREB'])\n df['DREB%'] = 100 * df['DREB'] / (df['DREB'] + df['OP_OREB'])\n\n # calculate rate stats indicating offensive style of play\n df['3PR'] = 100 * df['3PA'] / (df['FGA'])\n df['FTR'] = 100 * df['FTA'] / (df['FGA'])\n df['ASTR'] = 100 * df['AST'] / (df['2PM'] + df['3PM'])\n df['TOVR'] = 100 * df['TOV'] / df['possessions']\n df['AST-TOV_R'] = df['AST'] / df['TOV']\n df['OP_STLR'] = 100 * df['OP_STL'] / df['possessions']\n df['OP_BLKR'] = 100 * df['OP_BLK'] / df['2PA']\n df['PFDR'] = 100 * df['PFD'] / df['possessions']\n\n # calculate rate stats indicating defensive style of play\n df['STLR'] = 100 * df['STL'] / df['possessions']\n df['BLKR'] = 100 * df['BLK'] / df['OP_2PA']\n df['OP_3PR'] = 100 * df['OP_3PA'] / df['OP_FGA']\n df['OP_FTR'] = 100 * df['OP_FTA'] / df['OP_FGA']\n df['OP_ASTR'] = 100 * df['OP_AST'] / (df['OP_2PM'] + df['OP_3PM'])\n df['OP_TOVR'] = 
100 * df['OP_TOV'] / df['possessions']\n df['OP_AST-TOV_R'] = df['OP_AST'] / df['OP_TOV']\n df['PFR'] = 100 * df['PF'] / df['possessions']\n\n # calculate pace and rating stats indicating overall team efficiency\n df['PTS40'] = 40 * 5 * df['PTS'] / df['MTS']\n df['OP_PTS40'] = 40 * 5 * df['OP_PTS'] / df['OP_MTS']\n df['PACE'] = 40 * (df['POSS'] + df['OP_POSS']) / (2 * (df['MTS'] / 5))\n df['ORtg'] = 100 * df['PTS'] / df['possessions']\n df['DRtg'] = 100 * df['OP_PTS'] / df['possessions']\n df['NETRtg'] = df['ORtg'] - df['DRtg']\n\n cols_to_use = [\n 'season', 'team', 'PTS40', 'OP_PTS40', 'PTS', 'OP_PTS', '3P%', 'OP_3P%',\n '2P%', 'OP_2P%', 'FT%', '3PR', 'OP_3PR', 'FTR', 'OP_FTR', 'OREB%',\n 'DREB%', 'ASTR', 'OP_ASTR', 'TOVR', 'OP_TOVR', 'PFR', 'PFDR',\n 'AST-TOV_R', 'OP_AST-TOV_R', 'STLR', 'OP_STLR', 'BLKR', 'OP_BLKR',\n 'PACE', 'ORtg', 'DRtg', 'NETRtg', 'eFG%', 'OP_eFG%', 'TS%', 'OP_TS%'\n ]\n\n if not opponents_stats:\n df['win'] = df['PTS'] > df['OP_PTS']\n cols_to_use = cols_to_use + [\n 'game_id', 'round', 'location', 'OP_team', 'win'\n ]\n\n return df[cols_to_use]", "def get_games_opponents_stats(games_stats_df):\n home = games_stats_df.loc[games_stats_df['location'] == 'home_team']\n away = games_stats_df.loc[games_stats_df['location'] == 'away_team']\n both_home = home.merge(\n away.add_prefix('OP_'), left_on='game_id', right_on='OP_game_id')\n both_away = away.merge(\n home.add_prefix('OP_'), left_on='game_id', right_on='OP_game_id')\n both = pd.concat([both_home, both_away])\n both.sort_values(by=['season', 'round', 'game_id'], inplace=True)\n both.drop(['OP_game_id', 'OP_season', 'OP_round'], axis=1, inplace=True)\n\n return both", "def get_elo_league(league, data_folder):\n df = pd.read_pickle(os.path.join(data_folder,league,league + '.pkl'))\n allTeams = list(df['EloNameHome'].value_counts().index)\n fullData=[]\n for team in allTeams:\n try:\n url=\"http://api.clubelo.com/\"\n response = requests.get(url + team.replace(\" \", \"\") )\n Data = StringIO(response.text)\n df1 = pd.read_csv(Data, sep=\",\")\n df1['From'] = pd.to_datetime(df1['From'])\n df1.index = df1['From']\n df1 = df1.sort_index()\n df1['Rank'] = np.where(df1['Rank']=='None', np.nan, df1['Rank'] )\n # reindex to have daily data, via front filling. API returns ts at irregular frequencies\n idx = pd.date_range(df1.index[0],df1.index.max())\n df2 = df1.reindex(idx, fill_value = np.nan)\n df2 = df2.fillna(method = 'ffill')\n df2['Date'] = df2.index\n df2 = df2.drop(['Country', 'Level', 'From', 'To'], axis=1)\n fullData.append(df2)\n except:\n print 'failed: %s'%(team)\n print url + team.replace(\" \", \"\")\n fullDf = pd.concat(fullData, axis=0)\n return fullDf", "def _load_matches(self):\n for team in self.teams:\n self._load_team_matches(team)\n self.match_df['result_int'] = self.match_df.result.apply(winloss_to_int)\n self.match_df['unixtime'] = self.match_df.date.apply(lambda row: row.timestamp())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Format transaction tables. In order to properly parse text in Date and Detail columns, we need to parse HTML outside of pandas.
def _formatTransactionTable(self, htmlStr, tds): df = pd.read_html(htmlStr, header=1)[0] dates = [' '.join(i.itertext()) for i in tds[::4]] df['DATE'] = dates details = [' '.join(i.itertext()).replace(' ', ' ').replace(' ,', ',') for i in tds[2::4]] df['DETAIL'] = details addDropKey = u'Transaction\xa0\xa0Add/Drop' addDropStr = '(\w+) dropped (.+?), \w+ \w+ to (Waivers|Free Agency)'\ '|(\w+) added (.+?), \w+ \w+ from (Waivers|Free Agency)' addDrop = pd.Series(df[df['TYPE'].str.match(addDropKey)]['DETAIL'].str. findall(addDropStr)) addDrop = addDrop.apply(lambda x: [x[0][:3], x[1][:3:-1]]) addKey = u'Transaction\xa0\xa0Add' addStr = '(\w+) added (.+?), \w+ \w+ from (Waivers|Free Agency)' add = pd.Series(df[df['TYPE'].str.match(addKey)]['DETAIL'].str. findall(addStr)) add = add.apply(lambda x: [x[0][::-1]]) dropKey = u'Transaction\xa0\xa0Drop' dropStr = '(\w+) dropped (.+?), \w+ \w+ to (Waivers|Free Agency)' drop = pd.Series(df[df['TYPE'].str.match(dropKey)]['DETAIL'].str. findall(dropStr)) tradeKey = u'Transaction\xa0\xa0Trade Processed' tradeStr = '(\w+) traded (.+?), \w+ \w+ to (\w+)' trade = pd.Series(df[df['TYPE'].str.match(tradeKey)]['DETAIL'].str. findall(tradeStr)) transactions = pd.concat([addDrop, add, drop, trade]) transactions.name = 'TRANSACTION' df = df.join(transactions) return df
[ "def format_table(row):\n shelter_name = row[\"FacilityName\"]\n last_report = row[\"timestamp_local\"]\n district = integrify(row[\"CouncilDistrict\"])\n occupied_beds = integrify(row[\"occupied_beds_computed\"])\n aval_beds = integrify(row[\"open_beds_computed\"])\n male_tot = integrify(row[\"Total Men Currently at Site\"])\n female_total = integrify(row[\"Total Women Currently at Site\"])\n pets = integrify(row[\"Number of Pets Currently at Site\"])\n ems_calls = integrify(row[\"Number of EMS Calls\"])\n ems_transport = integrify(row[\"Number of EMS Transports\"])\n num_quar = integrify(row[\"Clients currently quarantined\"])\n trail_open = integrify(row[\"Number of Open Trailers\"])\n trail_occupied_women = integrify(row[\"Total Women Currently in Trailer\"])\n trail_occupied_men = integrify(row[\"Total Men Currently in Trailer\"])\n trail_occupied_pets = integrify(row[\"Total Pets Currently in Trailer\"])\n\n shelter = f\"\"\"<b>{shelter_name}</b><br>\n <i>Council District {district}</i><br>\n <i>Report Time: {last_report}</i><br>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Occupied Beds: {occupied_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Available Beds: {aval_beds}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Male: {male_tot}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Female: {female_total}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Pets: {pets}</p><br>\n <i>Trailer Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Trailer Open Beds: {trail_open}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Men: {trail_occupied_men}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Women: {trail_occupied_women}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Trailer Occupied - Pets: {trail_occupied_pets}\n </p><br>\n <i>Health Details: </i>\n <p style=\"margin-top:2px; margin-bottom: 2px\">Number of EMS Calls: {ems_calls}</p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of EMS Transports: {ems_transport}\n </p>\n <p style=\"margin-top:2px; margin-bottom: 2px\">\n Number of currently quarantined clients: {num_quar}\n </p>\n\n\n \"\"\"\n return shelter.strip()", "def _formatDraftTable(self, html):\n rnd = df[0].ix[0].replace('ROUND ', '')\n df.drop([0], inplace=True)\n df['ROUND'] = rnd\n df['PICK'] = pd.to_numeric(df[0])\n df['MANAGER'] = df[2]\n df = self._formatAuctionDraftTable(df)\n df = df[['ROUND', 'PICK', 'MANAGER', 'PLAYER', 'TEAM', 'POS',\n 'KEEPER']]\n return df", "def table(self, text):\r\n text = text + \"\\n\\n\"\r\n pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\\. ?\\n)?^(%(a)s%(c)s\\.? 
?\\|.*\\|)\\n\\n'\r\n % {'s': self.table_span_re,\r\n 'a': self.align_re,\r\n 'c': self.c},\r\n re.S | re.M | re.U)\r\n return pattern.sub(self.fTable, text)", "def pandas_df_to_markdown_table(df: pd.DataFrame) -> str:\n\n fmt = ['---' for i in range(len(df.columns))]\n df_fmt = pd.DataFrame([fmt], columns=df.columns)\n df_formatted = pd.concat([df_fmt, df])\n return Markdown(df_formatted.to_csv(sep=\"|\", index=False)).data", "def _fix_html_tables_old_pandoc(self, htmlstring):\n result = []\n pos = 0\n re_tables = re.compile(r\"<table.*</table>\", re.DOTALL)\n re_tbody = re.compile(r\"<tbody.*</tbody>\", re.DOTALL)\n tables = re_tables.finditer(htmlstring)\n for table in tables:\n # process the html before the match\n result.append(htmlstring[pos:table.start()])\n # now the table itself\n table_html = htmlstring[table.start():table.end()]\n tbody = re_tbody.search(table_html)\n if not tbody is None:\n result.append(table_html[0:tbody.start()])\n tbody_html = table_html[tbody.start():tbody.end()]\n tbody_html = tbody_html.replace(\"<th\",\"<td\")\n tbody_html = tbody_html.replace(\"</th>\", \"</td>\")\n result.append(tbody_html)\n result.append(table_html[tbody.end():])\n else:\n result.append(table_html)\n pos = table.end()\n result.append(htmlstring[pos:])\n\n return \"\".join(result)", "def fix_md_tables(pelican_obj):\n html, changes = fix_md_table(pelican_obj._content)\n #if changes:\n # print(\"Altered %d tag(s) in %s\" % (changes, pelican_obj.source_path))\n\n return html", "def to_html(self,fn='tableone.html'):\n tablefmt = 'html'\n with open(fn, 'w') as f:\n f.write(tabulate(self.tableone, tablefmt=tablefmt))", "def to_html(self):\n # create table 1\n body = \"\"\"<html>\n <head>\n <style>\n table {\n font-family: arial, sans-serif;\n border-collapse: collapse;\n width: 100%;\n }\n\n td, th {\n border: 1px solid #dddddd;\n text-align: left;\n padding: 8px;\n }\n\n </style>\n </head>\n <body>\n\n <h2>transcription-compare Table</h2>\n <table>\n <tr>\n <th>output_name</th>\n <th>distance</th>\n <th>error_rate</th>\n <th>substitution</th>\n <th>insertion</th>\n <th>deletion</th>\n </tr>\n <tbody>\n \"\"\"\n for index, identifier in enumerate(self.identifiers):\n body += \"\"\"<tr><td>{}</td>\"\"\".format(identifier)\n body += '\\n<td>' + str(self.distance[index]) + '</td>'\n body += '\\n<td>' + str(self.error_rate[index]) + '</td>'\n body += '\\n<td>' + str(self.substitution[index]) + '</td>'\n body += '\\n<td>' + str(self.insertion[index]) + '</td>'\n body += '\\n<td>' + str(self.deletion[index]) + '</td>\\n</tr>'\n body += \"\"\"</tbody>\n </table>\n \"\"\"\n body += \"\"\"<table>\\n<tr>\\n<th>error_type</th>\"\"\"\n for index, identifier in enumerate(self.identifiers):\n body += \"\"\" <th>{}</th>\"\"\".format(identifier)\n body += \"\"\"<th>percentage</th>\"\"\"\n body += \"\"\"</tr>\"\"\"\n body += self.multi_alignment_result.to_html_error_type(self.total_rows)\n body += \"\"\"</tbody>\n </table>\n \"\"\"\n\n body += self.multi_alignment_result.to_html()\n body += '\\n</body>\\n</html>'\n return body", "def make_source_table_body(self, sources_dic):\n table_body = \"\"\n for source_id, source_dic in sorted(sources_dic.items(), key=lambda item: item[1][\"source_date\"]):\n \n try:\n source_date = int(source_dic[\"source_date\"])\n if source_date > 3000:\n source_date = \"(no date)\"\n except:\n source_date = \"(no date)\"\n \n table_body += ' <tr>\\n'\n table_body += ' <td>{}</td>\\n'.format(source_id)\n table_body += ' <td>{}</td>\\n'.format(source_dic[\"source_name\"])\n 
table_body += ' <td>{}</td>\\n'.format(source_dic[\"author\"])\n if jedli_global.include_len_in_table:\n table_body+= ' <td>{}</td>\\n'.format(source_dic[\"len\"])\n table_body += ' <td>{}</td>\\n'.format(source_date)\n for word, word_dic in sorted(source_dic[\"results\"].items()):\n no_of_results = word_dic[\"number_of_results\"]\n first_result_page = word_dic[\"first_result_page\"]\n if first_result_page:\n # add a link to the file that contains the first result of this source:\n fp = \"{}_{}_{}.html#{}\".format(self.base_filename, word,\n first_result_page, source_id)\n table_body += ' <td><a href=\"{}\">{}</a></td>\\n'.format(fp, no_of_results)\n else:\n table_body += ' <td>{}</td>\\n'.format(no_of_results)\n table_body += ' </tr>\\n'\n return table_body", "def get_formatted_tracklist_table_html(track_data: pd.DataFrame):\n if track_data.empty:\n print('A list of tracks is required.')\n return\n pd.set_option('display.max_colwidth', None)\n keys = ['name', 'album_image_url_small', 'artist_name', 'album_name', 'share_url']\n new_keys = ['Song Title', 'Cover Art', 'Artist', 'Album', 'Share URL']\n track_data = track_data[keys].rename(columns=dict(zip(keys, new_keys)))\n\n def image_formatter(im):\n return f'<img src=\"{im}\" />'\n\n formatters = {\n 'Cover Art': image_formatter\n }\n playlist_table = track_data.to_html(formatters=formatters, escape=False, index=False, render_links=True)\n playlist_table = playlist_table.replace('style=\"text-align: right;\"', '')\n playlist_table = playlist_table.replace('<tr>', '<tr style=\"border: solid 1px #CCC;\">')\n playlist_table = playlist_table.replace(\n '<table border=\"1\" class=\"dataframe\">',\n '<table style=\"border-collapse: collapse; border: solid 1px #CCC;\">'\n )\n return playlist_table", "def get_html(self):\n # Format the result\n result_str = self.get_text()\n if self.link is not None:\n result_str = '<a href=\"%s\">%s</a>' % (self.link, result_str)\n return '<td>%s</td>' % result_str", "def restructured_table(column_names, column_ids, object_list, truncate_len=13):\r\n single_cell_border = \"+\" + (truncate_len+2) * \"-\"\r\n border = len(column_names) * single_cell_border + \"+\"\r\n table = \"\\n\" + border + \"\\n\"\r\n # Column Headers first\r\n for column in column_names:\r\n table += \"| %-13s \" % column[:truncate_len]\r\n table += \"|\\n\" + border + \"\\n\"\r\n # Data next\r\n for obj in object_list:\r\n for i in column_ids:\r\n levels = i.split(\".\")\r\n attr = obj\r\n for l in levels:\r\n attr = getattr(attr, l)\r\n table += \"| %-13s \" % str(attr)[:truncate_len]\r\n table += \"|\\n\"\r\n table += border + \"\\n\"\r\n return table", "def table(df, name, locTable, formatters=None, tex=True, locCaption=None, escape=False, \n column_format=None, na_rep='', index=False, longtable=False, multirow=True, float_format=None, header=True):\n locCaption=locTable+'/Captions' if locCaption is None else locCaption\n if tex:\n with open(locTable+'/'+name+'.tex', 'w') as tex:\n try:\n with open(locCaption+'/'+name+'.txt', 'r') as cap:\n caption=cap.read()\n except:\n print(f'No caption found for {name}.')\n caption=None\n df.to_latex(buf=tex, na_rep=na_rep, formatters=formatters, escape=escape,\n longtable=longtable, index=index, column_format=column_format, caption=caption,\n label='tab:'+name, multirow=multirow, float_format=float_format, header=header)\n else:\n with open(locTable+'/'+name+'.txt', 'w') as txt:\n df.to_string(buf=txt, na_rep=na_rep, formatters=formatters, index=index, header=header)\n return", "def to_html(self):\n 
body = \"\"\"<table>\\n<tr>\\n<th>num</th>\n <th>Reference</th>\n <th>output</th>\n <th>error_type</th>\n <th>local_cer</th>\n <th>distance</th>\n <th>sub</th>\n <th>ins</th>\n <th>del</th></tr><tbody>\"\"\"\n # create header\n for c, t in enumerate(self.multi_alignment_tokens):\n body += t.to_html(c)\n # something else\n # <p> annotation </p>\n body += '\\n</tbody>\\n</table>'\n return body", "def data_frame_to_html(data_frame: DataFrame) -> str:\n return data_frame.to_html(float_format=\"%.2f\", index=False,\n classes=[\"table table-striped table-sm\"])", "def render_html(table, data):\n return render(renderers.HtmlRenderer, table, data)", "def summary_html_table():\n df = pd.DataFrame.from_csv('summarytable.csv')\n df = df[['Accession','Location','Protein Product','Gene Name']]\n df['Accession'] = df['Accession'].apply(\n lambda x: '<a href=\\\"http://student.cryst.bbk.ac.uk/cgi-bin/cgiwrap/em001/cgi-script.py?type={0}&input={1}\\\">{1}</a>'.format('Gene_ID',x))\n pd.set_option('display.max_colwidth', 1000)\n with open('indexhead.html') as f:\n html = f.read() + df.to_html(escape=False,index=False)\n with open('index.html','w') as g:\n g.write(html)", "def format_medical_table(self):\n self.format_medical_table_headers()\n self.format_medical_table_values()", "def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function generates the SQL statement for selecting groups and perms. Sadly, Django doesn't support this join in the ORM. It should not select users that haven't got any role, and should select perms that are assigned to any role.
def get_permission_owners_query(): owners_query = """ {group_table_name!s} gug LEFT JOIN {owner_table_name!s} op ON gug.group_id = op.owner_object_id AND gug.group_content_type_id = op.owner_content_type_id AND (gug.roles & op.roles) != 0 LEFT JOIN {global_table_name!s} gl ON gl.content_type_id = gug.group_content_type_id AND (gl.roles & gug.roles) != 0 """ OwnerToPermission = apps.get_model('protector', 'OwnerToPermission') GenericUserToGroup = apps.get_model('protector', 'GenericUserToGroup') GenericGlobalPerm = apps.get_model('protector', 'GenericGlobalPerm') return owners_query.format( owner_table_name=OwnerToPermission._meta.db_table, group_table_name=GenericUserToGroup._meta.db_table, global_table_name=GenericGlobalPerm._meta.db_table, )
[ "def generate_query(self):\n self.query = self._add_select_statement() +\\\n self._add_case_statement() +\\\n self._add_from_statement() +\\\n self._add_group_by_statement()\n\n return self.query", "def _sql_gen_add_gammas(\n settings: dict,\n unique_id_col: str = \"unique_id\",\n table_name: str = \"df_comparison\",\n):\n\n\n select_cols_expr = _get_select_expression_gammas(settings)\n\n sql = f\"\"\"\n select {select_cols_expr}\n from {table_name}\n \"\"\"\n\n return sql", "def add_groupby(self):\n if self.query_model.groupBy_columns is not None and len(self.query_model.groupBy_columns) > 0:\n groupby_clause = \" GROUP BY \"\n for col_name in self.query_model.groupBy_columns:\n groupby_clause += \"?\" + col_name + \" \"\n groupby_clause += \"\\n\"\n self.query_string += groupby_clause", "def get_grouprole_permissions(self, user_obj,group):\n\t\t_group_perm_cache=[]\n \tif not hasattr(user_obj, '_group_perm_cache'):\n\t\t\tperms = Permission.objects.filter(gus_roles__uid=user_obj,gus_roles__gid=group\n \t \t).values_list('content_type__app_label', 'codename'\n \t\t).order_by()\n\t\t\t\n\n\t\t\t_group_perm_cache=set([\"%s.%s\" % (ct, name) for ct, name in perms])\n \treturn _group_perm_cache", "def format_group(group_perm):\n formatted = 'r' if group_perm & READ_PERM != 0 else '-'\n formatted += 'w' if group_perm & WRITE_PERM != 0 else '-'\n formatted += 'x' if group_perm & EXEC_PERM != 0 else '-'\n return formatted", "def _render_groupings(fields):\n\n if not fields:\n return \"\"\n\n return \"GROUP BY \" + \", \".join(fields)", "def denorm_group_in_group(cls, session):\n\n tbl1 = aliased(GroupInGroup.__table__, name='alias1')\n tbl2 = aliased(GroupInGroup.__table__, name='alias2')\n tbl3 = aliased(GroupInGroup.__table__, name='alias3')\n\n if get_sql_dialect(session) != 'sqlite':\n # Lock tables for denormalization\n # including aliases 1-3\n locked_tables = [\n '`{}`'.format(GroupInGroup.__tablename__),\n '`{}` as {}'.format(\n GroupInGroup.__tablename__,\n tbl1.name),\n '`{}` as {}'.format(\n GroupInGroup.__tablename__,\n tbl2.name),\n '`{}` as {}'.format(\n GroupInGroup.__tablename__,\n tbl3.name),\n '`{}`'.format(group_members.name)]\n lock_stmts = ['{} WRITE'.format(tbl) for tbl in locked_tables]\n query = 'LOCK TABLES {}'.format(', '.join(lock_stmts))\n session.execute(query)\n try:\n # Remove all existing rows in the denormalization\n session.execute(GroupInGroup.__table__.delete())\n\n # Select member relation into GroupInGroup\n qry = (GroupInGroup.__table__.insert().from_select(\n ['parent', 'member'], group_members.select().where(\n group_members.c.group_name.startswith('group/')\n ).where(\n group_members.c.members_name.startswith('group/')\n )\n ))\n\n session.execute(qry)\n\n iterations = 0\n rows_affected = True\n while rows_affected:\n # Join membership on its own to find transitive\n expansion = tbl1.join(tbl2, tbl1.c.member == tbl2.c.parent)\n\n # Left outjoin to find the entries that\n # are already in the table to prevent\n # inserting already existing entries\n expansion = expansion.outerjoin(\n tbl3,\n and_(tbl1.c.parent == tbl3.c.parent,\n tbl2.c.member == tbl3.c.member))\n\n # Select only such elements that are not\n # already in the table, indicated as NULL\n # values through the outer-left-join\n stmt = (\n select([tbl1.c.parent,\n tbl2.c.member])\n .select_from(expansion)\n # pylint: disable=singleton-comparison\n .where(tbl3.c.parent == None)\n .distinct()\n )\n\n # Execute the query and insert into the table\n qry = (GroupInGroup.__table__\n .insert()\n 
.from_select(['parent', 'member'], stmt))\n\n rows_affected = bool(session.execute(qry).rowcount)\n iterations += 1\n except Exception as e:\n LOGGER.exception(e)\n session.rollback()\n raise\n finally:\n if get_sql_dialect(session) != 'sqlite':\n session.execute('UNLOCK TABLES')\n session.commit()\n return iterations", "def _get_join_str(featuregroups, join_key):\n join_str = \"\"\n for idx, fg in enumerate(featuregroups):\n if (idx != 0):\n join_str = join_str + \"JOIN \" + _get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],\n fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + \" \"\n join_str = join_str + \"ON \"\n for idx, fg in enumerate(featuregroups):\n if (idx != 0 and idx < (len(featuregroups) - 1)):\n join_str = join_str + _get_table_name(featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],\n featuregroups[0][\n constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + \".`\" + join_key + \"`=\" + \\\n _get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],\n fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + \".`\" + join_key + \"` AND \"\n elif (idx != 0 and idx == (len(featuregroups) - 1)):\n join_str = join_str + _get_table_name(featuregroups[0][constants.REST_CONFIG.JSON_FEATUREGROUPNAME],\n featuregroups[0][\n constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + \".`\" + join_key + \"`=\" + \\\n _get_table_name(fg[constants.REST_CONFIG.JSON_FEATUREGROUPNAME],\n fg[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION]) + \".`\" + join_key + \"`\"\n return join_str", "def generate(self, outer_scope):\n\n self.relations.reverse()\n master_relation = self.relations.pop()\n self.relations.reverse()\n\n if self.verbosity > 2:\n master_relation.dump()\n\n ## SELECT EXPRESSIONS\n select = master_relation.select\n\n for r in self.relations:\n if r.select:\n select = f\"{select}, {r.select}\"\n\n s = f\"SELECT {select} FROM {master_relation.model_table}\"\n\n ## FROM JOINS\n if not outer_scope:\n for relation in self.relations:\n s += f\" {relation.join_operator} {relation.model_table} ON {relation.join_condition_expression} \"\n\n ## WHERE\n where = \"\"\n if master_relation.where:\n where += f\" WHERE {master_relation.where}\"\n for relation in self.relations:\n if relation.where:\n if where:\n where += \" AND \"\n else:\n where += \"WHERE \"\n where += relation.where\n s += where\n\n ## GROUP BY\n if master_relation.group_by:\n gb = master_relation.group_by_columns()\n if gb:\n s += f\" GROUP BY {gb}\"\n\n ## ORDER BY\n order = \"\"\n if master_relation.order_by:\n order += f\" ORDER BY {master_relation.order_by}\"\n for relation in self.relations:\n if relation.order_by:\n if not order:\n order = \" ORDER BY \"\n order += relation.order_by\n if relation.order_by_direction:\n order += \" DESC \" if relation.order_by_direction == \"-\" else \" ASC \"\n s += order\n\n if self._limit:\n s += f\" LIMIT {int(self._limit)}\"\n\n if self._offset:\n s += f\" OFFSET {int(self._offset)}\"\n\n # replace variables placeholders to be valid dict placeholders\n s = re.sub(PLACEHOLDER_PATTERN, lambda x: f\"%({x.group(1)})s\", s)\n\n self.sql = s\n self.master_relation = master_relation\n\n return self.sql", "def _add_group_by_statement(self):\n query = \"group by \" + \"\".join([\"{0},\".format(x) for x in range(1, len(self.index_col) + 1)])\n return query[:-1]", "def get_editable_permissions(self):\n # Dynamic generation of OR queries is based on code found at\n # https://bradmontgomery.net/blog/adding-q-objects-in-django/\n permission_filter = Q()\n for permission in 
self.editable_permissions:\n permission_filter.add(\n Q(content_type__app_label=permission[0],\n codename=permission[1]), Q.OR)\n\n return Permission.objects.filter(\n permission_filter)", "def get_sql_statement(self, *_) -> str:\n return self.sql_stmt.format(\n result_limit=self.config.sourceConfig.config.resultLimit,\n filters=self.filters, # pylint: disable=no-member\n )", "def query_all_groups():\n grp = MetalGroup.query.order_by(MetalGroup.level).all()\n return grp", "def metadata_sql(self, allowed_schemata_sql):\n\n table_name_pattern = self.get_table_grouping_pattern()\n\n return \"\"\"\n SELECT s.schema_name, c.table_name, c.column_name, c.data_type, c.character_maximum_length,\n c.numeric_precision, c.numeric_scale\n\n FROM information_schema.schemata s\n INNER JOIN information_schema.columns c\n ON s.schema_name = c.table_schema\n\n WHERE s.schema_owner = '{}'\n AND c.table_name LIKE '%{}%'\n\n ORDER BY s.schema_name, c.table_name, c.column_name\n \"\"\".format(allowed_schemata_sql, table_name_pattern)", "def prefetch_perms(self, objects):\n if self.user and not self.user.is_active:\n return []\n\n User = get_user_model()\n pks, model, ctype = _get_pks_model_and_ctype(objects)\n\n if self.user and self.user.is_superuser:\n perms = list(\n Permission.objects.filter(content_type=ctype).values_list(\"codename\", flat=True)\n )\n\n for pk in pks:\n key = (ctype.id, force_str(pk))\n self._obj_perms_cache[key] = perms\n\n return True\n\n group_model = get_group_obj_perms_model(model)\n\n group_filters = {\n 'object_pk__in': pks\n }\n\n if self.user:\n fieldname = 'group__{}'.format(\n User.groups.field.related_query_name(),\n )\n group_filters.update({fieldname: self.user})\n else:\n group_filters = {'group': self.group}\n\n if group_model.objects.is_generic():\n group_filters.update({\n 'content_type': ctype,\n 'object_pk__in': pks,\n })\n else:\n group_filters.update({\n 'content_object_id__in': pks\n })\n\n if self.user:\n model = get_user_obj_perms_model(model)\n user_filters = {\n 'user': self.user,\n }\n\n if model.objects.is_generic():\n user_filters.update({\n 'content_type': ctype,\n 'object_pk__in': pks\n })\n else:\n user_filters.update({\n 'content_object_id__in': pks\n })\n\n # Query user and group permissions separately and then combine\n # the results to avoid a slow query\n user_perms_qs = model.objects.filter(**user_filters).select_related('permission')\n group_perms_qs = group_model.objects.filter(**group_filters).select_related('permission')\n perms = chain(user_perms_qs, group_perms_qs)\n else:\n perms = group_model.objects.filter(**group_filters).select_related('permission')\n\n # initialize entry in '_obj_perms_cache' for all prefetched objects\n for obj in objects:\n key = self.get_local_cache_key(obj)\n if key not in self._obj_perms_cache:\n self._obj_perms_cache[key] = []\n\n for perm in perms:\n if type(perm).objects.is_generic():\n key = (ctype.id, perm.object_pk)\n else:\n key = (ctype.id, force_str(perm.content_object_id))\n\n self._obj_perms_cache[key].append(perm.permission.codename)\n\n return True", "def get_queryset_for_role_filtered(self, role):\n queryset = self.request.cradmin_app.get_accessible_group_queryset()\n assignment = role\n return queryset\\\n .filter(parentnode=assignment)\\\n .prefetch_related(\n models.Prefetch(\n 'candidates',\n queryset=self.get_candidate_queryset()))\\\n .prefetch_related(\n models.Prefetch(\n 'examiners',\n queryset=self.get_examiner_queryset()))\\\n .select_related(\n 'cached_data__last_published_feedbackset',\n 
'cached_data__last_feedbackset',\n 'cached_data__first_feedbackset',\n 'parentnode'\n )", "def get_permission_policies(self, request):\n user = request.cache.user\n users = request.app.models.get('users')\n groups = request.app.models.get('groups')\n perms = []\n if not users or not groups or not user.is_authenticated():\n return perms\n with users.session(request) as session:\n session.add(user)\n for group in set(user.groups):\n for permission in group.permissions:\n policy = permission.policy\n if not isinstance(policy, list):\n policy = (policy,)\n perms.extend(policy)\n return perms", "def sqlSelect (self):\n return \"\"\"select x.departmentID,\n x.deptCode,\n x.name,\n x.managerID from Department x \"\"\"", "def get_group_obj_perms_model(obj):\n from guardian.models import GroupObjectPermissionBase\n from guardian.models import GroupObjectPermission\n return get_obj_perms_model(obj, GroupObjectPermissionBase, GroupObjectPermission)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new instance of City before each test
def setUp(self): self.c1 = City()
[ "def setUp(self):\n name = \"SANFRANCISCO\"\n colour = \"blue\"\n connections = ['TOKYO', 'MANILA', 'LOSANGELES', 'CHICAGO']\n self.testCity = City(name=name,colour=colour,connections=connections)", "def test_new_city(self):\n self.new_helper(\"City\")", "def test_save_city(self):\n self.save_helper(\"City\")", "def test_reload_city(self):\n self.reload_helper(\"City\")", "def test_citymodel(self):\n all_objects = storage.all()\n my_model = City()\n storage.new(my_model)\n key = \"{}.{}\".format(my_model.__class__.__name__, my_model.id)\n self.assertIn(key, all_objects.keys())", "def seed_cities():\n # should be 95 cities\n # select city, state from biz group by state, city\n # group by state, city\n all_cities = db.session.query(PlatePalBiz.city, PlatePalBiz.state).filter(PlatePalBiz.city!=u\"blacksburg\", PlatePalBiz.city!=u'Carrboro Saxapahaw Chapel Hill Durham', PlatePalBiz.city!=u'Greenbelt ')\n cities = all_cities.group_by(PlatePalBiz.state).group_by(PlatePalBiz.city).all()\n\n # calculate lat/lng for each city\n geolocator = Nominatim()\n for city in cities:\n location = geolocator.geocode(city[0] + \" \" + city[1])\n print city\n print \"Lat: {}, Lng: {}\".format(location.latitude, location.longitude)\n new_city = City(city=city[0],\n state=city[1],\n lat=location.latitude,\n lng=location.longitude)\n db.session.add(new_city)\n db.session.commit()\n return", "def _save_new_city(state, city):\n city = City(name=city, state=state)\n city.save()\n return city", "def setUp(self):\n self.test_country_1 = Country('Country_one', 100, 1000)\n self.test_country_2 = Country(\"Country_two\", 50, 100)", "def test_city_seeded_to_db(self):\n\n seed_cities = seed_database.seed_cities_table()\n test_length = 10\n self.assertEqual(test_length, len(seed_cities))", "def create_city(city,state,forecast_office_id):\r\n\r\n city = City(city=city,state=state,forecast_office_id=forecast_office_id)\r\n\r\n db.session.add(city)\r\n db.session.commit()\r\n\r\n return city", "def test_city(db):\n query = db.query(Event)\n query = query.filter(Event.year == 2013)\n query = query.filter(Event.month == 12)\n query = query.filter(Event.day == 4)\n event = query.one()\n assert event.city.name == 'Ostrava'", "def initializeCity(self):\n\n title=self._city_obj.getName()\n price_gold, price_ore = self._city_obj.getSoldierPrice()\n \n self.changeTitle(title)\n gui.widgets[self._txt_p_gold_id].changeText(str(price_gold))\n gui.widgets[self._txt_p_ore_id].changeText(str(price_ore))", "def setUp(self):\n\n self.customers = dict()\n self.customers[\"james_bowen\"] = Customer.objects.create(\n name='James Bowen')\n self.customers[\"amanda-arias\"] = Customer.objects.create(\n name='Amanda Arias')\n self.customers[\"beau-jeppesen\"] = Customer.objects.create(\n name='Beau Jeppesen')", "def test_created_and_updated_at_init(self):\n c = City()\n self.assertEqual(c.created_at, c.updated_at)", "def test_cities(db):\n query = db.query(City)\n query = query.filter(City.name == 'Ostrava')\n city = query.one()\n assert city.slug == 'ostrava'\n assert city.events\n assert any(e.name == 'Ostravské KinoPyvo' for e in city.events)\n assert not any(e.name == 'Brněnské Pyvo' for e in city.events)", "def test_new_place(self):\n self.new_helper(\"Place\")", "def setUp(self):\n self.exercise = Exercise.objects.create(exercise_name=\"soccer\")\n self.exercise2 = Exercise.objects.create(exercise_name=\"yoga\")", "def test_place_instance(self):\n my_place = Place()\n my_place.first_name = \"Jerry\"\n my_place.last_name = \"Mouse\"\n 
my_place.email = \"jerry@holbertonshool.com\"\n my_place.password = \"root\"\n self.assertEqual(my_place.first_name, \"Jerry\")\n self.assertEqual(my_place.last_name, \"Mouse\")\n self.assertEqual(my_place.email, \"jerry@holbertonshool.com\")\n self.assertEqual(my_place.password, \"root\")", "def seed_city_distance():\n # should be 95 cities\n # select city, state from biz group by state, city\n # group by state, city\n cities = db.session.query(City)\n\n # find nearby cities (<50 miles)\n for city in cities:\n city1 = (city.lat, city.lng)\n for other_city in cities:\n if other_city != city:\n city2 = (other_city.lat, other_city.lng)\n # evaluate distance\n miles = vincenty(city1, city2).miles\n\n new_city_distance = CityDistance(city1_id=city.city_id,\n city2_id=other_city.city_id,\n miles=miles)\n db.session.add(new_city_distance)\n db.session.commit()\n return", "def setUp(self):\n self.category_name = 'Some category'\n self.category = Category(name=self.category_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure state_id is of str data type
def test_state_id_type(self): self.assertEqual(type(City.state_id), str)
[ "def create_state_id(self):\n for key, value in config.fips_dict.iteritems():\n if key == self.state.lower():\n state_num = value\n if state_num <=9:\n state_num = '0' + str(state_num)\n else:\n state_num = str(state_num)\n\n return 'st' + state_num", "def stid_str(self, stateid):\n stid = self.format(\"{0:crc16}\", stateid)\n return self.stid_map.get(stateid, stid)", "def setStateId(self, stateid: 'char const *') -> \"void\":\n return _coin.ScXMLInExprDataObj_setStateId(self, stateid)", "def test_type_of_id_is_str(self):\n b = BaseModel()\n self.assertTrue(type(b.id) is str)", "def _check_id(self, samplet_id):\n\n if not isinstance(samplet_id, str):\n return str(samplet_id)\n else:\n return samplet_id", "def test_str_cityid(self):\n self.assertEqual(str, type(Place().city_id))", "def state_transform(state):\n if isinstance(state, str):\n return np.array([int(s) for s in state])\n else:\n return str(state)[1:-1].replace(' ', '')", "def test_state_id_default(self):\n c = City()\n self.assertEqual(\"\", c.state_id)", "def generateStateOCDID(state):\n ocdid = TURBOVOTE_BASEOCDID\n ocdid += TURBOVOTE_STATEOCDID\n ocdid += state.lower()\n\n return ocdid", "def test_place_id(self):\n self.assertTrue(type(x.place_id) == str)", "def state2str(state: Union[dict, str]) -> str:\n\n if type(state) is str:\n return state\n\n return \"\".join([str(state[x]) for x in sorted(state)])", "def getState(id):", "def state_string(self):\n return SupvisorsStates._to_string(self.state)", "def __is_valid__(self, state):\n return False", "def check_id(id_: str) -> None:\n if id_ == \"\" or all_base62.search(id_) is None:\n raise ConversionError(f\"Invalid id: {id_!r}!\")", "def test_location_string(self):\n location = models.Location.objects.create(\n user=sample_user(),\n loc_id=23111,\n loc_name='Johnson Farm',\n )\n self.assertEqual(str(location), str(location.loc_id))", "def state_to_usercode(state: str) -> Optional[str]:\n if state.startswith(\"DEVICE-FLOW\"):\n return state.split(\" \")[1]\n return None", "def str_state(self):\n return self.IMAGE_STATES[int(self.state)]", "def state_fips(state):\n if state == \"Texas\":\n return '48'", "def int_id_str(self, user_info):\n user_info['id_str'] = int(user_info['id_str'])\n return user_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test passing kwargs to City instantiation
def test_kwargs(self): json_dict = self.c1.to_dict() c2 = City(**json_dict) self.assertEqual(self.c1.id, c2.id) self.assertEqual(self.c1.created_at, c2.created_at) self.assertEqual(self.c1.updated_at, c2.updated_at) self.assertNotEqual(self.c1, c2)
[ "def test_new_city(self):\n self.new_helper(\"City\")", "def __init__(self, city: str, postoffice: int):\n self.city = city\n self.postoffice = postoffice", "def setUp(self):\n name = \"SANFRANCISCO\"\n colour = \"blue\"\n connections = ['TOKYO', 'MANILA', 'LOSANGELES', 'CHICAGO']\n self.testCity = City(name=name,colour=colour,connections=connections)", "def test_get_city(self):\n self.assertTrue(get_city(\"Sydney, Australia\")==\"Sydney\")", "def test_geocode_city_state(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"golden, co\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.756655, .001) \n self.assertApxEqual(results[0].lng, -105.224949, .001)", "def test_save_city(self):\n self.save_helper(\"City\")", "def test_geocode_city(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"Denver\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.737567, .01)\n self.assertApxEqual(results[0].lng, -104.9847179, .01)", "def city_custom_handler(city):\n pass", "def test_invalid_city(self):\n\n invalid_cities_to_test = [\" Rosevill3 \", \"W@rr3n\", \"St. Cl@!r Sh0r3s\", \" \", \"_Tr0y\", \" W3st Br@nch\", \" !D3tr0!t\"]\n option = \"city\"\n\n for city in invalid_cities_to_test:\n self.database.city = city\n self.assertFalse(self.database.validate_cityInfo(option, self.database.city))", "def test_city(db):\n query = db.query(Event)\n query = query.filter(Event.year == 2013)\n query = query.filter(Event.month == 12)\n query = query.filter(Event.day == 4)\n event = query.one()\n assert event.city.name == 'Ostrava'", "def test_city_country(self):\n formatted_city_country = city_country('santiago', 'chile')\n self.assertEqual(formatted_city_country, 'Santiago, Chile')", "def test_str_cityid(self):\n self.assertEqual(str, type(Place().city_id))", "def test_str_City(self):\n kansas = City()\n string = \"[City] ({}) {}\".format(kansas.id, kansas.__dict__)\n self.assertEqual(string, str(kansas))", "def test_place_instance(self):\n my_place = Place()\n my_place.first_name = \"Jerry\"\n my_place.last_name = \"Mouse\"\n my_place.email = \"jerry@holbertonshool.com\"\n my_place.password = \"root\"\n self.assertEqual(my_place.first_name, \"Jerry\")\n self.assertEqual(my_place.last_name, \"Mouse\")\n self.assertEqual(my_place.email, \"jerry@holbertonshool.com\")\n self.assertEqual(my_place.password, \"root\")", "def setcity(ctx, city, cityid, timezone):\n logging.info('Setting City %s to %d', city, cityid)\n\n city_data = get_city_data()\n if city not in city_data:\n city_data[city] = {}\n city_data[city]['id'] = cityid\n city_data[city]['timezone'] = timezone\n write_city_data(city_data)", "def test_province_address_is_valid(self):\n new_caterer = Caterer()\n pass", "def test_valid_county(self):\n\n valid_county_to_test = [\"Macomb\", \"Saginaw\", \" Clinton\", \"Gratiot\", \"Ogemaw\", \"Huron\", \"Gladwin\"]\n option = \"county\"\n\n for county in valid_county_to_test:\n self.database.county = county\n self.assertTrue(self.database.validate_cityInfo(option, self.database.county))", "def test_cities(db):\n query = db.query(City)\n query = query.filter(City.name == 'Ostrava')\n city = query.one()\n assert city.slug == 'ostrava'\n assert city.events\n assert any(e.name == 'Ostravské KinoPyvo' for e in 
city.events)\n assert not any(e.name == 'Brněnské Pyvo' for e in city.events)", "def test_class_CityPopulation():\r\n test = CityPopulation('San Diego', 'California', '192083', '2090')\r\n test.addpopulation({'787':'10'})\r\n \r\n # test if the intialization and calling of addpopulation method works as intended\r\n assert test.getpop() == {'192083' : '2090',\r\n '787' : '10'}\r\n \r\n assert test.getcity() == 'San Diego'\r\n \r\n # test if getinfo() properly returns a list and proper values\r\n assert test.getinfo() == ['San Diego', 'California', {'192083' : '2090',\r\n '787' : '10'}]\r\n \r\n # test if getinfo() works with objects missing populationdict\r\n assert CityPopulation('Houston', 'Texas').getinfo()[0] == 'Houston'\r\n \r\n # testing with other index of list\r\n assert CityPopulation('Philadelphia', 'Pennsylvania').getinfo()[1] == 'Pennsylvania'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read near-Earth object information from a CSV file.
def load_neos(neo_csv_path="data/neos.csv"): neos = [] with open(neo_csv_path, 'r') as infile: reader = csv.DictReader(infile) for line in reader: neos.append(line) print('loaded NEO data') neo_collection = [] for neo in neos: neo_collection.append(NearEarthObject( designation=neo['pdes'], name=neo['name'], diameter=neo['diameter'], hazardous=neo['pha'])) print('created NearEarthObject collection') return neo_collection
[ "def load_neos(neo_csv_path):\n\n \"\"\" A list for keeping all the `NearEarthObject`s created from each CSV row \"\"\"\n neo_list = []\n\n with open(neo_csv_path, 'r') as neo_file_obj:\n reader = csv.DictReader(neo_file_obj)\n\n \"\"\" Reading each row in the CSV file, creating `NearEarthObject` and adding to the list \"\"\"\n for entry in reader:\n neo_list.append(NearEarthObject(**entry))\n\n return neo_list", "def load_neos(neo_csv_path) -> list:\n with open(neo_csv_path, 'r') as neo_file:\n neos_info = csv.DictReader(neo_file)\n neos_objects = [NearEarthObject(**neo) for neo in neos_info]\n\n return neos_objects", "def parse_places_from_csv(file: IO) -> Iterator[PlaceTuple]:\n data_reader = csv.reader(file, delimiter=\"\\t\")\n for row in data_reader:\n if row[0] and row[1] and row[4] and row[5] and row[7]:\n yield PlaceTuple(\n data_source=\"geonames\",\n source_id=int(row[0]),\n name=row[1],\n country=row[8],\n latitude=float(row[4]),\n longitude=float(row[5]),\n place_type=row[7],\n altitude=float(row[14]),\n srid=4326,\n )", "def getTrackCSV(csvFile):\n CSV = DictReader(open(csvFile))\n lon, lat, tyme = [], [], []\n for row in CSV:\n lon += [float(row['lon']),]\n lat += [float(row['lat']),]\n tyme += [isoparser(row['time']),]\n \n return ( array(lon), array(lat), array(tyme) )", "def _process_csv(filename):\n import csv\n\n node_dict, neighbor_dict = {}, {}\n\n with open(filename, \"r\") as csv_file:\n for row in csv.DictReader(csv_file):\n node = EuclideanNode(\n node_type=row['NodeType'],\n name=row['Name'],\n floor=row['Floor'],\n coord=eval(row['Coordinates'])\n )\n node_dict[row['Name']] = node\n neighbor_dict[row['Name']] = eval(row['Neighbors'])\n return node_dict, neighbor_dict", "def load_markers(file):\n\n with open(file) as csvfile:\n # next(f) # skip header row\n\n # for i, row in enumerate(f):\n # row = row.rstrip()\n # print row.split(\"\\t\")\n\n # 0)title, 1)description, 2)date, 3)date-tier, 4)time, \n # 5)name(venue), 6)foursquare_id, 7)neighborhood, 8)city, 9)address, \n # 10)latitude, 11)longitude, 12)cost, 13)img_url, 14)event_url, \n # 15)category, 16)marker_type, 17)marker_symbol, 18)marker_color,\n # 19) datetime_obj (new cell) \n\n csvreader = csv.reader(csvfile)\n\n # Skips the first header row of the CSV file\n next(csvreader)\n\n for i, row in enumerate(csvreader):\n\n if row[2]:\n date_str = str(row[2])\n datetime_obj = datetime.strptime(date_str, \"%B %d, %Y\")\n else:\n datetime_obj = None\n\n marker = Marker(title=row[0],\n address=row[9],\n latitude=row[10],\n longitude=row[11],\n date=row[2],\n date_tier=row[3],\n time=row[4],\n name=row[5],\n neighborhood=row[7],\n city=row[8],\n description=row[1],\n cost=row[12],\n img_url=row[13],\n event_url=row[14],\n category=row[15],\n marker_type=row[16],\n marker_symbol=row[17],\n marker_color=row[18],\n datetime=datetime_obj,\n foursquare_id=row[6])\n\n db.session.add(marker)\n\n if i % 10 == 0:\n print i\n\n db.session.commit()", "def open_csv():\n with open('log.csv', 'r') as csvfile:\n entry_info = ['name', 'date', 'time', 'note']\n log_reader = csv.DictReader(csvfile, fieldnames=entry_info, delimiter=',')\n entries = list(log_reader)\n return entries", "def parse_csv( fileLocation, Unique_Features=None ):\r\n\r\n print 'Parsing CSV: ', fileLocation\r\n\r\n headers = []\r\n\r\n # Change directory to the folder\r\n os.chdir( os.path.dirname( fileLocation) )\r\n\r\n # grab all the rows in the csv file\r\n coordFileToList = [ line for line in csv.reader(open( fileLocation, 'r')) ]\r\n\r\n # grabs the 
csv column headers \r\n headers = coordFileToList[0]\r\n\r\n # deletes list item, because it will get in the way later on if we don't\r\n del coordFileToList[0]\r\n\r\n # file name without the extension and minus the path\r\n rootFileName = os.path.splitext( os.path.basename( fileLocation ))[0]\r\n\r\n # If parsing csv for polylines or polygons\r\n if Unique_Features == True:\r\n\r\n # For kml_to_line and kml_to_polygon. Not used for kml_to_point. List \r\n # of names of the unique spatial feataures in csv file\r\n Unique_Features = list(set( [each[0] for each in coordFileToList ] ))\r\n\r\n return coordFileToList, rootFileName, headers, Unique_Features\r\n\r\n else:\r\n \r\n return coordFileToList, rootFileName, headers", "def import_from_csv(self, csv_file):\n reader = csv.reader(csv_file)\n\n self.variable_labels = next(reader, None)[1:]\n self.element_labels = []\n self.data = []\n\n data_mode = True\n for row in reader:\n if not any(row):\n if data_mode:\n data_mode = False\n continue\n else:\n if data_mode:\n self.element_labels.append(row[0])\n self.data.append([int(i) for i in row[1:]])\n else:\n self.weights = [int(i) for i in row[1:]]\n self.neg_min = [int(i) for i in next(reader, None)[1:]]\n self.pos_max = [int(i) for i in next(reader, None)[1:]]\n break", "def load_data(f):\n import csv\n with open(f, newline='') as csvfile:\n ecgreader = csv.reader(csvfile, delimiter=' ')\n time, voltage, high_voltages = organize_data(ecgreader, f)\n return time, voltage, high_voltages", "def read_kg_data(csv_file):\n print(f\"Started a model builder for data from: {csv_file}\")\n df = pd.read_csv(csv_file)\n df.columns = [\"h\", \"r\", \"t\"]\n entities = list(set(df[\"h\"].tolist() + df[\"t\"].tolist()))\n relations = list(set(df[\"r\"].tolist()))\n return entities, relations", "def read_file(self, file):\n self.rooms.clear()\n\n with open(file, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n self.rooms[row[\"room\"]] = row[\"neighbours\"].split('-')\n line_count += 1\n\n logging.info('Processed %d lines from %s', line_count, file)\n logging.debug(self.rooms)", "def read_from_csv(self, input_file, delimiter):\n\n # read CSV as UTF-8 encoded file (see also http://stackoverflow.com/a/844443)\n with codecs.open(input_file, encoding='utf8') as fp:\n logger.info(\"Reading venues from \" + input_file + \"...\")\n\n reader = csv.reader(fp, delimiter=delimiter)\n\n # read header\n header = next(reader, None)\n if not header:\n raise IllegalArgumentError(\"Missing header in CSV file.\")\n\n venue_index = header.index(\"venue\")\n year_index = header.index(\"year\")\n identifier_index = header.index(\"identifier\")\n\n # read CSV file\n for row in reader:\n if row:\n self.venues.append(\n Venue(row[venue_index], row[year_index], row[identifier_index])\n )\n else:\n raise IllegalArgumentError(\"Wrong CSV format.\")\n\n self.filename = os.path.basename(input_file)\n logger.info(str(len(self.venues)) + \" venues have been imported.\")", "def loadToday(today):\n import csv\n from datetime import datetime, time\n\n data_file = \"sunrise2020.csv\"\n file = open(data_file, newline='')\n reader = csv.reader(file)\n header = next(reader) # first line of file\n \n daylight = DayLight()\n \n daylight.load = False\n \n for row in reader:\n\n date = datetime.strptime(row[0],'%d/%m/%Y')\n if date == today:\n daylight.sunrise = datetime.strptime( row[0] +' '+ row[1],'%d/%m/%Y %H:%M')\n daylight.sunrise_bearing = float(row[2])\n daylight.sunset = 
datetime.strptime( row[0] +' '+ row[3],'%d/%m/%Y %H:%M')\n daylight.sunset_bearing = float(row[4])\n# ignore daylight.day_length = datetime.strptime(row[5],'%H:%M:%S')\n daylight.solar_noon = row[6]\n daylight.noon_elevation = float(row[7])\n daylight.load = True\n break\n else:\n pass\n\n return(daylight)", "def read_csv(self):\n with open(self.csv_train_path, 'r') as csv_file:\n exemples = csv.reader(csv_file, delimiter=';')\n\n for row in exemples:\n xi = []\n xi.append(float(row[0]))\n xi.append(float(row[1]))\n xi.append(float(row[2]))\n self.y.append(row[3])\n self.x.append(xi)\n self.number_of_features = len(self.x[0])", "def read_CSV(self):\n file = open(self.file_name, \"r\")\n self.data = {}\n self.header_adjustment(file)\n self.process_line_by_line(file)", "def test_import_artists_csv_1_columns(tmpdir):\n path = os.path.join(tmpdir, \"artists.csv\")\n with open(path, \"w\") as stream:\n stream.write(\"Artist1\\nArtist2\")\n\n actual = _io.import_artists_from_csv(path)\n assert actual == [\n Artist(\"Artist1\"),\n Artist(\"Artist2\"),\n ]", "def stream_ol_data(csv_file):\n # Read in our logfile into the completed list\n with open(csv_file, 'r') as working_file:\n print(f\"Working on file {csv_file}\")\n items = csv.DictReader(working_file)\n for item in items:\n if csv_file.find('entities') > 1 and \\\n (csv_file == ['incorporation_date'] == \"\" or len(item['incorporation_date']) > 10):\n item['incorporation_date'] = None\n yield {\n \"_index\": f\"ol{csv_file[csv_file.rfind('_'):-4]}\",\n \"_source\": item\n }", "def read_windmills_data(fname):\n try:\n with open(fname, 'r', newline='') as csvfile:\n # first count the lines\n reader = csv.DictReader(\n (row for row in csvfile if (\n not row.startswith('#') and\n not row.startswith('@') and not row.startswith(\" \")\n and row)),\n # fieldnames=[\n # 'Datum(Remote)', 'Uhrzeit(Remote)', 'Datum(Server)',\n # 'Uhrzeit(Server)', 'Zeitdifferenz', 'Windgeschwindigkeit',\n # 'Windgeschwindigkeit Max', 'Windgeschwindigkeit Min',\n # 'Rotordrehzahl', 'Rotordrehzahl Max', 'Rotordrehzahl Min',\n # 'Leistung', 'Leistung Max', 'Leistung Min',\n # 'Gondelposition', 'Windrichtung', 'Generator Umdr.',\n # 'Stop Fault', 'T Aussen', 'T Getriebe', 'T Lager A',\n # 'T Lager B', 'T Gondel', 'T Getriebelager',\n # 'T Wellenlager', 'Scheinleistung', 'cos phi',\n # 'Blindleistung', 'Spannung L1-N', 'Spannung L2-N',\n # 'Spannung L3-N', 'Strom L1', 'Strom L2', 'Strom L3',\n # 'Blattwinkel 1', 'Blattwinkel 2', 'Blattwinkel 3',\n # 'Blattwinkel 1 (Soll)', 'Blattwinkel 2 (Soll)',\n # 'Blattwinkel 3 (Soll)', 'cos phi (Soll)',\n # 'Betriebszustand', 'T Getriebelager B', 'Netz Freq.',\n # 'T Hydraulic Oil', 'T Gear Oil', 'Air Pressure',\n # 'Leistung Vorgabe', 'Blindleistung Vorgabe',\n # 'Statortemperatur L1', 'Statortemperatur L2',\n # 'Statortemperatur L3', 'xxx', 't (Innerhalb Windgrenzen)',\n # 'Active Power Reference Value', 'Exported active energy',\n # 'Exported active energy (red. op-mode)',\n # 'Setpoint in percent', 'Setpoint active power in percent',\n # 'Internal setpoint max power',\n # 'Internal setpoint stop WTG',\n # 'Internal setpoint start WTG',\n # 'Grid Possible Power (avg)',\n # 'Max. Grid Active Power (Setpoint)',\n # 'Min. 
Operatingstate', 'Wind Speed 2', 'Wind Speed 3',\n # 'Wind Direction 2', 'Relative Humidity',\n # 'T Generator Bearing DE', 'T Generator Bearing NDE',\n # 'Wind Speed 4', 'Wind Speed 5', 'Wind Speed 6',\n # 'Wind Speed 7', 'Wind Speed 8', 'Wind Direction 3',\n # 'Wind Direction 4', 'T Outside 2',\n # 'Wind Speed Sensor 1 (avg)', 'Wind Speed Sensor 1 (min)',\n # 'Wind Speed Sensor 1 (max)',\n # 'Wind Speed Sensor 1 (stddev)',\n # 'Wind Speed Sensor 2 (avg)', 'Wind Speed Sensor 2 (min)',\n # 'Wind Speed Sensor 2 (max)',\n # 'Wind Speed Sensor 2 (stddev)',\n # 'T Ground Controller (avg)', 'T Ground Controller (min)',\n # 'T Ground Controller (max)', 'T Ground Controller (std)',\n # 'T Top Controller (avg)', 'T Top Controller (min)',\n # 'T Top Controller (max)', 'T Top Controller (stddev)',\n # 'Ice Level', 'External setpoint power factor',\n # 'Setpoint power from grid operator',\n # 'Setpoint power from direct marketer',\n # 'Setpoint power from customer',\n # 'Setpoint active power controller',\n # 'T Gear Oil Inlet (avg)', 'T Gear Oil Inlet (min)',\n # 'T Gear Oil Inlet (max)', 'T Gear Oil Inlet (stddev)',\n # 'Calculated By ROTORsoft'],\n delimiter=';')\n nrows = sum(1 for row in reader)\n\n if nrows == 0:\n warn('No data in file ' + fname)\n return None\n\n dt_remote = np.ma.masked_all(nrows, dtype=datetime.datetime)\n dt_server = np.ma.masked_all(nrows, dtype=datetime.datetime)\n rotor_speed_avg = np.ma.masked_all(nrows, dtype=float)\n rotor_speed_min = np.ma.masked_all(nrows, dtype=float)\n rotor_speed_max = np.ma.masked_all(nrows, dtype=float)\n nacelle_pos = np.ma.masked_all(nrows, dtype=float)\n blade_angle_1 = np.ma.masked_all(nrows, dtype=float)\n blade_angle_2 = np.ma.masked_all(nrows, dtype=float)\n blade_angle_3 = np.ma.masked_all(nrows, dtype=float)\n t_outside = np.ma.masked_all(nrows, dtype=float)\n ice_level = np.ma.masked_all(nrows, dtype=float)\n\n # now read the data\n csvfile.seek(0)\n reader = csv.DictReader(\n (row for row in csvfile if (\n not row.startswith('#') and\n not row.startswith('@') and not row.startswith(\" \")\n and row)),\n # fieldnames=[\n # 'Datum(Remote)', 'Uhrzeit(Remote)', 'Datum(Server)',\n # 'Uhrzeit(Server)', 'Zeitdifferenz', 'Windgeschwindigkeit',\n # 'Windgeschwindigkeit Max', 'Windgeschwindigkeit Min',\n # 'Rotordrehzahl', 'Rotordrehzahl Max', 'Rotordrehzahl Min',\n # 'Leistung', 'Leistung Max', 'Leistung Min',\n # 'Gondelposition', 'Windrichtung', 'Generator Umdr.',\n # 'Stop Fault', 'T Aussen', 'T Getriebe', 'T Lager A',\n # 'T Lager B', 'T Gondel', 'T Getriebelager',\n # 'T Wellenlager', 'Scheinleistung', 'cos phi',\n # 'Blindleistung', 'Spannung L1-N', 'Spannung L2-N',\n # 'Spannung L3-N', 'Strom L1', 'Strom L2', 'Strom L3',\n # 'Blattwinkel 1', 'Blattwinkel 2', 'Blattwinkel 3',\n # 'Blattwinkel 1 (Soll)', 'Blattwinkel 2 (Soll)',\n # 'Blattwinkel 3 (Soll)', 'cos phi (Soll)',\n # 'Betriebszustand', 'T Getriebelager B', 'Netz Freq.',\n # 'T Hydraulic Oil', 'T Gear Oil', 'Air Pressure',\n # 'Leistung Vorgabe', 'Blindleistung Vorgabe',\n # 'Statortemperatur L1', 'Statortemperatur L2',\n # 'Statortemperatur L3', 'xxx', 't (Innerhalb Windgrenzen)',\n # 'Active Power Reference Value', 'Exported active energy',\n # 'Exported active energy (red. op-mode)',\n # 'Setpoint in percent', 'Setpoint active power in percent',\n # 'Internal setpoint max power',\n # 'Internal setpoint stop WTG',\n # 'Internal setpoint start WTG',\n # 'Grid Possible Power (avg)',\n # 'Max. Grid Active Power (Setpoint)',\n # 'Min. 
Operatingstate', 'Wind Speed 2', 'Wind Speed 3',\n # 'Wind Direction 2', 'Relative Humidity',\n # 'T Generator Bearing DE', 'T Generator Bearing NDE',\n # 'Wind Speed 4', 'Wind Speed 5', 'Wind Speed 6',\n # 'Wind Speed 7', 'Wind Speed 8', 'Wind Direction 3',\n # 'Wind Direction 4', 'T Outside 2',\n # 'Wind Speed Sensor 1 (avg)', 'Wind Speed Sensor 1 (min)',\n # 'Wind Speed Sensor 1 (max)',\n # 'Wind Speed Sensor 1 (stddev)',\n # 'Wind Speed Sensor 2 (avg)', 'Wind Speed Sensor 2 (min)',\n # 'Wind Speed Sensor 2 (max)',\n # 'Wind Speed Sensor 2 (stddev)',\n # 'T Ground Controller (avg)', 'T Ground Controller (min)',\n # 'T Ground Controller (max)', 'T Ground Controller (std)',\n # 'T Top Controller (avg)', 'T Top Controller (min)',\n # 'T Top Controller (max)', 'T Top Controller (stddev)',\n # 'Ice Level', 'External setpoint power factor',\n # 'Setpoint power from grid operator',\n # 'Setpoint power from direct marketer',\n # 'Setpoint power from customer',\n # 'Setpoint active power controller',\n # 'T Gear Oil Inlet (avg)', 'T Gear Oil Inlet (min)',\n # 'T Gear Oil Inlet (max)', 'T Gear Oil Inlet (stddev)',\n # 'Calculated By ROTORsoft'],\n delimiter=';')\n\n for i, row in enumerate(reader):\n if 'Datum(Remote)' in row and 'Uhrzeit(Remote)' in row:\n dt_remote[i] = datetime.datetime.strptime(\n row['Datum(Remote)'] + ' ' + row['Uhrzeit(Remote)'],\n '%d.%m.%Y %H:%M:%S')\n dt_server[i] = datetime.datetime.strptime(\n row['Datum(Server)'] + ' ' + row['Uhrzeit(Server)'],\n '%d.%m.%Y %H:%M:%S')\n else:\n dt_remote[i] = datetime.datetime.strptime(\n row['Datum (Anlage)'] + ' ' + row['Zeit (Anlage)'],\n '%d.%m.%Y %H:%M:%S')\n rotor_speed_avg[i] = float(\n row['Rotordrehzahl'].replace(',', '.'))\n rotor_speed_min[i] = float(\n row['Rotordrehzahl Max'].replace(',', '.'))\n rotor_speed_max[i] = float(\n row['Rotordrehzahl Min'].replace(',', '.'))\n nacelle_pos[i] = float(\n row['Gondelposition'].replace(',', '.'))\n blade_angle_1[i] = float(\n row['Blattwinkel 1'].replace(',', '.'))\n blade_angle_2[i] = float(\n row['Blattwinkel 2'].replace(',', '.'))\n blade_angle_3[i] = float(\n row['Blattwinkel 3'].replace(',', '.'))\n t_outside[i] = float(\n row['T Aussen'].replace(',', '.'))\n ice_level[i] = float(\n row['Ice Level'].replace(',', '.'))\n\n csvfile.close()\n\n windmill_dict = {\n 'dt_remote': dt_remote,\n 'dt_server': dt_server,\n 'rotor_speed_avg': rotor_speed_avg,\n 'rotor_speed_min': rotor_speed_min,\n 'rotor_speed_max': rotor_speed_max,\n 'nacelle_pos': nacelle_pos,\n 'blade_angle_1': blade_angle_1,\n 'blade_angle_2': blade_angle_2,\n 'blade_angle_3': blade_angle_3,\n 't_outside': t_outside,\n 'ice_level': ice_level,\n }\n\n return windmill_dict\n\n except EnvironmentError as ee:\n warn(str(ee))\n warn('Unable to read file ' + fname)\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse artist name for URL insertion.
def parse_artist_name(artist_name: str) -> str: split_artist_name = artist_name.split(" ") if len(split_artist_name) > 1: parsed_artist_name = "+".join(split_artist_name) return parsed_artist_name else: return artist_name
[ "def _artisturl(self):\n if self.metadata[\"albumartist\"] <> \"Various Artists\":\n self.album._requests += 1\n #sys.stderr.write(self.cfg['abetterpath_http_echonest_host'] + \":\" + self.cfg['abetterpath_http_echonest_port'] + self.urls['echonest_artist_url'] + \"\\n\")\n self.album.tagger.xmlws.get(self.cfg['abetterpath_http_echonest_host'], self.cfg['abetterpath_http_echonest_port'], self.urls['echonest_artist_url'], partial(self._processurls))", "def parse_artist(html_text: str) -> str:\n # parse HTML for artists\n m = re.search(ARTIST_CCLI_REGEX, html_text, re.M)\n if m is not None:\n artists = re.findall(GET_ARTISTS_REGEX, m.group(0), re.M)\n if len(artists) > 0: # artists found\n return \", \".join(artists)\n else: # general tags found, but no artists parsed\n p_warning(\"author tags found, but composer not extracted in GET request.\")\n return DEFAULT_HEADER[\"composer\"]\n p_warning(\"composer not found in GET request.\")\n return DEFAULT_HEADER[\"composer\"]", "def get_artist_from_tracklist(self, tracklistURL):\r\n name = self.execute_string(\"\"\"\r\n PREFIX etree:<http://etree.linkedmusic.org/vocab/>\r\n PREFIX mo:<http://purl.org/ontology/mo/>\r\n PREFIX event:<http://purl.org/NET/c4dm/event.owl#>\r\n PREFIX skos:<http://www.w3.org/2004/02/skos/core#>\r\n PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\r\n SELECT DISTINCT ?name WHERE \r\n {{ \r\n <{0}> mo:performer ?performer.\r\n ?performer foaf:name ?name.\r\n }} LIMIT 1\r\n \"\"\".format(tracklistURL))\r\n\r\n return name['results']['bindings'][0]['name']['value']", "def find_artist_wikiname(artist_name: str) -> Tuple[str, str]:\n potential_matches = []\n\n logger.info(f'Search matches for {artist_name}')\n first_letter = artist_name[0].lower()\n url = BASE_URL + f'/Alphabet/{first_letter}/text-list'\n req = requests.get(url)\n regex_artist = r'<a href=\"/en/(.*?)\">(.*?)</a>'\n list_artists = re.findall(regex_artist, req.text)\n for artist in list_artists:\n if artist_name in artist[1]:\n potential_matches.append(artist)\n\n if not potential_matches:\n raise DownloadArtistException(f'{artist_name} - Found no match')\n elif len(potential_matches) > 1:\n raise DownloadArtistException(f'{artist_name} - Found multiple matches: {potential_matches}')\n elif len(potential_matches) == 1:\n logger.info(f'{artist_name} - Found 1 match: {potential_matches[0]}')\n\n url_artist_name = html.unescape(potential_matches[0][0])\n artist_wiki_name = html.unescape(potential_matches[0][1])\n\n return url_artist_name, artist_wiki_name", "def artist_uri(self, artist_uri):\r\n self.data['artist_uri'] = artist_uri", "def deriveArtistFromName(name):\n if not name:\n return name\n removeParts = [\" ft. \", \" ft \", \" feat \", \" feat. 
\"]\n for removePart in removeParts:\n i = name.lower().find(removePart)\n if i > -1:\n name = name[:i]\n return string.capwords(name)", "def parse_artists_node( artists_node ):\n\n return parse_simple_node_list( artists_node, \"Artist\", \"name\" )", "def clean_artist(artist):\n\n # Converts artist(s) name string to lowercase.\n cleaned_artist = artist.lower()\n\n # Cleans out the artist's name by removing the substrings specified above.\n for item in CLUTTERERS:\n cleaned_artist = cleaned_artist.replace(item, \" \")\n return cleaned_artist", "def _init_artist(self):\n self.artist = self.soup.find_all('h3', 'lyric-artist')[0].contents[0].string", "def get_urlname(cls, name):\n return '-'.join(''.join(ch for ch in word if ch.isalnum()) \\\n for word in name.split())", "def translate_artist(name, sortname):\n for c in name:\n ctg = unicodedata.category(c)\n if ctg[0] not in (\"P\", \"Z\") and ctg != \"Nd\" and unicodedata.name(c).find(\"LATIN\") == -1:\n return \" & \".join(map(_reverse_sortname, sortname.split(\"&\")))\n return name", "def new_artist( self, artist_name ):\n\n if artist_name in self.art_fields[\"artists\"]:\n raise ValueError( \"'{:s}' is already an artist in the database.\".format( artist_name ) )\n\n # find the first position where the new artist sorts (insensitively)\n # after everything before it.\n #\n # NOTE: we don't use something like the bisect module so as to\n # preserve the existing order of the artists, which may or may\n # not be sorted.\n #\n for index, existing_artist_name in enumerate( self.art_fields[\"artists\"] ):\n if artist_name.lower() < existing_artist_name.lower():\n break\n\n self.art_fields[\"artists\"].insert( index, artist_name )\n\n self.mark_data_dirty()", "def full_album_name(artist_name, song_name):\n full_name = {'artist name': artist_name, 'song_name': song_name}\n return full_name", "def getTrackArtist(self):\n return (self.artist or '').strip()", "def _extract_album_name(self):\n return self._get_child('album').text", "def author_title_from_filename(self,filename):\n filename = filename.replace('.mp3','')\n filename = filename.replace('_',' ')\n parts = filename.split(' - ')\n self.author = parts[0]\n self.title = parts[1]", "def get_artist_song(r):\n h = html.fromstring(r.text)\n song = h.find_class('header_with_cover_art-primary_info-title')[0].text.title()\n artist = h.find_class('header_with_cover_art-primary_info-primary_artist')[0].text.title()\n return artist, song", "def parse_artists(artist_credits):\n artists = []\n is_guest = False\n for artist in artist_credits:\n if artist == \" feat. \":\n is_guest = True\n elif isinstance(artist, dict):\n artists.append((artist[\"artist\"][\"name\"], \"guest\" if is_guest else \"main\"))\n return artists", "def get_artist(name):\n results = sp.search(q='artist:' + name, type='artist')\n items = results['artists']['items']\n if len(items) > 0:\n return items[0]\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The core assumption for simplest cases is that one system will be featurized as a single tensor, and that all the tensors will be of the same shape across systems.
def test_datasetprovider_exporter_single_tensor_same_shape():
    from kinoml.core.ligands import Ligand
    from kinoml.features.ligand import MorganFingerprintFeaturizer
    from kinoml.features.core import Concatenated

    conditions = AssayConditions()
    systems = [LigandSystem([Ligand(smiles=smiles)]) for smiles in ("CCCCC", "CCCCCCCC")]
    measurements = [
        BaseMeasurement(50, conditions=conditions, system=systems[0]),
        BaseMeasurement(30, conditions=conditions, system=systems[1]),
    ]
    dataset = DatasetProvider(measurements=measurements)
    featurizer1 = MorganFingerprintFeaturizer(radius=2, nbits=512, use_multiprocessing=False)
    featurizer2 = MorganFingerprintFeaturizer(radius=2, nbits=1024, use_multiprocessing=False)
    aggregated = Concatenated([featurizer1, featurizer2], axis=1)
    aggregated.featurize(dataset.systems)
    for system in systems:
        assert system.featurizations["last"].shape[0] == (1024 + 512)
    # With a single tensor per system, we build a unified X tensor
    # First dimension in X and y must match
    X, y = dataset.to_numpy()
    assert X.shape[:2] == (2, (1024 + 512))
    assert X.shape[0] == y.shape[0]
    # With dict_of_arrays and single tensor per system,
    # the behavior is essentially the same
    arrays = dataset.to_dict_of_arrays()
    assert sorted(arrays.keys()) == ["X", "y"]
    assert (arrays["X"] == X).all()
    assert (arrays["y"] == y).all()
[ "def __infer_existing_tensors(self, F) -> None:\n for attr_name, types_with_attr in F.get_feature_list().items():\n for vt in types_with_attr:\n attr_dtype = F.get_data(np.array([0]), vt, attr_name).dtype\n self.create_named_tensor(\n attr_name=attr_name,\n properties=None,\n vertex_type=vt,\n dtype=attr_dtype,\n )", "def create_statistical_feature_net(seq_length: int, num_channels: int, num_features: int) -> Functional:\n\n def compute_stats(input_data: np.ndarray) -> Tensor:\n \"\"\"\n Computes the stats.\n\n :param input_data: The input data which is represented as a numpy array.\n :return: A tensor object which contains data about the stats.\n \"\"\"\n mean: Tensor = keras_backend.mean(input_data, axis=1, keepdims=True)\n standard_deviation: Tensor = keras_backend.std(input_data, axis=1, keepdims=True)\n variance: Tensor = keras_backend.var(input_data, axis=1, keepdims=True)\n\n # Zero: I think that this is where the error is, although I don't know for sure!\n # If it breaks I can just copy-paste code from an old commit and trial error until it runs\n x_max: Tensor = keras_backend.reshape(keras_backend.max(input_data, axis=1), (-1, 1, num_channels))\n x_min: Tensor = keras_backend.reshape(keras_backend.min(input_data, axis=1), (-1, 1, num_channels))\n p2p: Tensor = tf.subtract(x_max, x_min)\n amp: Tensor = tf.subtract(x_max, mean)\n rms: Tensor = keras_backend.reshape(\n keras_backend.sqrt(tf.reduce_sum(keras_backend.pow(input_data, 2), 1)), (-1, 1, num_channels))\n s2e: Tensor = keras_backend.reshape(\n tf.subtract(input_data[:, seq_length - 1, :], input_data[:, 0, :]), (-1, 1, num_channels))\n\n full_vec: Tensor = keras_backend.concatenate((mean, standard_deviation))\n full_vec: Tensor = keras_backend.concatenate((full_vec, variance))\n full_vec: Tensor = keras_backend.concatenate((full_vec, x_max))\n full_vec: Tensor = keras_backend.concatenate((full_vec, x_min))\n full_vec: Tensor = keras_backend.concatenate((full_vec, p2p))\n full_vec: Tensor = keras_backend.concatenate((full_vec, amp))\n full_vec: Tensor = keras_backend.concatenate((full_vec, rms))\n full_vec: Tensor = keras_backend.concatenate((full_vec, s2e))\n full_vec: Tensor = keras_backend.reshape(full_vec, (-1, num_features * num_channels))\n\n return full_vec\n\n def output_of_stat_layer(input_shape: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"\n Gets the output of the stat layer.\n\n :param input_shape: The shape of the input.\n :return: The output of the stat layer as a tuple of integers (int, int).\n \"\"\"\n return input_shape[0], input_shape[1] * num_features\n\n shape: Tuple[int, int] = (seq_length, num_channels)\n model_in: KerasTensor = Input(shape=shape)\n model_out: Layer = Lambda(compute_stats, output_shape=output_of_stat_layer)(model_in)\n\n model: Functional = Model(inputs=model_in, outputs=model_out, name=\"SFN\")\n return model", "def input_tensors(self):\n pass", "def test_atomic_conv_variable():\n frag1_num_atoms = 1000\n frag2_num_atoms = 1200\n complex_num_atoms = frag1_num_atoms + frag2_num_atoms\n batch_size = 1\n atomic_convnet = atomic_conv.AtomicConvModel(\n n_tasks=1,\n batch_size=batch_size,\n layer_sizes=[\n 10,\n ],\n frag1_num_atoms=frag1_num_atoms,\n frag2_num_atoms=frag2_num_atoms,\n complex_num_atoms=complex_num_atoms)\n\n # Creates a set of dummy features that contain the coordinate and\n # neighbor-list features required by the AtomicConvModel.\n features = []\n frag1_coords = np.random.rand(frag1_num_atoms, 3)\n frag1_nbr_list = {i: [] for i in range(frag1_num_atoms)}\n frag1_z = 
np.random.randint(10, size=(frag1_num_atoms))\n frag2_coords = np.random.rand(frag2_num_atoms, 3)\n frag2_nbr_list = {i: [] for i in range(frag2_num_atoms)}\n frag2_z = np.random.randint(10, size=(frag2_num_atoms))\n system_coords = np.random.rand(complex_num_atoms, 3)\n system_nbr_list = {i: [] for i in range(complex_num_atoms)}\n system_z = np.random.randint(10, size=(complex_num_atoms))\n\n features.append(\n (frag1_coords, frag1_nbr_list, frag1_z, frag2_coords, frag2_nbr_list,\n frag2_z, system_coords, system_nbr_list, system_z))\n features = np.asarray(features)\n labels = np.zeros(batch_size)\n train = NumpyDataset(features, labels)\n atomic_convnet.fit(train, nb_epoch=1)\n preds = atomic_convnet.predict(train)\n assert preds.shape == (1, 1, 1)\n assert np.count_nonzero(preds) > 0", "def _apply_operation(self, dist_o, world):\n # Batch, X, Y, Z, F\n before = tf.reduce_mean(tf.reduce_mean(world, -1, keep_dims=True) / \n tf.reduce_max(world), 1) # Collapse Y\n self.img_op = tf.concat((before, before, before), -1)\n print_shape(world, \"world input\")\n world = tf.reshape(world, [self.config.batch_size, -1, 1])\n print_shape(world, \"world reshape\")\n operation = tf.matmul(dist_o, self.op_embeddings.embeddings)\n world = tf.matmul(world, tf.reshape(operation,\n [self.config.batch_size, 1, self.config.pixel_dim]))\n\n print_shape(world, \"world postop\")\n world = tf.reshape(world, [self.config.batch_size, self.config.rep_dim_y, \n self.config.rep_dim, self.config.rep_dim, -1])\n print_shape(world, \"world reshape\")\n\n w = {\n 'c1': conv_w(self.config.kernel_size_y, \n self.config.kernel_size, \n self.config.kernel_size, \n self.config.pixel_dim, \n self.config.hidden_dim, \n 'filt', reg=self.config.regularizer),\n 'c2': conv_w(max(self.config.kernel_size_y - 2, 1), \n max(self.config.kernel_size - 2, 1), \n max(self.config.kernel_size - 2, 1), \n self.config.hidden_dim, \n self.config.hidden_dim, \n 'filt2', reg=self.config.regularizer)\n\n }\n world = conv3d('conv', world, w['c1'],\n non_linearity=self.config.non_linearity,\n batch_norm=self.config.batch_norm,\n training_phase=self.vision_bn_phase)\n print_shape(world, \"world c1\")\n world = conv3d('conv2', world, w['c2'], b=None,\n non_linearity=self.config.non_linearity,\n batch_norm=self.config.batch_norm,\n training_phase=self.vision_bn_phase)\n print_shape(world, \"world c2\")\n\n after = tf.reduce_mean(tf.reduce_mean(world, -1, keep_dims=True) / \n tf.reduce_max(world), 1) # Collapse Y\n tf.summary.image(\"att_op\", self.img_op)\n print_shape(world, \"post op\")\n return world", "def _scalarize(self, transformed_multi_objectives: tf.Tensor) -> tf.Tensor:", "def __init_tensors(self, im_shape):\n self.__init_tensor_register()\n self.__init_input(im_shape)", "def build(self, hp, inputs=None):\n input_node = nest.flatten(inputs)\n\n # expland all the tensors to 3D tensor \n for idx, node in enumerate(input_node):\n if len(node.shape) == 1:\n input_node[idx] = tf.expand_dims(tf.expand_dims(node, -1), -1)\n elif len(node.shape) == 2:\n input_node[idx] = tf.expand_dims(node, 1) \n elif len(node.shape) > 3:\n raise ValueError(\n \"Unexpected inputs dimensions %d, expect to be smaller than 3\" % len(node.shape)\n )\n\n # align the embedding_dim of input nodes if they're not the same\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim',\n [4, 8, 16],\n default=8)\n output_node = [tf.keras.layers.Dense(embedding_dim)(node)\n if node.shape[2] != embedding_dim else node for node in input_node]\n output_node = 
tf.concat(output_node, axis=1)\n\n att_embedding_dim = self.att_embedding_dim or hp.Choice('att_embedding_dim',\n [4, 8, 16],\n default=8)\n head_num = self.head_num or hp.Choice('head_num',\n [1, 2, 3, 4],\n default=2)\n residual = self.residual or hp.Choice('residual',\n [True, False],\n default=True)\n outputs = []\n for _ in range(head_num):\n query = tf.keras.layers.Dense(att_embedding_dim, use_bias=False)(output_node) \n key = tf.keras.layers.Dense(att_embedding_dim, use_bias=False)(output_node) \n value = tf.keras.layers.Dense(att_embedding_dim, use_bias=False)(output_node) \n \n outputs.append(self._scaled_dot_product_attention(query, key, value))\n\n outputs = tf.concat(outputs, axis=2)\n\n if self.residual:\n outputs += tf.keras.layers.Dense(att_embedding_dim * head_num, use_bias=False)(output_node)\n \n return output_node", "def testValidateTensorsSingleY(self):\n x_steps = 1000\n batch_size = 1\n base_instance = base.TF1CoreSaliency(self.graph,\n self.sess,\n self.y_indexed,\n self.x)\n \n # Check validate doesn't throw any ValueError\n base_instance.validate_xy_tensor_shape(x_steps, batch_size)", "def test_atomic_conv():\n # For simplicity, let's assume both molecules have same number of\n # atoms.\n N_atoms = 5\n batch_size = 1\n atomic_convnet = atomic_conv.AtomicConvModel(n_tasks=1,\n batch_size=batch_size,\n layer_sizes=[10],\n frag1_num_atoms=5,\n frag2_num_atoms=5,\n complex_num_atoms=10,\n dropouts=0.0,\n learning_rate=0.003)\n\n # Creates a set of dummy features that contain the coordinate and\n # neighbor-list features required by the AtomicConvModel.\n features = []\n frag1_coords = np.random.rand(N_atoms, 3)\n frag1_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}\n frag1_z = np.random.randint(10, size=(N_atoms))\n frag2_coords = np.random.rand(N_atoms, 3)\n frag2_nbr_list = {0: [], 1: [], 2: [], 3: [], 4: []}\n frag2_z = np.random.randint(10, size=(N_atoms))\n system_coords = np.random.rand(2 * N_atoms, 3)\n system_nbr_list = {\n 0: [],\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n 6: [],\n 7: [],\n 8: [],\n 9: []\n }\n system_z = np.random.randint(10, size=(2 * N_atoms))\n\n features.append(\n (frag1_coords, frag1_nbr_list, frag1_z, frag2_coords, frag2_nbr_list,\n frag2_z, system_coords, system_nbr_list, system_z))\n features = np.asarray(features)\n labels = np.random.rand(batch_size)\n train = NumpyDataset(features, labels)\n atomic_convnet.fit(train, nb_epoch=150)\n assert np.allclose(labels, atomic_convnet.predict(train), atol=0.01)", "def _merge_concat_helper(spt1, spt2, feature_concat=False):\n assert feature_concat == True or spt1.features.shape[1] == spt2.features.shape[1], \\\n \"length of features must match when feature_concat == False\"\n assert all(s1 == s2 for s1, s2 in zip(spt1.spatial_shape, spt2.spatial_shape)), \\\n \"spatial shape of tensors must match\"\n assert spt1.batch_size == spt2.batch_size, \"batch size of tensors must match\"\n\n # resolve indices\n indices_concat = torch.cat((spt1.indices, spt2.indices))\n indices_unique, inverse_index, counts = torch.unique(indices_concat, dim=0, return_inverse=True, return_counts=True)\n indices = indices_unique\n\n # resolve features\n if feature_concat:\n features = torch.zeros(len(indices_unique),\n spt1.features.shape[1] + spt2.features.shape[1],\n dtype=spt1.features.dtype,\n device=spt1.features.device)\n features[inverse_index[:spt1.features.shape[0]], :spt1.features.shape[1]] = spt1.features\n features[inverse_index[spt1.features.shape[0]:], spt1.features.shape[1]:] = spt2.features\n else:\n 
features = torch.zeros(len(indices_unique),\n spt1.features.shape[1],\n dtype=spt1.features.dtype,\n device=spt1.features.device)\n features[inverse_index[:spt1.features.shape[0]]] += spt1.features\n features[inverse_index[spt1.features.shape[0]:]] += spt2.features\n # features[counts == 2] /= 2.0 # should features be averaged???\n\n spatial_shape = spt1.spatial_shape\n batch_size = spt1.batch_size\n\n return spconv.SparseConvTensor(features, indices, spatial_shape, batch_size)", "def squeeze_net(input, classes):\n\n weights = {'conv1': tf.Variable(tf.truncated_normal([7, 7, 3, 96])),\n 'conv10': tf.Variable(tf.truncated_normal([1, 1, 512, classes]))}\n\n biases = {'conv1': tf.Variable(tf.truncated_normal([96])),\n 'conv10': tf.Variable(tf.truncated_normal([classes]))}\n\n output = tf.nn.conv2d(input, weights['conv1'], strides=[1,2,2,1], padding='SAME', name='conv1')\n output = tf.nn.bias_add(output, biases['conv1'])\n\n output = tf.nn.max_pool(output, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='maxpool1')\n\n output = fire_module(output, s1=16, e1=64, e3=64, channel=96, fire_id='fire2')\n output = fire_module(output, s1=16, e1=64, e3=64, channel=128, fire_id='fire3')\n output = fire_module(output, s1=32, e1=128, e3=128, channel=128, fire_id='fire4')\n\n output = tf.nn.max_pool(output, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='maxpool4')\n\n output = fire_module(output, s1=32, e1=128, e3=128, channel=256, fire_id='fire5')\n output = fire_module(output, s1=48, e1=192, e3=192, channel=256, fire_id='fire6')\n output = fire_module(output, s1=48, e1=192, e3=192, channel=384, fire_id='fire7')\n output = fire_module(output, s1=64, e1=256, e3=256, channel=384, fire_id='fire8')\n\n output = tf.nn.max_pool(output, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='maxpool8')\n\n output = fire_module(output, s1=64, e1=256, e3=256, channel=512, fire_id='fire9')\n\n output = tf.nn.dropout(output, keep_prob=0.5, name='dropout9')\n\n output = tf.nn.conv2d(output, weights['conv10'], strides=[1, 1, 1, 1], padding='SAME', name='conv10')\n output = tf.nn.bias_add(output, biases['conv10'])\n\n output = tf.nn.avg_pool(output, ksize=[1, 13, 13, 1], strides=[1, 2, 2, 1], padding='SAME', name='avgpool10')\n\n return output", "def patch_base_cnn_model_fn(features, labels, mode): \n\n\tinput_layer = tf.reshape(features[\"x\"], [-1, 101, 101, 3])\n\n# ------------------ Layer1 -------------------------\n\tconv1 = tf.layers.conv2d(\n\t\tinputs=input_layer,\n\t\tfilters=80,\n\t\tkernel_size=[6,6],\n\t\tstrides=[1,1],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\tlrn1 = tf.nn.local_respose_normalsation(\t\t\t\t\t\t#TODO\n\t\tinputs=conv1,\n\t\tdepth_radius=5,\n\t\tbias=1,\n\t\talpha=1,\n\t\tbeta=0.5,\n\t\tname=None)\n\n\tpool1 = tf.layers.max_pooling2d(\n\t\tinputs=lrn1,\n\t\tpool_size=[2,2],\n\t\tstrides=2)\n\n# ------------------ Layer2 -------------------------\n\tconv2 = tf.layers.conv2d(\n\t\tinputs=pool1,\n\t\tfilters=120,\n\t\tkernel_size=[5,5],\n\t\tstrides=[1,1],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\tlrn2 = tf.nn.local_respose_normalsation(\t\t\t\t\t\t#TODO\n\t\tinputs=conv2,\n\t\tdepth_radius=5,\n\t\tbias=1,\n\t\talpha=1,\n\t\tbeta=0.5,\n\t\tname=None)\n\n\tpool2 = tf.layers.max_pooling2d(\n\t\tinputs=lrn2,\n\t\tpool_size=[2,2],\n\t\tstrides=2)\n\n# ------------------ Layer3 -------------------------\n\tconv3 = 
tf.layers.conv2d(\n\t\tinputs=pool2,\n\t\tfilters=160,\n\t\tkernel_size=[3,3],\n\t\tstrides=(1,1),\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n# ------------------ Layer4 -------------------------\n\tconv4 = tf.layers.conv2d(\n\t\tinputs=conv3,\n\t\tfilters=200,\n\t\tkernel_size=[3,3],\n\t\tstrides=(1,1),\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\tpool4 = tf.layers.max_pooling2d(\n\t\tinputs=conv4,\n\t\tpool_size=[3,3],\n\t\tstrides=2)\n\n# ------------------ Dense Layer 1-------------------------\n\tpool4_flat = tf.reshape(pool4, [-1, 9*9*200])\n\tdense_layer1 = tf.layers.dense(\n\t\tinputs=pool4_flat,\n\t\tunits=320,\n\t\tactivation=tf.nn.relu)\n\n\tdropout1 = tf.layers.dropout(\n\t\tinputs=dense_layer1,\n\t\trate=0.5,\t\t\t\t\t\t\t\t\t\t\t\t\t\t#FIXME\n\t\ttraining=mode==tf.estimator.ModeKeys.TRAIN)\n\n# ------------------ Dense Layer 2-------------------------\n\tdense_layer2 = tf.layers.dense(\n\t\tinputs=dropout1,\n\t\tunits=320,\n\t\tactivation=tf.nn.relu)\t\n\n\tdropout2 = tf.layers.dropout(\n\t\tinputs=dense_layer2,\n\t\trate=0.5,\t\t\t\t\t\t\t\t\t\t\t\t\t\t#FIXME\n\t\ttraining=mode==tf.estimator.ModeKeys.TRAIN)\n\n# ------------------ Logits Layer -------------------------\n\tlogits = tf.layers.dense(\n\t\tinputs=dropout2,\n\t\tunits=2,\n\t\tactivation=None)\n\n\n#--------------- mode = PRED -----------------#\n\tpredictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n \t}\n\n \tif mode == tf.estimator.ModeKeys.PREDICT:\n\t return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\n#--------------- mode = TRAIN and EVAL -----------------#\n\tonehot_labels = tf.one_hot(\n\t\t\t\t\tindices=tf.cast(labels, tf.int32), \n\t\t\t\t\tdepth=10)\t\t\t\t\t\t\t\t\t# Number of classes\t\t\t\t\t\t\n\t\n\tloss = tf.losses.softmax_cross_entropy(\n\t\t\t\t\tonehot_labels=onehot_labels,\n\t\t\t\t\tlogits=logits)\t\t\t\t\t\t\t\t# Logits are taken as input not their softmax probabilities \n\n\n\t# Training Mode\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n\t\ttrain_op = optimizer.minimize(\n\t\t\t loss=loss,\n\t\t\t\t\tglobal_step=tf.train.get_global_step())\n\t return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n\n\t# Eval mode\n\tif mode == tf.estimator.ModeKeys.EVAL:\n\t\teval_metric_ops = {\n\t\t\t\t\"accuracy\": tf.metrics.accuracy(labels, predictions=predictions[\"classes\"])\n\t\t\t}\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def _setup(self):\n self.X = tf.placeholder(dtype=tf.float32, shape=[None, self.input_shape[0], self.input_shape[1]], name='X')\n self.Y = tf.placeholder(dtype=tf.float32, shape=[None, self.input_shape[0], self.input_shape[1]], name='Y')\n self.Y_flat = tf.reshape(self.Y, shape=[-1, self.input_shape[0] * self.input_shape[1]])\n #self.keep_prob = tf.placeholder(dtype=tf.float32, shape=(), name='keep_prob')\n dummy_dim = int(np.sqrt(self.small_size_img))\n self.reshaped_dim = [-1, dummy_dim, dummy_dim, 1] #[-1, 7, 7, 1]", "def __init__(self,\n input_tensor_spec,\n output_tensor_spec,\n preprocessing_layers=None,\n preprocessing_combiner=None,\n conv_layer_params=None,\n input_fc_layer_params=(200, 100),\n lstm_size=(40,),\n output_fc_layer_params=(200, 100),\n 
activation_fn=tf.keras.activations.relu,\n name='MultiInputsActorRnnNetwork'):\n observation_spec = input_tensor_spec\n if preprocessing_layers is None:\n flat_preprocessing_layers = None\n else:\n flat_preprocessing_layers = [\n _copy_layer(layer) for layer in tf.nest.flatten(preprocessing_layers)\n ]\n # Assert shallow structure is the same. This verifies preprocessing\n # layers can be applied on expected input nests.\n observation_nest = observation_spec\n # Given the flatten on preprocessing_layers above we need to make sure\n # input_tensor_spec is a sequence for the shallow_structure check below\n # to work.\n if not nest.is_sequence(observation_spec):\n observation_nest = [observation_spec]\n nest.assert_shallow_structure(\n preprocessing_layers, observation_nest, check_types=False)\n\n if (len(tf.nest.flatten(observation_spec)) > 1 and\n preprocessing_combiner is None):\n raise ValueError(\n 'preprocessing_combiner layer is required when more than 1 '\n 'observation_spec is provided.')\n\n if preprocessing_combiner is not None:\n preprocessing_combiner = _copy_layer(preprocessing_combiner)\n\n input_layers = utils.mlp_layers(\n conv_layer_params,\n input_fc_layer_params,\n activation_fn=activation_fn,\n kernel_initializer=tf.compat.v1.keras.initializers.glorot_uniform(),\n name='input_mlp')\n\n # Create RNN cell\n if len(lstm_size) == 1:\n cell = tf.keras.layers.LSTMCell(lstm_size[0])\n else:\n cell = tf.keras.layers.StackedRNNCells(\n [tf.keras.layers.LSTMCell(size) for size in lstm_size])\n\n state_spec = tf.nest.map_structure(\n functools.partial(\n tensor_spec.TensorSpec, dtype=tf.float32,\n name='network_state_spec'), list(cell.state_size))\n\n output_layers = utils.mlp_layers(fc_layer_params=output_fc_layer_params,\n name='output')\n\n flat_action_spec = tf.nest.flatten(output_tensor_spec)\n action_layers = [\n tf.keras.layers.Dense(\n single_action_spec.shape.num_elements(),\n activation=tf.keras.activations.tanh,\n kernel_initializer=tf.keras.initializers.RandomUniform(\n minval=-0.003, maxval=0.003),\n name='action') for single_action_spec in flat_action_spec\n ]\n\n super(MultiInputsActorRnnNetwork, self).__init__(\n input_tensor_spec=input_tensor_spec,\n state_spec=state_spec,\n name=name)\n\n self._output_tensor_spec = output_tensor_spec\n self._flat_action_spec = flat_action_spec\n self._conv_layer_params = conv_layer_params\n self._input_layers = input_layers\n self._dynamic_unroll = dynamic_unroll_layer.DynamicUnroll(cell)\n self._output_layers = output_layers\n self._action_layers = action_layers\n\n self._preprocessing_nest = tf.nest.map_structure(lambda l: None,\n preprocessing_layers)\n self._flat_preprocessing_layers = flat_preprocessing_layers\n self._preprocessing_combiner = preprocessing_combiner", "def semantic_fusion(input_vector, input_dim, fusion_vectors, scope):\n with tf.variable_scope(scope):\n assert len(fusion_vectors) > 0\n stacked_vectors = tf.concat(fusion_vectors + [input_vector], axis=-1) # size = [batch_size, ..., input_dim * (len(fusion_vectors) + 1)]\n num_total_vectors = len(fusion_vectors) + 1\n Wr = tf.get_variable(\"Wr\", dtype=tf.float32, shape=[num_total_vectors * input_dim, input_dim])\n Wg = tf.get_variable(\"Wg\", dtype=tf.float32, shape=[num_total_vectors * input_dim, input_dim])\n br = tf.get_variable(\"br\", dtype=tf.float32, shape=[input_dim])\n bg = tf.get_variable(\"bg\", dtype=tf.float32, shape=[input_dim])\n r = tf.tanh(multiply_tensors(stacked_vectors, Wr) + br) # size = [batch_size, ..., input_dim]\n g = 
tf.sigmoid(multiply_tensors(stacked_vectors, Wg) + bg) # size = [batch_size, ..., input_dim]\n return g * r + (1 - g) * input_vector # size = [batch_size, ..., input_dim]", "def __call__(self, tensors, non_tensors, time_card):\n raise NotImplementedError", "def __init_tensor_register(self):\n self.tensors = dict()", "def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bad Checksum Detection Should Raise Exception.
def badChecksumDetection(self):
    liten = Liten(spath='testData')
    badChecksumAttempt = liten.createChecksum('fileNotFound.txt')
[ "def _get_error_bad_checksum(self):\n return self.__error_bad_checksum", "def RxTcpChecksumError(self):\n if self.force_auto_sync:\n self.get('RxTcpChecksumError')\n return self._RxTcpChecksumError", "def RxUdpChecksumError(self):\n if self.force_auto_sync:\n self.get('RxUdpChecksumError')\n return self._RxUdpChecksumError", "def crc_check(self, data):\n\n crc = calc_crc(data)\n if crc != 0:\n print('Failed CRC. Errors in data received')", "def validate(self, packet):\n return Checksum.validate_checksum(packet)", "def _receive_check(self, length):\n data = self._receive(length)\n # Test checksum\n checksum = data[-1]\n s = sum(data[:-1]) % 256\n if s != checksum:\n raise ButtshockError(\"Checksum mismatch! 0x%.02x != 0x%.02x\" % (s, checksum))\n return data[:-1]", "def _set_error_bad_checksum(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"error-bad-checksum\", rest_name=\"error-bad-checksum\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"error_bad_checksum must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"error-bad-checksum\", rest_name=\"error-bad-checksum\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__error_bad_checksum = t\n if hasattr(self, '_set'):\n self._set()", "def RxIpv4ChecksumError(self):\n if self.force_auto_sync:\n self.get('RxIpv4ChecksumError')\n return self._RxIpv4ChecksumError", "def test_checksum(self):", "def corrupt_checksum(checksum: str, probability: float) -> str:\n assert(0 <= probability < 1)\n probability *= 100 # Turn the percentage into an integer\n rand_num = rnd.randint(0, 100)\n if probability > rand_num:\n print(\"packet corrupted!\")\n # return an invalid checksum\n return \"000000000000000000000000\"\n else:\n # return original checksum\n return checksum", "def valid_checksum(self):\n (ck_a, ck_b) = self.checksum()\n d = self._buf[2:-2]\n (ck_a2, ck_b2) = struct.unpack('<BB', self._buf[-2:])\n return ck_a == ck_a2 and ck_b == ck_b2", "def test_dump_invalid_checksum_type(self):\n with self.assertRaises(SaltInvocationError):\n images.dump(\"http://example.org/image.xz\", \"/dev/sda1\", checksum_type=\"crc\")", "def corrupt(self, receivedPacket):\r\n # Compute checksum for the received packet\r\n computedChecksum = self.checksum(receivedPacket.Payload)\r\n\r\n # Compare computed checksum with the checksum of received packet\r\n if computedChecksum != receivedPacket.Checksum:\r\n return True\r\n else:\r\n return False", "def validate_checksum( filename, md5sum ):\n filename = match_filename( filename )\n md5_hash = file_md5( filename=filename )\n if md5_hash != md5sum:\n raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))", "def validate(self):\n\t\treturn self.checksum == 
self.create_checksum()", "def corrupt(self, receivedPacket):\n # Compute checksum for the received packet\n computedChecksum = self.checksum(receivedPacket.Data)\n\n # Compare computed checksum with the checksum of received packet\n if computedChecksum != receivedPacket.Checksum:\n return True\n else:\n return False", "def check_crc(self, response):\n if response[6:8] != self.crc(response[0:6]):\n raise AuroraError('Response has a wrong CRC')", "def testChecksumsLegal(self):\n cyto = self.session.create_cytokine()\n success = False\n checksums = {\"md5\": \"d8e8fca2dc0f896fd7cb4cb0031ba249\"}\n\n try:\n cyto.checksums = checksums\n success = True\n except Exception:\n pass\n\n self.assertTrue(success, \"Able to use the checksums setter\")\n\n self.assertEqual(cyto.checksums['md5'], checksums['md5'],\n \"Property getter for 'checksums' works.\")", "def test_mismatchedOpaqueChecksum(self):\n credentialFactory = FakeDigestCredentialFactory('md5', 'test realm')\n\n d = credentialFactory.getChallenge(clientAddress)\n\n def _test(challenge):\n key = '%s,%s,%s' % (challenge['nonce'],\n clientAddress.host,\n '0')\n\n digest = md5(key + 'this is not the right pkey').hexdigest()\n\n badChecksum = '%s-%s' % (digest,\n key.encode('base64').strip('\\n'))\n\n self.assertRaises(\n error.LoginFailed,\n credentialFactory.verifyOpaque,\n badChecksum,\n challenge['nonce'],\n clientAddress.host)\n return d.addCallback(_test)", "def validate_address_checksum(address):\n if is_checksum_formatted_address(address):\n if not is_checksum_address(address):\n raise ValueError(\"'address' has an invalid EIP55 checksum\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test checksum of duplicate files
def testDupeFileDetection(self):
    liten = Liten(spath='testData')
    checksumOne = liten.createChecksum(self.dupeFileOne)
    checksumTwo = liten.createChecksum(self.dupeFileTwo)
    self.assertEqual(checksumOne, checksumTwo)
[ "def checksum_matches(content, filename):\n with open(filename, \"rb\") as f:\n content_hash = hashlib.md5(content)\n file_hash = hashlib.md5(f.read())\n return content_hash.digest() == file_hash.digest()", "def test_checksum(self):", "def test_check_files_md5(self):\n table_err = PrettyTable(['File', 'Expected', 'Actual'])\n for file_path, expected_md5 in self.Md5Csum.items():\n actual_md5 = self.hash_md5(file_path)\n if actual_md5 != expected_md5:\n table_err.add_row([file_path, expected_md5, actual_md5])\n continue\n if len(table_err._rows) > 0:\n logger.error(\"Md5sum Check:\\n\".format(table_err))\n raise Exception(\"FAILED: File md5 NOT matched!\")\n return True", "def test_compute_md5sums(self):\n compute_md5sums('.',output_file=self.checksum_file,relative=True)\n checksums = io.open(self.checksum_file,'rt').read()\n reference_checksums = self.reference_checksums.split('\\n')\n reference_checksums.sort()\n checksums = checksums.split('\\n')\n checksums.sort()\n for l1,l2 in zip(reference_checksums,checksums):\n self.assertEqual(l1,l2)", "def test_deduplicates_file(self):\n\n data_dir = os.path.dirname(__file__)\n src_filepath = data_dir + '/data/test_file_2.nt'\n shutil.copy(data_dir + '/data/test_file.nt', src_filepath)\n deduplicates_file(src_filepath)\n\n # test method\n with open(src_filepath) as f: data = f.readlines()\n self.assertTrue(len(data) == 5)\n\n # clean up environment\n if os.path.exists(src_filepath): os.remove(src_filepath)\n\n return None", "def check_need_update(self):\n current_md5 = self.dir_hash(self.path_dir)\n last_md5 = \"\"\n path = md5_file_path\n file_operation = 'r'\n if not os.path.exists(path): \n file_operation = 'w+'\n\n with open(path,file_operation) as file:\n last_md5 = file.read()\n last_md5 = str(last_md5)\n is_equal = last_md5 == current_md5\n if not is_equal:\n with open(path,'w') as f:\n f.write(current_md5)\n return is_equal\n else:\n return is_equal", "def _validate_random_hashes(self):\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True", "def _validate_random_hashes(self) -> bool:\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True", "async def test_51a_post_files__unique_file_version__okay(rest: RestClient) -> None:\n # define the file to be created\n logical_name = '/blah/data/exp/IceCube/blah.dat'\n checksum = {'sha512': hex('foo bar')}\n metadata1 = {\n 'logical_name': logical_name,\n 'checksum': checksum,\n 'file_size': 1,\n u'locations': [{u'site': u'WIPAC', u'path': u'/blah/data/exp/IceCube/blah.dat'}]\n }\n metadata_same_logical_name = {\n 'logical_name': logical_name,\n 'checksum': {'sha512': hex('foo bar baz boink')},\n 'file_size': 1,\n u'locations': [{u'site': u'NORTH-POLE', u'path': u'/blah/data/exp/IceCube/blah.dat'}]\n }\n metadata_same_checksum = {\n 'logical_name': 
logical_name + '!!!',\n 'checksum': checksum,\n 'file_size': 1,\n u'locations': [{u'site': u'SOUTH-POLE', u'path': u'/blah/data/exp/IceCube/blah.dat'}]\n }\n\n data, url, uuid1 = await _post_and_assert(rest, metadata1)\n data = await _assert_in_fc(rest, uuid1)\n\n data, url, uuid2 = await _post_and_assert(rest, metadata_same_logical_name)\n data = await _assert_in_fc(rest, [uuid1, uuid2])\n\n data, url, uuid3 = await _post_and_assert(rest, metadata_same_checksum)\n data = await _assert_in_fc(rest, [uuid1, uuid2, uuid3]) # noqa: F841", "def test_find_duplicate_files(parent_directory,\n duplicates):\n duplicate_files = \\\n find_duplicate_files.find_duplicate_files(parent_directory)\n\n assert duplicate_files == duplicates", "def compare_checksum(info, f):\n pieces = info['pieces']\n\n def getchunks(f, size):\n while True:\n chunk = f.read(size)\n if chunk == '':\n break\n yield hashlib.sha1(chunk).digest()\n\n calc = getchunks(f, info['piece length'])\n ref = (pieces[i:i + 20] for i in xrange(0, len(pieces), 20))\n for expected, actual in itertools.izip(calc, ref):\n if expected != actual:\n return False\n return ensure_empty(calc) and ensure_empty(ref)", "def __deduplicate(self, path, stat_info, fingerprint, file_obj):\n\n # No need to deduplicate empty files\n if stat_info.st_size == 0:\n return\n\n # Check modify time\n if self.__config[\"trust_modify_time\"]:\n prev_info = self.__prev_files.get(path)\n\n if prev_info is not None:\n prev_hash, prev_fingerprint = prev_info\n\n if fingerprint == prev_fingerprint:\n LOG.debug(\n \"File '%s' hasn't been changed. Make it an extern file with %s hash.\",\n path, prev_hash)\n\n return prev_hash\n\n # Find files with the same hash -->\n file_size = 0\n\n while file_size < stat_info.st_size:\n data = file_obj.read(\n min(psys.BUFSIZE, stat_info.st_size - file_size))\n\n if data:\n file_size += len(data)\n elif file_size == stat_info.st_size:\n break\n else:\n raise Error(\"The file has been truncated during the backup.\")\n\n file_hash = file_obj.hexdigest()\n file_obj.reset()\n\n if file_hash in self.__hashes:\n LOG.debug(\"Make '%s' an extern file with %s hash.\", path, file_hash)\n return file_hash\n # Find files with the same hash <--", "def check_file_hashes(self):\n for filepath in pathlib.Path(self.dir.name).glob(\"**/*.*\"):\n filename = os.path.basename(filepath)\n if filename != \"datapackage.json\" and filename != \"datapackage-digest.json\":\n file = open(filepath, \"rb\").read()\n hash = support_hash_file(self.hash_type, file)\n file = str(filepath).split(\"/\")[-2:]\n file = \"/\".join(file)\n res = None\n for item in self.datapackage[\"resources\"]:\n if item[\"path\"] == file:\n res = item\n if res == None or (res[\"hash\"] != hash):\n print(\n \"\\nfile %s's hash does not match the hash listed in the datapackage\"\n % file\n )\n return False\n return True", "def filecmp(filename_a, filename_b):\n size_a = FileIO(filename_a, \"rb\").size()\n size_b = FileIO(filename_b, \"rb\").size()\n if size_a != size_b:\n return False\n\n # Size is the same. 
Do a full check.\n crc_a = file_crc32(filename_a)\n crc_b = file_crc32(filename_b)\n return crc_a == crc_b", "def validate_checksum( filename, md5sum ):\n filename = match_filename( filename )\n md5_hash = file_md5( filename=filename )\n if md5_hash != md5sum:\n raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))", "def verify_file(path: PathLike, checksum: str) -> bool:\n algorithm, want = checksum.split(\":\", 1)\n\n have = hexdigest_file(path, algorithm)\n\n return have == want", "def check_sha1sum(filepath, sha1sums=None, observatory=None):\n if sha1sums is None:\n sha1sums = get_all_sha1sums(observatory)\n sha1sum = utils.checksum(filepath)\n log.verbose(\"Checking file\", repr(filepath), \"with sha1sum\", repr(sha1sum),\n \"for duplication on CRDS server.\")\n if sha1sum in sha1sums:\n raise DuplicateSha1sumError(\n \"File\", repr(os.path.basename(filepath)),\n \"is identical to existing CRDS file\", repr(sha1sums[sha1sum]))", "def verify_checksum(filepath):\n file_obj = file_factory(filepath)\n return file_obj.verify_checksum()", "def test__checksum(self):\n # Test\n result = converter._checksum(1, 2, 3)\n expected = ('''\\\n3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7\\\nba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2''')\n self.assertEqual(result, expected)", "def check_duplicates(self, file_path):\n\t\tif not file_path:\n\t\t\treturn file_path # !cover\n\t\twith HandlerThread.ele_lock:\n\t\t\t# The IO here could cause issues if multiple Threads tried to delete the same files, so safety lock.\n\t\t\t# Files currently downloading won't exist in the hashjar yet, so there's no risk of catching one in progress.\n\t\t\tif not settings.get('output.deduplicate_files'):\n\t\t\t\t# Deduplication disabled.\n\t\t\t\treturn file_path # !cover\n\t\t\twas_new, existing_path = hashjar.add_hash(file_path) # Check if the file exists already.\n\t\t\tif not was_new and existing_path != file_path:\n\t\t\t\t# Quick and dirty comparison, assumes larger filesize means better quality.\n\t\t\t\tif os.path.isfile(file_path) and os.path.isfile(existing_path):\n\t\t\t\t\tif os.path.getsize(file_path) > os.path.getsize(existing_path):\n\t\t\t\t\t\tmanifest.remove_file_hash(existing_path)\n\t\t\t\t\t\tos.remove(existing_path)\n\t\t\t\t\t\tmanifest.remap_filepath(existing_path, file_path)\n\t\t\t\t\t\treturn file_path\n\t\t\t\t\telse:\n\t\t\t\t\t\tmanifest.remove_file_hash(file_path)\n\t\t\t\t\t\tos.remove(file_path)\n\t\t\t\t\t\treturn existing_path\n\t\t\treturn file_path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test checksum of Nonduplicate files
def testDupeFileDetectionError(self):
    liten = Liten(spath='testData')
    checksumOne = liten.createChecksum(self.dupeFileOne)
    checksumThree = liten.createChecksum(self.nonDupeFile)
    self.assertNotEqual(checksumOne, checksumThree)
[ "def checksum_matches(content, filename):\n with open(filename, \"rb\") as f:\n content_hash = hashlib.md5(content)\n file_hash = hashlib.md5(f.read())\n return content_hash.digest() == file_hash.digest()", "def test_checksum(self):", "def test_check_files_md5(self):\n table_err = PrettyTable(['File', 'Expected', 'Actual'])\n for file_path, expected_md5 in self.Md5Csum.items():\n actual_md5 = self.hash_md5(file_path)\n if actual_md5 != expected_md5:\n table_err.add_row([file_path, expected_md5, actual_md5])\n continue\n if len(table_err._rows) > 0:\n logger.error(\"Md5sum Check:\\n\".format(table_err))\n raise Exception(\"FAILED: File md5 NOT matched!\")\n return True", "def test_compute_md5sums(self):\n compute_md5sums('.',output_file=self.checksum_file,relative=True)\n checksums = io.open(self.checksum_file,'rt').read()\n reference_checksums = self.reference_checksums.split('\\n')\n reference_checksums.sort()\n checksums = checksums.split('\\n')\n checksums.sort()\n for l1,l2 in zip(reference_checksums,checksums):\n self.assertEqual(l1,l2)", "def _validate_random_hashes(self):\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True", "def compare_checksum(info, f):\n pieces = info['pieces']\n\n def getchunks(f, size):\n while True:\n chunk = f.read(size)\n if chunk == '':\n break\n yield hashlib.sha1(chunk).digest()\n\n calc = getchunks(f, info['piece length'])\n ref = (pieces[i:i + 20] for i in xrange(0, len(pieces), 20))\n for expected, actual in itertools.izip(calc, ref):\n if expected != actual:\n return False\n return ensure_empty(calc) and ensure_empty(ref)", "def check_file_hashes(self):\n for filepath in pathlib.Path(self.dir.name).glob(\"**/*.*\"):\n filename = os.path.basename(filepath)\n if filename != \"datapackage.json\" and filename != \"datapackage-digest.json\":\n file = open(filepath, \"rb\").read()\n hash = support_hash_file(self.hash_type, file)\n file = str(filepath).split(\"/\")[-2:]\n file = \"/\".join(file)\n res = None\n for item in self.datapackage[\"resources\"]:\n if item[\"path\"] == file:\n res = item\n if res == None or (res[\"hash\"] != hash):\n print(\n \"\\nfile %s's hash does not match the hash listed in the datapackage\"\n % file\n )\n return False\n return True", "def _validate_random_hashes(self) -> bool:\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True", "def verify_file(path: PathLike, checksum: str) -> bool:\n algorithm, want = checksum.split(\":\", 1)\n\n have = hexdigest_file(path, algorithm)\n\n return have == want", "def validate_checksum( filename, md5sum ):\n filename = match_filename( filename )\n md5_hash = file_md5( filename=filename )\n if md5_hash != md5sum:\n raise ValueError('md5 checksums are inconsistent: {}'.format( filename ))", "def find_checksum(chksum): \n for record in 
capture_metadata2:\n if record[1]==chksum:\n return 1\n return 0", "def check_need_update(self):\n current_md5 = self.dir_hash(self.path_dir)\n last_md5 = \"\"\n path = md5_file_path\n file_operation = 'r'\n if not os.path.exists(path): \n file_operation = 'w+'\n\n with open(path,file_operation) as file:\n last_md5 = file.read()\n last_md5 = str(last_md5)\n is_equal = last_md5 == current_md5\n if not is_equal:\n with open(path,'w') as f:\n f.write(current_md5)\n return is_equal\n else:\n return is_equal", "def test_deduplicates_file(self):\n\n data_dir = os.path.dirname(__file__)\n src_filepath = data_dir + '/data/test_file_2.nt'\n shutil.copy(data_dir + '/data/test_file.nt', src_filepath)\n deduplicates_file(src_filepath)\n\n # test method\n with open(src_filepath) as f: data = f.readlines()\n self.assertTrue(len(data) == 5)\n\n # clean up environment\n if os.path.exists(src_filepath): os.remove(src_filepath)\n\n return None", "def filecmp(filename_a, filename_b):\n size_a = FileIO(filename_a, \"rb\").size()\n size_b = FileIO(filename_b, \"rb\").size()\n if size_a != size_b:\n return False\n\n # Size is the same. Do a full check.\n crc_a = file_crc32(filename_a)\n crc_b = file_crc32(filename_b)\n return crc_a == crc_b", "def validate_02_links_checksums(self):\n\n mets_files = self.psp.mets.xpath(\"//mets:file\", namespaces=self.catalog.namespaces)\n\n def validator(self,mets_file):\n declared_checksum = mets_file.xpath(\"./@CHECKSUM\")[0]\n self.logger.debug('uvedene CHECKSUM je: %s' % (declared_checksum,))\n fpath = mets_file.xpath(\"./mets:FLocat/@xlink:href\", namespaces = self.catalog.namespaces)[0]\n m = hashlib.md5()\n self.logger.debug('fpath je: %s' % (fpath,))\n fh = open(self.psp.join(fpath), 'rb')\n while True:\n data = fh.read(8192)\n if not data:\n break\n m.update(data)\n real_checksum = m.hexdigest()\n if declared_checksum != real_checksum:\n return \"soubor %s ma jiny CHECKSUM, nez je napsano\" % (fpath,)\n return None\n return self.for_each(mets_files, validator)", "def verify_checksum(filepath):\n file_obj = file_factory(filepath)\n return file_obj.verify_checksum()", "def checksum(self, fileName):\n\n tar = tarfile.open(fileName, mode='r')\n lsl = [(x.name, int(x.size), int(x.mtime), x.uname) for x in tar.getmembers()]\n hasher = hashlib.sha256(str(lsl))\n checksum = hasher.hexdigest()\n\n return checksum", "async def test_51a_post_files__unique_file_version__okay(rest: RestClient) -> None:\n # define the file to be created\n logical_name = '/blah/data/exp/IceCube/blah.dat'\n checksum = {'sha512': hex('foo bar')}\n metadata1 = {\n 'logical_name': logical_name,\n 'checksum': checksum,\n 'file_size': 1,\n u'locations': [{u'site': u'WIPAC', u'path': u'/blah/data/exp/IceCube/blah.dat'}]\n }\n metadata_same_logical_name = {\n 'logical_name': logical_name,\n 'checksum': {'sha512': hex('foo bar baz boink')},\n 'file_size': 1,\n u'locations': [{u'site': u'NORTH-POLE', u'path': u'/blah/data/exp/IceCube/blah.dat'}]\n }\n metadata_same_checksum = {\n 'logical_name': logical_name + '!!!',\n 'checksum': checksum,\n 'file_size': 1,\n u'locations': [{u'site': u'SOUTH-POLE', u'path': u'/blah/data/exp/IceCube/blah.dat'}]\n }\n\n data, url, uuid1 = await _post_and_assert(rest, metadata1)\n data = await _assert_in_fc(rest, uuid1)\n\n data, url, uuid2 = await _post_and_assert(rest, metadata_same_logical_name)\n data = await _assert_in_fc(rest, [uuid1, uuid2])\n\n data, url, uuid3 = await _post_and_assert(rest, metadata_same_checksum)\n data = await _assert_in_fc(rest, [uuid1, uuid2, 
uuid3]) # noqa: F841", "def test__checksum(self):\n # Test\n result = converter._checksum(1, 2, 3)\n expected = ('''\\\n3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7\\\nba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2''')\n self.assertEqual(result, expected)", "def check_sha1sum(filepath, sha1sums=None, observatory=None):\n if sha1sums is None:\n sha1sums = get_all_sha1sums(observatory)\n sha1sum = utils.checksum(filepath)\n log.verbose(\"Checking file\", repr(filepath), \"with sha1sum\", repr(sha1sum),\n \"for duplication on CRDS server.\")\n if sha1sum in sha1sums:\n raise DuplicateSha1sumError(\n \"File\", repr(os.path.basename(filepath)),\n \"is identical to existing CRDS file\", repr(sha1sums[sha1sum]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current patch with underscores instead of periods
Uses only the first 2 parts of the patch name
def get_format_underscore_current_patch(cls) -> str:
    current_patch = cls.get_current_patch()
    return "_".join(current_patch.split(".")[:2])
[ "def get_format_underscore_previous_patch(cls) -> str:\n\n previous_patch = cls.get_all_patches()[1]\n return \"_\".join(previous_patch.split(\".\")[:2])", "def get_next_patch_filename():\r\n last_patch = sorted(glob.glob(os.path.join(PATCHES_PATH, \"patch_*.sql\")))[-1]\r\n patch_number = int(re.search(\"patch_([0-9]{3})\\.sql\", last_patch).group(1))\r\n next_number = patch_number + 1\r\n return \"patch_{:03}.sql\".format(next_number)", "def patch_name(tile_id, patch_id):\n # figures of tiles\n tile_max_len = 4\n # figures of patchs\n patch_max_len = 7\n tile_id_str = str(tile_id)\n patch_id_str = str(patch_id)\n tile_id_len = len(tile_id_str)\n patch_id_len = len(patch_id_str)\n tile_id_str = '0' * (tile_max_len-tile_id_len) + tile_id_str\n patch_id_str = '0' * (patch_max_len-patch_id_len) + patch_id_str\n return tile_id_str, patch_id_str", "def _format_exercise_ref(exercise_ref: str) -> str:\n try:\n return exercise_ref.split(\"_\")[1]\n except IndexError:\n return exercise_ref", "def _cogroup_basename(grp):\n return grp[5:].rsplit('_', 1)[0] if grp.startswith('_grp_') else grp", "def spp_name_withunderscore(self, name):\n withunderscore = name.replace('.', '_')\n return withunderscore", "def ORIGINAL_PART_NAME(self):\n if self.head is self or not isinstance(self.head,Device): return \"\"\n if self._original_part_name is None:\n try: self._original_part_name = self.head.__class__.parts[self.nid-self.head.nid-1]['path']\n except:self._original_part_name = \"\"\n return self._original_part_name", "def nonColonizedName_to_moduleName(name):\r\n return re.sub('\\.', '_', name)", "def group_title(path):\n\n def filter_group(group):\n for suffix in [\"_patch_parameter\", \"_update_parameter\", \"_parameter\"]:\n if group.endswith(suffix):\n group = group[:0 - len(suffix)]\n return group\n\n group_path = path.split('.')\n group_path = list(map(filter_group, group_path))\n title = ': '.join(group_path)\n for each in group_path:\n title = title.replace(each, \" \".join([n.title() for n in each.split('_')]), 1)\n return title", "def get_sample_sheet_proj_name(lims_project_name):\n return re.sub(r'[^a-zA-Z0-9_\\-]', '_', lims_project_name)", "def _calibration_prefix(params):\n nightname = params['NIGHTNAME']\n # remove separators\n calib_prefix = nightname.replace(os.sep, '_')\n # return calib_prefix\n return calib_prefix + '_'", "def get_name_core(filename: str) -> str:\n # Discard any file extensions (e.g. .wmv_AA.txt)\n no_extension: str = os.path.basename(filename).split('.', 1)[0]\n # Discard everything after the last `_` (e.g. 
1, 2, or Morning)\n core = no_extension.split('_')[:-1]\n return \"_\".join(core)", "def fs_project_name(project_name):\n return project_name.replace('/', '@')", "def get_safe_label(self):\n\n if self.info.get('label') == '/':\n return 'root'\n\n suffix = re.sub(r\"[/ \\(\\)]+\", \"_\", self.info.get('label')) if self.info.get('label') else \"\"\n if suffix and suffix[0] == '_':\n suffix = suffix[1:]\n if len(suffix) > 2 and suffix[-1] == '_':\n suffix = suffix[:-1]\n return suffix", "def get_reg_name(self, name):\n return name.lower().replace('-', '').replace('_', '').replace(' ', '')", "def _getLogName(self, name):\n result = name.replace('$', '_')\n result = result.replace('/', '_')\n result = result.replace('\\\\', '_')\n return result", "def generate_name(self, path) -> str:\n name = path.strip(\"/\").split(\"/\")[-1]\n return name", "def standardize_groupname(label: str) -> str:\n new_label = label.replace(\"/\", \"-\")\n return new_label", "def name_dashed(self) -> str:\n return self.name.replace(\"_\", \"-\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the previous patch with underscores instead of periods
Uses only the first 2 parts of the patch name
def get_format_underscore_previous_patch(cls) -> str:
    previous_patch = cls.get_all_patches()[1]
    return "_".join(previous_patch.split(".")[:2])
[ "def get_format_underscore_current_patch(cls) -> str:\n\n current_patch = cls.get_current_patch()\n return \"_\".join(current_patch.split(\".\")[:2])", "def get_next_patch_filename():\r\n last_patch = sorted(glob.glob(os.path.join(PATCHES_PATH, \"patch_*.sql\")))[-1]\r\n patch_number = int(re.search(\"patch_([0-9]{3})\\.sql\", last_patch).group(1))\r\n next_number = patch_number + 1\r\n return \"patch_{:03}.sql\".format(next_number)", "def get_tract_and_patch(filename):\n # todo add complexity to this argument (return just tract if required)\n clip_1 = filename.split(\".\")[0]\n clip_2 = clip_1.split(\"_\")\n tract, patch = clip_2[1], clip_2[2].replace(\"c\", \",\")\n return tract, patch", "def ORIGINAL_PART_NAME(self):\n if self.head is self or not isinstance(self.head,Device): return \"\"\n if self._original_part_name is None:\n try: self._original_part_name = self.head.__class__.parts[self.nid-self.head.nid-1]['path']\n except:self._original_part_name = \"\"\n return self._original_part_name", "def del_breakingNews_pre_name(filename):\n filename = filename.replace('222_', '')\n\n return filename", "def _cogroup_basename(grp):\n return grp[5:].rsplit('_', 1)[0] if grp.startswith('_grp_') else grp", "def _format_exercise_ref(exercise_ref: str) -> str:\n try:\n return exercise_ref.split(\"_\")[1]\n except IndexError:\n return exercise_ref", "def get_last_name_elem(filename: str) -> str:\n # Discard any file extensions (e.g. .wmv_AA.txt)\n no_extension: str = os.path.basename(filename).split('.', 1)[0]\n # Keep everything after the last `_` (e.g. 1, 2, or Morning)\n end = no_extension.split('_')[-1]\n return end", "def AddPrefix(patch, text):\n return '%s%s' % (site_config.params.CHANGE_PREFIX[patch.remote], text)", "def last_command_name():\n IGNORE = ['ss', 'ss1', 'fix']\n def get_name(args):\n fldr = os.path.dirname(os.path.dirname(os.path.dirname(args[0])))\n return os.path.splitext(os.path.basename(fldr))[0].encode('utf-8')\n names = [get_name(args) for args in last_commands(20)]\n #print \"Last commands: %s\" % ', '.join(names)\n names = [n for n in names if n.lower() not in IGNORE]\n return names[-1] if len(names)>0 else None", "def eliminate_frame_idx_and_ext_from_clip_name(clip_name):\n eliminated_name = re.sub('_\\[\\d+-\\d+\\]\\..+$', '', clip_name)\n\n return eliminated_name", "def semver_incr_patch(ver: str) -> str:\n parts = ver.split(\".\")\n patch = str(int(parts[-1]) + 1)\n\n parts = parts[:-1]\n parts.append(patch)\n\n return \".\".join(parts)", "def stripOrderPrefix(filename):\n n = filename.find(\"_\")\n if n > 0:\n try:\n int(filename[:n])\n return filename[n+1:]\n except ValueError:\n pass\n return filename", "def patch_name(tile_id, patch_id):\n # figures of tiles\n tile_max_len = 4\n # figures of patchs\n patch_max_len = 7\n tile_id_str = str(tile_id)\n patch_id_str = str(patch_id)\n tile_id_len = len(tile_id_str)\n patch_id_len = len(patch_id_str)\n tile_id_str = '0' * (tile_max_len-tile_id_len) + tile_id_str\n patch_id_str = '0' * (patch_max_len-patch_id_len) + patch_id_str\n return tile_id_str, patch_id_str", "def prefix(pattern):\r\n return pattern[:len(pattern) - 1]", "def get_img_name(self):\n\n name = self.img\n idx = name.rindex(\".\")\n return name[:idx]", "def strip_prefix(self, name):\n match = self.prefixre.match(name)\n if match.group(3):\n return match.group(3)\n else:\n return name", "def get_name_core(filename: str) -> str:\n # Discard any file extensions (e.g. 
.wmv_AA.txt)\n no_extension: str = os.path.basename(filename).split('.', 1)[0]\n # Discard everything after the last `_` (e.g. 1, 2, or Morning)\n core = no_extension.split('_')[:-1]\n return \"_\".join(core)", "def folder_from_egtb_name(name: str) -> str:\n l, r = name.split('v')\n prefix = f'{len(l)}v{len(r)}'\n suffix = '_pawnful' if ('P' in l or 'P' in r) else '_pawnless'\n return prefix + suffix" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
(Re)Load the package database.
def load(self): self.db = info()
[ "def reload(self):\n with self.lock:\n self.db = _load_json(self.path, driver=self.driver)", "def rebuild():\n raise NotImplementedError\n import config\n # First let's get a list of all the packages in the database.\n # TODO: create the session.\n logger.debug(\"Getting database packages and versions.\")\n db_data = db.search_packages(session, include_prerelease=True)\n db_data = _db_data_to_dict(db_data)\n\n # Then we'll get the list of all the packages in the package directory\n # Same data structure as the db data.\n pkg_path = Path(config.SERVER_PATH) / Path(config.PACKAGE_DIR)\n file_data = _get_packages_from_files(pkg_path)\n\n _add_packages_to_db(file_data)\n _remove_packages_from_db(file_data, db_data)\n\n return False", "def reload_all(self):\n dbList = self.PRIMARY_IMPORT_LIST + self.SECONDARY_IMPORT_LIST\n for dbName in dbList:\n self.import_pickle(dbName)", "def preload(self):\n self.db", "def load_DB(self):\n\t\tstream = open(self.DB_file)\n\t\tself.DB = pickle.load(stream)\n\t\tstream.close()\n\t\treturn", "def reload_data(self):\r\n self.pre_requisite()", "def load_old(self, path):\n # The old database does not have a depository, so create an empty one\n self.depository = {'depository': StatmechDepository(label='depository', name='Statmech Depository')}\n\n for (root, dirs, files) in os.walk(os.path.join(path, 'frequencies_libraries')):\n if (os.path.exists(os.path.join(root, 'Dictionary.txt')) and\n os.path.exists(os.path.join(root, 'Library.txt'))):\n library = StatmechLibrary(label=os.path.basename(root), name=os.path.basename(root))\n library.load_old(\n dictstr=os.path.join(root, 'Dictionary.txt'),\n treestr='',\n libstr=os.path.join(root, 'Library.txt'),\n num_parameters=-1,\n num_labels=1,\n pattern=False,\n )\n library.label = os.path.basename(root)\n self.libraries[library.label] = library\n\n self.groups['groups'] = StatmechGroups(label='group', name='Functional Group Values').load_old(\n dictstr=os.path.join(path, 'frequencies_groups', 'Dictionary.txt'),\n treestr=os.path.join(path, 'frequencies_groups', 'Tree.txt'),\n libstr=os.path.join(path, 'frequencies_groups', 'Library.txt'),\n num_parameters=-1,\n num_labels=1,\n pattern=True,\n )", "def refresh(self):\n print(\"Copy to\", self.ro_dbpath)\n\n if self.ro_dbpath:\n if (\n not self.ro_dbpath.is_file()\n or self.ro_dbpath.stat().st_mtime < self.dbpath.stat().st_mtime\n ):\n shutil.copyfile(self.dbpath, self.ro_dbpath)\n self.engine = None\n\n if not self.engine:\n self.engine = create_engine(\n \"sqlite://\", creator=self.connect, connect_args={\"readonly\": True}\n )\n # options={ \"mode\": \"ro\"})\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n logging.info(\"Connected to Zotero SQL database\")\n\n self.fields = {}\n for row in self.session.query(dbz.FieldsCombined):\n self.fields[row.fieldName] = row.fieldID", "def populate():\n import dal.cache", "def db_imports():\n import_energy_data()", "def fresh_database():\n from ip_inspector.database import DATABASE_PATH, create_tables\n\n if os.path.exists(DATABASE_PATH):\n os.remove(DATABASE_PATH)\n create_tables()", "def initialize_database(self):\n self.database = self.loader.request_library(\"common_libs\", \"database\")\n self.database.create_connection(\"production\")\n self.database.load_mappings()\n\n self.migrator = self.loader.request_library(\"database_tools\", \"migrator\")\n self.migrator.migrate()", "def build_database():\n\tfrom fake_data import setup_initial_database, more_data\n\n\tprint (\"Updating Local Development 
Database...\")\n\n\tsetup_initial_database()\n\n\tprint (\"Finished!\")\n\n\treturn", "def populate_database(self):\n self.dye_stocks.add_new_dye_stocks()\n self.detections.add_new_detections()\n self.profiles.add_new_profiles()", "def _reset_database(self):\r\n self._delete_tables()\r\n self._create_tables()", "def restore_database():\n database_backup_menu()", "def reset_database():\n if os.path.exists(testinit.database_file):\n os.remove(testinit.database_file)\n shutil.copy(testinit.clean_db, testinit.database_file)", "def populate_database(query, write):\n db = ingest.load_database()\n db = ingest.populate_database(db, query=query, write=write)", "def refresh_asset_db(self, *args, **kwargs):\n \n asset_finder = self.context.asset_finder\n if asset_finder:\n asset_finder.refresh_data(*args, **kwargs)\n self.log_warning(\"Relaoded asset database.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query the install status of a port.
def status(self, port):
    pstatus = ABSENT
    if port.origin in self.db:
        portname = port.attr['pkgname'].rsplit('-', 1)[0]
        for pkgname in self.db[port.origin]:
            if pkgname.rsplit('-', 1)[0] == portname:
                pstatus = max(pstatus, version(pkgname, port.attr['pkgname']))
    return pstatus
[ "def check_port_status(self, port):\n # check existing ports dbqp has created\n dbqp_ports = self.check_dbqp_ports()\n if port not in dbqp_ports and not self.is_port_used(port):\n return 1\n else:\n return 0", "def getPortStatus(self, timeout = 100):\n\t\treturn self.__devhandle.controlMsg(requestType = 0xa1,\n\t\t\t\t\t\t\t\t\t\t request = 1,\n\t\t\t\t\t\t\t\t\t\t value = 0,\n\t\t\t\t\t\t\t\t\t\t index = self.__intf,\n\t\t\t\t\t\t\t\t\t\t buffer = 1,\n\t\t\t\t\t\t\t\t\t\t timeout = timeout)[0]", "def check_if_port_available_factory(port):\n def check_if_port_available():\n \"\"\"\n Check if a port is in use\n :return bool not_in_use: True if not in use, False if in use\n \"\"\"\n check_port_command = \"netstat -tuna | grep -E \\\"{:d}\\s\\\"\".format(port)\n return not check_nonzero_exit(check_port_command)\n return check_if_port_available", "def _portInUse(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sck:\n inUse = sck.connect_ex(('localhost', port)) == 0\n logging.debug(f' >>> Port {port} is in use: {inUse} <<<')\n return inUse", "def get_port_status(cluster, lswitch_id, port_id):\n try:\n r = do_request(HTTP_GET,\n \"/ws.v1/lswitch/%s/lport/%s/status\" %\n (lswitch_id, port_id), cluster=cluster)\n except exception.NotFound as e:\n LOG.error(_(\"Port not found, Error: %s\"), str(e))\n raise exception.PortNotFoundOnNetwork(\n port_id=port_id, net_id=lswitch_id)\n if r['link_status_up'] is True:\n return constants.PORT_STATUS_ACTIVE\n else:\n return constants.PORT_STATUS_DOWN", "def test_portNumber(self):\n site = self.store.findUnique(SiteConfiguration)\n ports = list(self.store.query(TCPPort, TCPPort.factory == site))\n self.assertEqual(len(ports), 1)\n self.assertEqual(ports[0].portNumber, 8088)\n self.assertEqual(installedOn(ports[0]), self.store)\n self.assertEqual(list(self.store.interfacesFor(ports[0])), [IService])", "def IsDevicePortUsed(device, device_port, state=''):\n base_urls = ('127.0.0.1:%d' % device_port, 'localhost:%d' % device_port)\n netstat_results = device.RunShellCommand(['netstat', '-an'],\n check_return=True,\n large_output=True)\n for single_connect in netstat_results:\n # Column 3 is the local address which we want to check with.\n connect_results = single_connect.split()\n if connect_results[0] != 'tcp':\n continue\n if len(connect_results) < 6:\n raise Exception('Unexpected format while parsing netstat line: ' +\n single_connect)\n is_state_match = connect_results[5] == state if state else True\n if connect_results[3] in base_urls and is_state_match:\n return True\n return False", "def portScanner(self, ip_addr, port):\r\n try:\r\n res = self.scanner(ip_addr, port)\r\n if str(type(res)) == \"<class 'NoneType'>\":\r\n print(f\"{missing} port {port} closed on {ip_addr}\", end=\"\")\r\n return False\r\n else:\r\n if res.sprintf(\"%TCP.flags%\") == \"SA\":\r\n print(f\"{result}: port {res.sport} is open on {ip_addr} :\", end=\"\")\r\n return True\r\n else:\r\n print(f\"{missing}: port {port} closed on {ip_addr} :\", end=\"\")\r\n return False\r\n\r\n except socket.gaierror:\r\n print(f\"{error} {ip_addr} Name or service not known\")\r\n sys.exit()", "def port_num(port):\n print(\"checking port numbers\")\n if port not in PORT_RANGE:\n return False\n else:\n return True", "def is_port_available(self, host, port):\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n if sock.connect_ex((host, port)) == 0:\n self.logger.info(\"port {} not available\".format(port))\n return False\n else:\n self.logger.info(\"port {} 
available\".format(port))\n return True", "def checkPort(self):\r\n \r\n #Auto select new port value, In case port was not allocated\r\n if self.port is None:\r\n for newPortValue in range(9000, 65535):\r\n if newPortValue not in [conn.laddr[1] for conn in psutil.net_connections()]:\r\n self.port = newPortValue\r\n break\r\n #If the port already selected, we check if the port is in use\r\n if self.port in [conn.laddr[1] for conn in psutil.net_connections()]:\r\n raise IOError('Port ' + str(self.port) + ' is already in use.')", "def counter_get_state(self, port):\n return self.comm('counter_get_state {0}'.format(port)) == '1'", "def _get_port_profile_status(self):\n return self.__port_profile_status", "def check_port(device, port_device):\r\n url = base_url + '/devices/' + device + '/ports'\r\n print(url)\r\n res = requests.get(url, auth=('onos', 'rocks'))\r\n print(res.status_code)\r\n if (res.status_code != 200):\r\n pass\r\n ports = res.json()['ports']\r\n print(ports)\r\n for port in ports:\r\n if port['port'] != port_device:\r\n continue\r\n if port['isEnabled'] == True:\r\n continue\r\n if (port['port'] == port_device) and (port['isEnabled'] == False):\r\n print(\"Link failure at switch {0}: port {1}\".format(\r\n device, port_device))\r\n return False\r\n return True", "def check_port_status(address, port=22, timeout=2):\n\n default_timeout = socket.getdefaulttimeout()\n socket.setdefaulttimeout(timeout)\n remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n remote_socket.connect((address, port))\n except Exception as inst:\n LOG.debug(\"Exception in check_port_status : %s\" % (str(inst)))\n return False\n finally:\n remote_socket.close()\n socket.setdefaulttimeout(default_timeout)\n return True", "def Appium_is_runner(port):\r\n response = None\r\n url = \" http://127.0.0.1:\" + str(port) + \"/wd/hub\" + \"/status\"\r\n try:\r\n response = requests.get(url, timeout=5)\r\n\r\n if str(response.status_code).startswith(\"2\"):\r\n return 'True'\r\n else:\r\n return 'False'\r\n except:\r\n return 'False'\r\n finally:\r\n if response:\r\n response.close()", "def check_devserver_port_used(port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # immediately reuse a local socket in TIME_WAIT state\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n sock.bind(('127.0.0.1', int(port)))\n used = False\n except socket.error:\n used = True\n finally:\n sock.close()\n return used", "def check(self):\n if not self.status:\n self.class_logger.info(\"Skip switch id:%s(%s) check because it's has Off status.\" % (self.id, self.name))\n return\n status = self.waiton()\n # Verify Ports table is not empty\n if self.ui.get_table_ports() == []:\n if self.opts.fail_ctrl == 'stop':\n self.class_logger.debug(\"Exit switch check. Ports table is empty!\")\n pytest.exit('Ports table is empty!')\n else:\n self.class_logger.debug(\"Fail switch check. 
Ports table is empty!\")\n pytest.fail('Ports table is empty!')\n return status", "def check_port(_args):\n port = None\n ports = serial.tools.list_ports.comports()\n x = [print(i) for i in ports]\n port_names = [p.device for p in ports]\n\n if _args.port is not None:\n if _args.port in port_names:\n port = _args.port\n else:\n return None # no comport found\n elif USB_PORT_DEFAULT in port_names:\n port = USB_PORT_DEFAULT\n else:\n for i in port_names:\n match = re.match(r'.+(USB).+', i)\n if match:\n port = i #set USB port to the first containing \"USB\"\n break\n if port is None:\n return None # no comport found\n\n print(f\"USB port: {port}\")\n return port" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produces a matplotlib plot of the ratio between the sensors. If running from a csv file, pass the name of the file to this function. If no filename is provided, will attempt to read data live from sensors. A lot of the plotting code is magic from various tutorials.
def liveplot(filename=0):
    ratio = None
    if filename:  # if plotting from CSV
        with open(filename) as f:
            for i in range(0, 480):  # number of slightly-more-than-quarter-seconds to run for
                oldratio = ratio
                a, b, ratio = read(f)
                print(ratio)
                if oldratio is not None:
                    plt.plot([i - 1, i], [oldratio, ratio], hold=True, color='black')  # plots a line connecting the last 2 points
                plt.axis([i - 20, i + 2, .8, 1.2])  # axes shift with data
                # magic
                plt.show(block=False)
                plt.pause(0.05)
                # run approximately every quarter second to mimic the data collection
                sleep(0.25)
    else:  # no file provided, plotting live data from sensors
        print("Live Sensing begun")
        for i in range(0, 100):
            oldratio = ratio
            a, b, ratio = read()
            print(ratio)
            if oldratio is not None:
                plt.plot([i - 1, i], [oldratio, ratio], hold=True, color='black')
            plt.axis([i - 20, i + 2, .8, 1.2])
            #plt.scatter(i, ratio, hold=True)
            plt.show(block=False)
            plt.pause(0.05)
            # no quarter second sleep because plotting takes significant amounts of time
            # probably doesn't run at the right speed for actual luff sensing because the algorithm
            # parameters depend on the frequency of sensor reading.
[ "def plot(self, filename:str=None):\n if not filename:\n filename = max(Saver.data_files())\n df = pd.read_csv(filename)\n print('DATAFRAME:')\n print(df)\n plot = self.plotter(df, self.config_change_steps)\n plt.show()", "def analyze_file(file):\n\n data = pd.read_csv(file, sep=',', parse_dates=[0], names=['date', 'value'], header=0, dayfirst=True)\n\n # On récupère toutes les données numériques intéressantes\n results = {}\n results['filename'] = file.filename\n results['date'] = str(datetime.datetime.now()).replace(\" \", \"_\").replace(\".\", \"-\").replace(':','-')\n results['Mean'] = data.value.mean()\n results['Variance'] = data.value.var()\n results['1st quartile'] = data['value'].quantile(0.25)\n results['2nd quartile'] = data['value'].quantile(0.5)\n results['3rd quartile'] = data['value'].quantile(0.75)\n results['Minimum date'] = data.date.iloc[data.value.idxmin()].strftime('%d/%m/%Y')\n results['Minimum'] = data.value.min()\n results['Maximum date'] = data.date.iloc[data.value.idxmax()].strftime('%d/%m/%Y')\n results['Maximum'] = data.value.max()\n\n # On crée un graphe traçant la valeur et la fft du csv \n fig, (ax1, ax2) = plt.subplots( nrows=2, ncols=1 ) # create figure & 1 axis\n ax1.plot( data['date'], data['value'])\n ax1.set_ylabel('Value')\n ax2.plot( range(data.shape[0]), np.fft.fft(data['value'] - data.value.mean()))\n ax2.set_ylabel('FFT')\n # On l'enregistre dans le dossier static\n path_graph = os.path.join(os.path.dirname(__file__), 'static',results['date'] + '.png')\n fig.savefig(path_graph)\n plt.close(fig)\n\n # On charge tous nos résultats dans une dataframe qu'on ajoute à history.csv\n results_df = pd.DataFrame(results, index=[1])\n path_history = os.path.join(os.path.dirname(__file__), 'history.csv')\n try:\n history = pd.read_csv(path_history,',')\n history = history.append(results_df, ignore_index=True)\n # Si history.csv n'existe pas, on le crée\n except:\n history = pd.DataFrame(results_df)\n history.to_csv(path_history)\n\n # On retourne l'index de la nouvelle analyse\n return history.shape[0]-1", "def load_and_plot_data(filename):\n df = pd.load_csv(filename, index_col=0)\n df.hist()\n return df", "def plotStats(fileName):\r\n # read in playlist\r\n plist=plistlib.readPlist(fileName)\r\n # get the tracks from the playlist\r\n tracks=plist['Tracks']\r\n # creat lists of song ratings and track durations\r\n ratings=[]\r\n durations=[]\r\n # iterate through the tracks\r\n for trackId,track in tracks.items():\r\n try:\r\n ratings.append(track['Album Rating'])\r\n durations.append(track['Total Time'])\r\n except:\r\n # ignore\r\n pass\r\n\r\n # ensure that valid data was collected\r\n if ratings==[]or durations==[]:\r\n print(\"No valid Album Rating/Total Time data in %s.\"% fileName)\r\n return\r\n\r\n # cross plot\r\n x=np.array(durations,np.int32)\r\n # convent to minutes\r\n x=x/60000.0\r\n y=np.array(ratings,np.int32)\r\n pyplot.subplot(2,1,1)\r\n pyplot.plot(x,y,'o')\r\n pyplot.axis([0,1.05*np.max(x),-1,110])\r\n pyplot.xlabel('Track duration')\r\n pyplot.ylabel('Track rating')\r\n\r\n # plot histogram\r\n pyplot.subplot(2,1,2)\r\n pyplot.hist(x,bins=20)\r\n pyplot.xlabel('Track duration')\r\n pyplot.ylabel('Count')\r\n\r\n # show plot\r\n pyplot.show()", "def plot_progress(filepath, columns):\n data = defaultdict(list)\n\n with open(filepath) as f:\n # if columns list is empty, print a list of all columns and return\n if not columns:\n reader = csv.reader(f)\n print('Columns are: ' + ', '.join(next(reader)))\n return\n\n try:\n reader = 
csv.DictReader(f)\n for row in reader:\n for col in columns:\n data[col].append(float(row[col]))\n except KeyError:\n print('Error: {} was called with an unknown column name \"{}\".\\n'\n 'Run \"python {} {}\" to get a list of all the existing '\n 'columns'.format(__file__, col, __file__, filepath))\n raise\n except ValueError:\n print('Error: {} was called with an invalid column name \"{}\".\\n'\n 'This column contains values that are not convertible to '\n 'floats.'.format(__file__, col))\n raise\n\n plt.ion()\n for col_name, values in data.items():\n plt.plot(values, label=col_name)\n plt.legend()\n plt.show()", "def plot_a_numeric_attribute(csv_file=None, col_to_plot=None, output_plot=None):\r\n # read data into pandas dataframe\r\n # [YOUR CODE HERE]\r\n df = pd.read_csv(csv_file)\r\n # use seaborn to plot distribution of data\r\n # ax = sns.distplot(ADD YOUR CODE HERE)\r\n ax = sns.distplot(df[col_to_plot])\r\n # save plot as png file\r\n # ax.get_figure().savefig(ADD YOUR CODE HERE)\r\n ax.get_figure().savefig(output_plot)", "def plot(filename, column):\n df = pd.read_csv(filename)\n if column is None:\n df.hist()\n else:\n df.hist(column=column);\n plt.show() # for terminal", "def save_plot_from_file(filename, stat_name):\n\n # Read in the data\n data = pd.read_csv(filename, sep=\"\\t\")\n try:\n stat = list(data[stat_name])\n except KeyError:\n s = \"utilities.stats.save_plots.save_plot_from_file\\n\" \\\n \"Error: stat %s does not exist\" % stat_name\n raise Exception(s)\n\n # Set up the figure.\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n # Plot the data.\n ax1.plot(stat)\n\n # Plot title.\n plt.title(stat_name)\n\n # Get save path\n save_path = pathsep.join(filename.split(pathsep)[:-1])\n\n # Save plot and close.\n plt.savefig(path.join(save_path, (stat_name + '.pdf')))\n plt.close()", "def create_mosquitos_vs_tempC_plot(filename):\n \n # write processing here\n # load data\n print(\"Loading\", filename)\n mosquitos_data = pandas.read_csv(filename)\n # convert celsius\n mosquitos_data[\"temperature_C\"] = fahr_to_celsius(mosquitos_data[\"temperature\"])\n # create the plot\n print(\"Plotting\", filename)\n plt.plot(mosquitos_data[\"temperature_C\"], mosquitos_data[\"mosquitos\"], \".\")\n # save the plot\n filename_png = filename[0:-4] + \"_mosquitos_vs_tempC.png\"\n plt.savefig(filename_png)\n print(\"Saving\", filename_png)\n return mosquitos_data", "def uti_data_file_plot(_fname, _read_labels=1, _e=0, _x=0, _y=0, _graphs_joined=True):\n #if '_backend' not in locals(): uti_plot_init() #?\n _backend.uti_data_file_plot(_fname, _read_labels, _e, _x, _y, _graphs_joined)", "def plot_loads(cpu_index):\n\n file_name = 'cpu{:0>3}.csv'.format(cpu_index)\n if os.path.exists(file_name):\n output_png = \"cpu%03d_loads.png\" % cpu_index\n g_plot = common_all_gnuplot_settings(output_png)\n g_plot('set yrange [0:100]')\n g_plot('set ytics 0, 10')\n g_plot('set title \"{} : loads : CPU {:0>3} : {:%F %H:%M}\"'.format(testname, cpu_index, datetime.now()))\n g_plot('set ylabel \"CPU load (percent)\"')\n# override common\n g_plot('set key off')\n set_4_plot_linestyles(g_plot)\n g_plot('plot \"' + file_name + '\" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD))", "def plot_sim_logs(data_filename, savedata_filename=None):\n ## Format Data\n data = read_csv(data_filename)\n data = data.astype('float32')\n data = data.transpose()\n\n ## Assign filename for saving data\n if savedata_filename is not None:\n filepath, _ = os.path.split(data_filename)\n 
filename = savedata_filename\n fileformat = '.png'\n\n angleplots_filename = os.path.join(filepath,\n (filename + 'angles' + fileformat))\n velplots_filename = os.path.join(filepath, (filename + 'vels' + fileformat))\n torqueplots_filename = os.path.join(filepath,\n (filename + 'torques' + fileformat))\n\n ## Slicing Data\n unit = 'degree'\n\n sim_time = data[TIME_ID]\n if unit == 'degree':\n motor_angles_des = data[DESMOTORANG_ID_BEGIN:DESMOTORANG_ID_END] * (180 /\n math.pi)\n motor_angles_act = data[ACTMOTORANG_ID_BEGIN:ACTMOTORANG_ID_END] * (180 /\n math.pi)\n else:\n motor_angles_des = data[DESMOTORANG_ID_BEGIN:DESMOTORANG_ID_END]\n motor_angles_act = data[ACTMOTORANG_ID_BEGIN:ACTMOTORANG_ID_END]\n motor_vels_des = data[DESMOTORVEL_ID_BEGIN:DESMOTORVEL_ID_END]\n motor_vels_act = data[ACTMOTORVEL_ID_BEGIN:ACTMOTORVEL_ID_END]\n motor_torques_act = data[ACTMOTORTORQ_ID_BEGIN:ACTMOTORTORQ_ID_END]\n\n # Plotting Motor Angles\n plt.figure()\n fig, axs = plt.subplots(4, 2, figsize=(12, 12))\n for motor in range(NUM_MOTORS):\n\n if motor < (NUM_MOTORS / 2):\n column = 0\n else:\n column = 1\n\n row = int(motor % (NUM_MOTORS / 2))\n\n axs[row, column].plot(\n sim_time, motor_angles_des[motor], linewidth=1.5, label='desired')\n axs[row, column].plot(\n sim_time, motor_angles_act[motor], linewidth=1.5, label='actual')\n axs[row, column].set_title(\n MOTOR_LABEL[motor] + ' Motor Angle Plots', fontsize=12)\n axs[row, column].set_xlabel('Time (s)', fontsize=12)\n if unit == 'degree':\n axs[row, column].set_ylabel('Motor Angle (deg)', fontsize=12)\n else:\n axs[row, column].set_ylabel('Motor Angle (rad)', fontsize=12)\n axs[row, column].legend(fontsize=10,)\n axs[row, column].grid(True)\n\n fig.tight_layout()\n plt.show()\n if savedata_filename is not None:\n fig.savefig(angleplots_filename, dpi=600)\n\n # Plotting Motor Velocities\n plt.figure()\n fig, axs = plt.subplots(4, 2, figsize=(12, 12))\n for motor in range(NUM_MOTORS):\n\n if motor < (NUM_MOTORS / 2):\n column = 0\n else:\n column = 1\n\n row = int(motor % (NUM_MOTORS / 2))\n\n axs[row, column].plot(\n sim_time, motor_vels_des[motor], linewidth=1.5, label='desired')\n axs[row, column].plot(\n sim_time, motor_vels_act[motor], linewidth=1.5, label='actual')\n axs[row, column].set_title(\n MOTOR_LABEL[motor] + ' Motor Velocity Plot', fontsize=12)\n axs[row, column].set_xlabel('Time (s)', fontsize=12)\n axs[row, column].set_ylabel('Motor Vel (rad/s)', fontsize=12)\n axs[row, column].legend(fontsize=10,)\n axs[row, column].grid(True)\n\n fig.tight_layout()\n plt.show()\n if savedata_filename is not None:\n fig.savefig(velplots_filename, dpi=600)\n\n plt.figure()\n fig, axs = plt.subplots(4, 2, figsize=(12, 12))\n for motor in range(NUM_MOTORS):\n\n if motor < (NUM_MOTORS / 2):\n column = 0\n else:\n column = 1\n\n row = int(motor % (NUM_MOTORS / 2))\n\n axs[row, column].plot(\n sim_time, motor_torques_act[motor], linewidth=1.5, label='actual')\n axs[row, column].set_title(\n MOTOR_LABEL[motor] + ' Motor Torque Plot', fontsize=12)\n axs[row, column].set_xlabel('Time (s)', fontsize=12)\n axs[row, column].set_ylabel('Motor Torque (Nm)', fontsize=12)\n axs[row, column].grid(True)\n\n fig.tight_layout()\n plt.show()\n if savedata_filename is not None:\n fig.savefig(torqueplots_filename, dpi=600)", "def generate_plot(filename, log_likelihood_list):\n plt.plot(log_likelihood_list, marker='.')\n plt.title(filename)\n plt.xlabel(\"Iteration #\")\n plt.ylabel(\"Log Likelihood\")\n plt.show()", "def plot(self, args):\n self.databaser.plot(args.song_id, 
args.f)", "def simple_plot(file_name, title, x, y, xlabel, ylabel):\n\n plt.plot(x, y)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n\n plt.savefig(file_name)\n plt.clf()", "def plot(self, filename=None, model=None):\n\n #import matplotlib\n #if (filename != None): matplotlib.use('pdf')\n\n import pylab\n pylab.figure()\n fontsize = 8\n markersize = 5\n\n #\n # Plot the raw measurements of differential power versus time and the enthalpogram.\n #\n\n pylab.subplot(211)\n pylab.hold(True)\n\n # Plot baseline fit.\n pylab.plot(self.filter_period_end_time / Units.s, self.baseline_power / (Units.ucal/Units.s), 'g-')\n \n # Plot differential power.\n pylab.plot(self.filter_period_end_time / Units.s, self.differential_power / (Units.ucal/Units.s), 'k.', markersize=markersize)\n\n # Plot injection time markers.\n [xmin, xmax, ymin, ymax] = pylab.axis()\n for injection in self.injections:\n last_index = injection['first_index'] # timepoint at start of syringe injection\n t = self.filter_period_end_time[last_index] / Units.s\n pylab.plot([t, t], [ymin, ymax], 'r-') \n\n # Label plot axes.\n xlabel = pylab.xlabel('time / s')\n xlabel.set_fontsize(fontsize)\n ylabel = pylab.ylabel('differential power / ucal/s')\n ylabel.set_fontsize(fontsize)\n\n # Change tick label font sizes.\n ax = pylab.gca()\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n\n # title plot\n title = pylab.title(self.data_filename)\n title.set_fontsize(fontsize)\n\n pylab.hold(False)\n\n #\n # Plot integrated heats and model fits.\n #\n \n pylab.subplot(212)\n pylab.hold(True)\n\n # Determine injection end times.\n injection_end_times = numpy.zeros([len(self.injections)], numpy.float64)\n for (index, injection) in enumerate(self.injections):\n # determine initial and final samples for injection \n first_index = injection['first_index'] # index of timepoint for first filtered differential power measurement\n last_index = injection['last_index'] # index of timepoint for last filtered differential power measurement\n # determine time at end of injection period\n injection_end_times[index] = self.filter_period_end_time[last_index] / Units.s\n\n # Plot model fits, if specified.\n if model: \n P0_n = model.trace('P0')[:]\n Ls_n = model.trace('Ls')[:] \n DeltaG_n = model.trace('DeltaG')[:]\n DeltaH_n = model.trace('DeltaH')[:]\n DeltaH0_n = model.trace('DeltaH_0')[:]\n N = DeltaG_n.size\n for n in range(N):\n expected_injection_heats = mcmc.q_n.parents['mu']._eval_fun\n q_n = expected_injection_heats(DeltaG=DeltaG_n[n], DeltaH=DeltaH_n[n], DeltaH_0=DeltaH0_n[n], P0=P0_n[n], Ls=Ls_n[n])\n pylab.plot(injection_end_times/ Units.s, q_n / Units.ucal, 'r-', linewidth=1)\n\n # Plot integrated heats.\n for (index, injection) in enumerate(self.injections):\n # determine time at end of injection period\n t = injection_end_times[index] / Units.s\n # plot a point there to represent total heat evolved in injection period\n y = injection['evolved_heat'] / Units.ucal\n pylab.plot(t, y, 'k.', markersize=markersize)\n #pylab.plot([t, t], [0, y], 'k-') # plot bar from zero line\n # label injection\n pylab.text(t, y, '%d' % injection['number'], fontsize=6) \n\n # Adjust axes to match first plot.\n [xmin_new, xmax_new, ymin, ymax] = pylab.axis()\n pylab.axis([xmin, xmax, ymin, ymax]) \n\n # Label axes.\n #pylab.title('evolved heat per injection')\n xlabel = pylab.xlabel('time / s')\n xlabel.set_fontsize(fontsize)\n ylabel = 
pylab.ylabel('evolved heat / ucal')\n ylabel.set_fontsize(fontsize)\n\n # Plot zero line.\n pylab.plot(experiment.filter_period_end_time / Units.s, 0.0*experiment.filter_period_end_time / (Units.ucal/Units.s), 'g-') # plot zero line\n\n # Adjust font sizes for tick labels.\n ax = pylab.gca()\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_fontsize(fontsize)\n\n pylab.hold(False)\n\n # \n # Send plot to appropriate output device.\n #\n\n if (filename != None):\n # Save the plot to the specified file.\n pylab.savefig(filename, dpi=150)\n else:\n # Show plot.\n pylab.show()\n \n return", "def plot_frequency_cpu():\n\n output_png = 'all_cpu_frequencies.png'\n g_plot = common_all_gnuplot_settings(output_png)\n g_plot('set yrange [0:4]')\n g_plot('set ylabel \"CPU Frequency (GHz)\"')\n g_plot('set title \"{} : cpu frequencies : {:%F %H:%M}\"'.format(testname, datetime.now()))\n\n title_list = subprocess.check_output('ls cpu???.csv | sed -e \\'s/.csv//\\'',shell=True).replace('\\n', ' ')\n plot_str = \"plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i\".format(C_ELAPSED, C_FREQ)\n g_plot('title_list = \"{}\"'.format(title_list))\n g_plot(plot_str)", "def plot(self, filename=None, show=True):\n\n #import matplotlib as mpl\n #mpl.use('Agg')\n import matplotlib.pyplot as plt\n from matplotlib.ticker import ScalarFormatter, FormatStrFormatter\n #import pylab as plt\n\n if self.v0 is None:\n sys.exit('plot(): self.v0 is None!')\n # self.fit()\n\n if filename is None and show is None:\n show = True\n\n fig = plt.figure(figsize=(10, 7))\n ax1 = fig.add_subplot(111)\n fig.subplots_adjust(left=0.12, right=0.9, top=0.9, bottom=0.15)\n if self.units == 'bohr':\n if self.eos_string == 'morse':\n plt.plot(self.sws, self.e, 'o')\n x = np.linspace(min(self.sws), max(self.sws), 100)\n y = eval('self.{0}'.format(self.eos_string))(\n x,\n self.eos_parameters[0], #+ self.eMin,\n self.eos_parameters[1],\n self.eos_parameters[2],\n self.eos_parameters[3],) + self.eMin\n elif self.eos_string == 'sjeos':\n plt.plot(self.angstrom2bohr(self.v), self.e / self.ry2ev, 'o')\n x = np.linspace(min(self.v), max(self.v), 100)\n y = eval('self.{0}'.format(self.eos_string))(\n x,\n self.eos_parameters[0], #+ self.eMin,\n self.eos_parameters[1],\n self.eos_parameters[2],\n self.eos_parameters[3],) + self.eMin\n #y = self.sjeosfit0(x**-(1.0 / 3.0))\n x = self.angstrom2bohr(x)\n y /= self.ry2ev\n else:\n plt.plot(self.angstrom2bohr(self.v), self.e / self.ry2ev, 'o')\n x = np.linspace(min(self.v), max(self.v), 100)\n y = eval('self.{0}'.format(self.eos_string))(\n x,\n self.eos_parameters[0] +\n self.eMin,\n self.eos_parameters[1],\n self.eos_parameters[2],\n self.eos_parameters[3],)\n x = self.angstrom2bohr(x)\n y /= self.ry2ev\n\n if self.units == 'angstrom':\n if self.eos_string == 'morse':\n plt.plot(self.bohr2angstrom(self.v), self.e, 'o')\n x = np.linspace(min(self.v), max(self.v), 100)\n y = eval('self.{0}'.format(self.eos_string))(\n x,\n self.eos_parameters[0] +\n self.eMin,\n self.eos_parameters[1],\n self.eos_parameters[2],\n self.eos_parameters[3],)\n x = self.bohr2angstrom(x)\n else:\n plt.plot(self.v, self.e / self.ry2ev, 'o')\n x = np.linspace(min(self.v), max(self.v), 100)\n y = eval('self.{0}'.format(self.eos_string))(\n x,\n self.eos_parameters[0] +\n self.eMin,\n self.eos_parameters[1],\n self.eos_parameters[2],\n self.eos_parameters[3],)\n y /= self.ry2ev\n\n plt.plot(x, y, '-r')\n plt.ylabel('energy 
[Ry]')\n ax1.yaxis.set_major_formatter(FormatStrFormatter('%0.6f'))\n ax = plt.gca()\n plt.text(0.2, 0.8, filename, transform=ax.transAxes)\n plt.text(\n 0.2,\n 0.75,\n 'Chi^2 = {0}'.format(\n self.chisqr),\n transform=ax.transAxes)\n plt.text(\n 0.2,\n 0.7,\n 'Red. Chi^2 = {0}'.format(\n self.redchi),\n transform=ax.transAxes)\n plt.text(\n 0.2,\n 0.65,\n 'R^2 = {0}'.format(\n self.rsquared),\n transform=ax.transAxes)\n if self.units == 'bohr':\n plt.xlabel('volume [WS-radius (in Bohr)]')\n plt.title(\n '{0}: E0: {1:.3f} Ry, w0: {2:.3f} Bohr, B0: {3:3.1f} GPa'.format(\n self.eos_string,\n self.e0,\n self.sws0,\n self.B0))\n if self.units == 'angstrom':\n plt.xlabel('volume / atom [Angstrom^3]')\n plt.title(\n '{0}: E0: {1:.3f} Ry, V0: {2:.3f} Angstrom^3, B0: {3:3.1f} GPa'.format(\n self.eos_string,\n self.e0,\n self.v0,\n self.B0))\n\n if show:\n # plt.tight_layout()\n plt.show()\n #if filename is not None:\n # fig.savefig(\"fit/{0}.png\".format(filename))\n\n # return f", "def _plot_rmse_fromR(self, filename, F=.3, scale=2, col=None):\n import pandas as pd\n df = pd.read_csv(filename, index_col=0, header=None)\n\n if col==None:\n df = df.mean(axis=1)\n elif col in df.columns:\n df = df[col]\n else:\n print(\"Invalid column provided. Use one of {}\".format(df.columns))\n for this in self.nodes():\n if this in self.signals:\n mse = df.ix[this] #.values[0]\n self.node[this]['mse'] = (1-(mse/F)**scale)\n self.node[this]['label'] = this+\"\\n\"+str(int(mse*1000)/1000.)\n else:\n self.node[this]['mse'] = 1\n cm = colormap.Colormap()\n self.plot(node_attribute=\"mse\", cmap=cm.get_cmap_heat())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function to check if a given cell (row, col) can be included in DFS
def isSafe(self, i, j, visited, l):
    # row number is in range, column number is in range and value is 1 and not yet visited
    return (i >= 0 and i < self.nrows and j >= 0 and j < self.ncols and not visited[i][j] and self.graph[i][j]==l)
[ "def DFS(r,c,r0,c0):\n if r >=0 and r <= rows-1 and c >= 0 and c <= cols-1 and grid[r][c] != 'V' and grid[r][c] != 0:\n shape.add((r-r0,c-c0)) # Get shape of current cell wrt base coordinates r0 and c0\n grid[r][c] = 'V'\n DFS(r+1,c,r0,c0)\n DFS(r-1,c,r0,c0)\n DFS(r,c+1,r0,c0)\n DFS(r,c-1,r0,c0)", "def isLegal(prevCol, currentCol,index,g):\r\n height = len(prevCol) # n+1\r\n legalColumns = True\r\n for h in range(1,height):\r\n if g[h-1][index] == 1:\r\n if prevCol[h-1] + currentCol[h-1] + prevCol[h] + currentCol[h] != 1:\r\n legalColumns=False\r\n break\r\n else:\r\n if prevCol[h - 1] + currentCol[h - 1] + prevCol[h] + currentCol[h] == 1:\r\n legalColumns=False\r\n break\r\n\r\n return legalColumns", "def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:\n row_length, col_length = len(grid), len(grid[0])\n if (\n min(row, col) < 0\n or row == row_length\n or col == col_length\n or (row, col) in visit\n or grid[row][col] == 1\n ):\n return 0\n if row == row_length - 1 and col == col_length - 1:\n return 1\n\n visit.add((row, col))\n\n count = 0\n count += depth_first_search(grid, row + 1, col, visit)\n count += depth_first_search(grid, row - 1, col, visit)\n count += depth_first_search(grid, row, col + 1, visit)\n count += depth_first_search(grid, row, col - 1, visit)\n\n visit.remove((row, col))\n return count", "def bfs_reachable_exit_rooms(self, row, col):\n queue = deque()\n queue.append((row, col))\n found_Exit = False\n while len(queue) > 0:\n node = queue.popleft()\n if self.__maze[node[0]][node[1]].is_exit:\n found_Exit = True\n if self.is_valid_room(node[0]-1, node[1]):\n self.__maze[node[0]-1][node[1]].visited = True\n queue.append((node[0]-1, node[1]))\n if self.is_valid_room(node[0]+1, node[1]):\n self.__maze[node[0]+1][node[1]].visited = True\n queue.append((node[0]+1, node[1]))\n if self.is_valid_room(node[0], node[1]-1):\n self.__maze[node[0]][node[1]-1].visited = True\n queue.append((node[0], node[1]-1))\n if self.is_valid_room(node[0], node[1]+1):\n self.__maze[node[0]][node[1]+1].visited = True\n queue.append((node[0], node[1]+1))\n return found_Exit", "def in_maze(self,node):\r\n return (0 <= node[0] < self.size) and (0 <= node[1] < self.size)", "def check_visited_position(self):\n return (self.cur_i, self.cur_j) in self.visited_positions", "def checkCell(board, i, j):\n\n move_i = []\n move_j = []\n board_size = len(board)\n if i > 0:\n move_i.append(-1)\n move_j.append(0)\n if i < (board_size - 1):\n move_i.append(1)\n move_j.append(0)\n if j > 0:\n move_j.append(-1)\n move_i.append(0)\n if j < (board_size - 1):\n move_j.append(1)\n move_i.append(0)\n for k in range(len(move_i)):\n if board[i + move_i[k]][j + move_j[k]] == board[i][j]:\n return True\n return False", "def check_if_occupied(board,x,y):\n\n if board[x][y]!='~':\n return True\n return False", "def has_unknown_neighbors_of_8(self, x, y):\n if self.has_unknown_neighbors_of_4(x, y):\n return True\n else:\n notAvailibleSpaces = []\n \n if(x!=0 and y!=0) and (self.is_cell_not_walkable(x-1,y-1)):\n notAvailibleSpaces.append((x-1,y-1))\n\n if(x!=self.map.info.width-1 and y!=self.map.info.height-1) and (self.is_cell_not_walkable(x+1,y+1)):\n notAvailibleSpaces.append((x+1,y+1))\n\n if(x!=self.map.info.width-1 and y!=0) and (self.is_cell_not_walkable(x+1,y-1)):\n notAvailibleSpaces.append((x+1,y-1))\n\n if(x!=0 and y!=self.map.info.height-1) and (self.is_cell_not_walkable(x-1,y+1)):\n notAvailibleSpaces.append((x-1,y+1))\n\n return len(notAvailibleSpaces) is not 0", "def 
check_colored_cells(board: list)-> bool:\r\n color = ['', '', '', '', '']\r\n for i in range(5):\r\n for col in range(5):\r\n color[i] = color[i] + board[8-i-col][i]\r\n for row in range(5):\r\n color[i] = color[i] + board[8-i][row+i]\r\n color[i] = color[i][1:]\r\n return check_rows(color)", "def possibility_check(puzzle, row, col, num):\r\n for i in range(9):\r\n if puzzle[row][i] == num: # row check\r\n return False\r\n if puzzle[i][col] == num: # col check\r\n return False\r\n row0 = (row//3)*3 # resolves to (0, 1, or 2) * 3 = 0, 3, or 6\r\n col0 = (col//3)*3 # 0, 3, or 6. Used for starting (upper left) square of inner squares.\r\n for i in range(3):\r\n for j in range(3): # Checking inner squares with double loop of length 3\r\n if puzzle[row0+i][col0+j] == num:\r\n return False\r\n return True", "def isMineAt(board, row, col):\n if board[row][col] == \"X\":\n return True\n else:\n return False", "def occupied_adjacent_neighbors(seats: list, row: int, column: int) -> int:\n neigh_seats = [(0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1)]\n neighbors = 0\n rows = len(seats)\n columns = len(seats[0])\n for dy, dx in neigh_seats:\n nrow, ncolumn = row+dy, column+dx\n if 0 <= nrow < rows and 0 <= ncolumn < columns and seats[nrow][ncolumn] == OCCUPIED_SEAT:\n neighbors += 1\n return neighbors", "def dig_2d(game, row, col):\n if game['state'] == 'defeat' or game['state'] == 'victory':\n game['state'] = game['state'] # keep the state the same\n return 0\n\n if game['board'][row][col] == '.':\n game['mask'][row][col] = True\n game['state'] = 'defeat'\n return 1\n\n bombs,covered_squares=coveredsquares(game)\n\n if bombs != 0:\n # if bombs is not equal to zero, set the game state to defeat and\n # return 0\n game['state'] = 'defeat'\n return 0\n if covered_squares == 0:\n game['state'] = 'victory'\n return 0\n #if number of covered squares if 0, then we won\n if game['mask'][row][col] != True:\n game['mask'][row][col] = True\n revealed = 1\n else:\n return 0\n #recursively digs on surrounding nodes if the node is a 0\n if game['board'][row][col] == 0:\n num_rows, num_cols = game['dimensions']\n if 0 <= row < num_rows:\n if 0 <= col < num_cols:\n nearbynodes=find_nearby_nodes(num_rows, num_cols, row, col)\n for node in nearbynodes:\n if game['board'][node[0]][node[1]] != '.':\n if game['mask'][node[0]][node[1]] == False:\n revealed += dig_2d(game, node[0], node[1])\n badsquares=bad_squares(game)\n if badsquares > 0:\n game['state'] = 'ongoing'\n return revealed\n else:\n game['state'] = 'victory'\n return revealed", "def check_room(self, row, col):\n if self.check_room_exists(row, col):\n if not self.__maze[row][col].is_impassable:\n return True\n else:\n self.__maze[row][col].explored = True\n return False\n else:\n return False", "def check_row(grid,row,col):\n\n current_player = grid[row][col]\n return grid[row].count(current_player) == len(grid)", "def DFS(self, i, j, visited, current_score, l, safe_path):\n \n # These arrays are used to get row and column numbers of 4 neighbours of a given cell\n rowNbr = [-1, 0, 0, 1];\n colNbr = [0 ,-1, 1, 0];\n \n # Mark this cell as visited\n visited[i][j] = True\n\n current_score+=1\n\n #See what other nodes became points\n safe_path.append((i, j))\n \n # Recur for all connected neighbours\n for k in range(0, 4):\n if self.isSafe(i + rowNbr[k], j + colNbr[k], visited, l):\n current_score, safe_path = self.DFS(i + rowNbr[k], j + colNbr[k], visited, current_score, l, safe_path)\n\n return (current_score, safe_path)", "def 
check_col(grid,row,col):\n\n current_player = grid[row][col]\n size = len(grid)\n count = 0\n\n # go through all fields in the column manually and increase count if they're\n # occupied by the same player as the chosen field\n for i in range(size):\n count += grid[i][col] == current_player\n\n return count == size", "def _check_legal_index(self, row, col):\n return 0 <= row and row < self._size and\\\n 0 <= col and col < self._size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A utility function to do DFS for a 2D boolean matrix. It only considers the 4 neighbours as adjacent vertices.
def DFS(self, i, j, visited, current_score, l, safe_path):
    # These arrays are used to get row and column numbers of 4 neighbours of a given cell
    rowNbr = [-1, 0, 0, 1];
    colNbr = [0 ,-1, 1, 0];

    # Mark this cell as visited
    visited[i][j] = True

    current_score += 1

    # See what other nodes became points
    safe_path.append((i, j))

    # Recur for all connected neighbours
    for k in range(0, 4):
        if self.isSafe(i + rowNbr[k], j + colNbr[k], visited, l):
            current_score, safe_path = self.DFS(i + rowNbr[k], j + colNbr[k], visited, current_score, l, safe_path)

    return (current_score, safe_path)
[ "def DFS(graph, vertex, vertex2):\r\n\r\n # Initializing the flag of path and all vertices visited or not to false\r\n path = False\r\n for key in graph.booleanVerticeTraversed:\r\n graph.booleanVerticeTraversed[key][0] = False\r\n\r\n def DFSHelper(graph, vertex, vertex2):\r\n \"\"\"Helper function which runs the main DFS Algorithm.\"\"\"\r\n\r\n # Refers to the global path variable of whether or not\r\n # there exists a path between the two vertices\r\n global path\r\n\r\n # Visited the first starting vertex\r\n graph.booleanVerticeTraversed[vertex][0] = True\r\n\r\n # Storing and referencing list of neighbours\r\n neighbours = graph.booleanVerticeTraversed[vertex][1:]\r\n\r\n # If there are no neighbours, then no path is there between those\r\n # two vertices\r\n if neighbours == []:\r\n path = False\r\n return\r\n\r\n\r\n # Checking whether or not all the neighbours are visited\r\n flag = True\r\n for neighbour in neighbours:\r\n if graph.booleanVerticeTraversed[neighbour][0] == False:\r\n flag = False\r\n\r\n # If yes, then there is no path between those two vertices\r\n # otherwise the path would have been found out by now\r\n if flag == True:\r\n path = False\r\n return\r\n\r\n # Checking on neighbours which are not visited\r\n for neighbour in neighbours:\r\n if graph.booleanVerticeTraversed[neighbour][0] == False:\r\n # If the neighbour is the required vertex, then there exists a path\r\n if neighbour == vertex2:\r\n path = True\r\n return\r\n # Else recursively run DFS on the neighbour\r\n else:\r\n DFSHelper(graph,neighbour,vertex2)\r\n\r\n # Calling Main DFS Algorithm (Helper)\r\n DFSHelper(graph,vertex,vertex2)\r\n\r\n return path", "def DFS(r,c,r0,c0):\n if r >=0 and r <= rows-1 and c >= 0 and c <= cols-1 and grid[r][c] != 'V' and grid[r][c] != 0:\n shape.add((r-r0,c-c0)) # Get shape of current cell wrt base coordinates r0 and c0\n grid[r][c] = 'V'\n DFS(r+1,c,r0,c0)\n DFS(r-1,c,r0,c0)\n DFS(r,c+1,r0,c0)\n DFS(r,c-1,r0,c0)", "def calculateNeighbours(mat):\n N = mat[:-2, :-2] + mat[:-2, 1:-1] + mat[:-2, 2:] + mat[1:-1, :-2] + mat[1:-1, 2:] + mat[2:, :-2] + mat[2:, 1:-1] + mat[2:, 2:]\n # N is about the size of the visual region\n return (N == 2) | (N == 3)", "def DFS(self, adj, stack, visited) :\n while len(stack) > 0 :\n node = stack.pop(-1)\n for j in adj[node] :\n if not visited[j] and len(adj[j])>0 :\n visited[j] = True\n stack.append(j)", "def DFS(g, u, discovered):\n for e in g.incident_edges(u): # for every outgoing edge from u\n v = e.opposite(u)\n if v not in discovered: # v is an unvisited vertex\n discovered[v] = e # e is the tree edge that discovered v\n DFS(g, v, discovered) # recursively explore from v", "def neighbors(bool_ar):\n return bool_ar ^ dilate(bool_ar)", "def neighbors_of_4(mapdata, x, y):\n neighbors = []\n offset = [-1, 1]\n for off in offset:\n if PathPlanner.is_cell_walkable(mapdata, x + off, y):\n newNeighbor = (x + off, y)\n neighbors.append(newNeighbor)\n if PathPlanner.is_cell_walkable(mapdata, x, y + off):\n newNeighbor = (x, y + off)\n neighbors.append(newNeighbor)\n return neighbors", "def dfs_all():\n for vertex in g:\n if not visited[vertex]:\n dfs_util(vertex)", "def isConnectedToEdge(*args, **kwargs):\n \n pass", "def check_graph(adj_matrix):\n check_sym = (len(np.where(adj_matrix!=adj_matrix.T)[0])==0)\n check_connected = is_connected(adj_matrix)\n return check_sym and check_connected", "def build_adjlist(self):\n adj_list = {}\n for i in range(0, self.maze_numrows):\n for j in range (0, self.maze_numcols):\n adjacent_cells = []\n 
if(self.paths[i][j] == 0):\n # check if connected to northwest cell\n if(i - 1 >= 0) and (j - 1 >= 0):\n if(self.paths[i - 1][j - 1] == 0):\n adjacent_cells.append(str(i - 1) + \" \" + str(j - 1))\n # check if connected to north cell\n if(i - 1 >= 0):\n if(self.paths[i - 1][j] == 0):\n adjacent_cells.append(str(i - 1) + \" \" + str(j))\n # check if connected to northeast cell\n if(i - 1 >= 0) and (j + 1 < self.maze_numcols):\n if(self.paths[i - 1][j + 1] == 0):\n adjacent_cells.append(str(i - 1) + \" \" + str(j + 1))\n # check if connected to west cell\n if(j - 1 >= 0):\n if(self.paths[i][j - 1] == 0):\n adjacent_cells.append(str(i) + \" \" + str(j - 1))\n # check if connected to east cell\n if(j + 1 < self.maze_numcols):\n if(self.paths[i][j + 1] == 0):\n adjacent_cells.append(str(i) + \" \" + str(j + 1))\n # check if connected to southwest cell\n if(i + 1 < self.maze_numrows) and (j - 1 >= 0):\n if(self.paths[i + 1][j - 1] == 0):\n adjacent_cells.append(str(i + 1) + \" \" + str(j - 1))\n # check if connected to south cell\n if(i + 1 < self.maze_numrows):\n if(self.paths[i + 1][j] == 0):\n adjacent_cells.append(str(i + 1) + \" \" + str(j))\n # check if connected to southeast cell\n if(i + 1 < self.maze_numrows) and (j + 1 < self.maze_numcols):\n if(self.paths[i + 1][j + 1] == 0):\n adjacent_cells.append(str(i + 1) + \" \" + str(j + 1))\n adj_list[str(i) + \" \" + str(j)] = adjacent_cells\n setattr(self, 'adj_list', adj_list)", "def neighbours(matrix):\n \n rows = len(matrix)\n columns = len(matrix[1])\n for row in range(rows):\n for column in range(columns): # Cycling through each point in the matrix\n num_neighbours = 0 # This initiates the variable that will become the new value for that point within the matrix\n if matrix[row][column] == 1: # This loop checks in turn the point left, right, above and below the selected point for a neighbour\n if column - 1 >= 0 and matrix[row][column - 1] != 0 and matrix[row][column - 1] != 88: # The point being checked !=0 or 88 as these points are either not within\n num_neighbours += 1 # the target area, or are out of bounds.\n if column + 1 <= columns and matrix[row][column + 1] != 0 and matrix[row][column + 1] != 88: # The number 88 was choosen to indicate invalid points as it is clearly identifiable during print statement error checking.\n num_neighbours += 1 # I also had to ensure that I did not accientally check a point at the end of\n if row - 1 >= 0 and matrix[row - 1][column] != 0 and matrix[row - 1][column] != 88: # a row, or the last row, by accident (e.g if I looked at matrix[0][-1]).\n num_neighbours += 1\n if row + 1 <= rows and matrix[row + 1][column] != 0 and matrix[row + 1][column] != 88:\n num_neighbours += 1 \n matrix[row][column] = num_neighbours # The matrix is then updated with its new values. 
\n return matrix", "def dfs(self, v_start, v_end=None) -> []:\r\n # initialize a stack to keep next vertices\r\n next_verts = []\r\n visited_verts = []\r\n next_verts.append(v_start)\r\n\r\n # check to make sure the start vert is in the graph\r\n if 0 <= v_start < self.v_count:\r\n\r\n # while next_verts is not empty, keep going\r\n while next_verts:\r\n # pop the top vertex\r\n src_vert = next_verts.pop()\r\n\r\n # if the vertex that we're currently on is the ending vertex, end the function\r\n if src_vert == v_end:\r\n visited_verts.append(src_vert)\r\n return visited_verts\r\n\r\n if src_vert not in visited_verts:\r\n # push all the destination verts that have an edge to the stack from the back index forward\r\n for dst_vert in range(self.v_count - 1, -1, -1):\r\n edge = self.adj_matrix[src_vert][dst_vert]\r\n if edge != 0:\r\n next_verts.append(dst_vert)\r\n visited_verts.append(src_vert)\r\n return visited_verts", "def percolates(flow_matrix):\n n = len(flow_matrix)\n full = directed_flow(flow_matrix)\n for j in range(n):\n if full[n - 1, j] == 1:\n return True\n return False", "def findEdges(self):\n for nc in self.nodes:\n x = nc[0]\n y = nc[1]\n nc_neighbours = self.nodes.get(nc).neighbours\n # Check for adjacent nodes in all directions\n if (x - self.x_div_len, y) in self.nodes:\n nc_neighbours.append(self.nodes.get((x - self.x_div_len, y)))\n if (x + self.x_div_len, y) in self.nodes:\n nc_neighbours.append(self.nodes.get((x + self.x_div_len, y)))\n if (x, y - self.y_div_len) in self.nodes:\n nc_neighbours.append(self.nodes.get((x, y - self.y_div_len)))\n if (x, y + self.y_div_len) in self.nodes:\n nc_neighbours.append(self.nodes.get((x, y + self.y_div_len)))", "def solution(self, grid, m, n, x0, y0, x1, y1):\n\n def check_exceed(x, y, m, n):\n if x < 0 or x >= m:\n return False\n if y < 0 or y >= n:\n return False\n return True\n\n def check_leak(leakage, x, y):\n return leakage[x][y] == \"*\"\n\n # BFS initialization (queue + visit + path_len)\n from collections import deque\n q = deque()\n q.append([x0, y0])\n\n path_len = 1\n\n # without visit matrix, BFS will be stuck in an infinite loop\n visit = [[False] * n for _ in range(m)]\n visit[x0][y0] = True\n\n # leakage matrix\n leakage = grid[:][:]\n\n # BFS\n while q:\n # iterate path level by level\n for _ in range(len(q)):\n coordinates = q.popleft()\n if coordinates == [x1, y1]:\n return path_len\n\n x = coordinates[0]\n y = coordinates[1]\n\n # left\n if not check_exceed(x - 1, y, m, n) and not check_leak(leakage, x - 1, y):\n if not visit[x - 1][y]:\n visit[x - 1][y] = True\n q.append([x - 1, y])\n # right\n if not check_exceed(x + 1, y, m, n) and not check_leak(leakage, x + 1, y):\n if not visit[x + 1][y]:\n visit[x + 1][y] = True\n q.append([x + 1, y])\n # top\n if not check_exceed(x, y - 1, m, n) and not check_leak(leakage, x, y - 1):\n if not visit[x][y - 1]:\n visit[x][y - 1] = True\n q.append([x, y - 1])\n # bottom\n if not check_exceed(x, y + 1, m, n) and not check_leak(leakage, x, y + 1):\n if not visit[x][y + 1]:\n visit[x][y + 1] = True\n q.append([x, y + 1])\n\n path_len += 1\n\n # iterate leakage\n temp = leakage[:][:]\n\n for i in range(m):\n for j in range(n):\n if check_leak(leakage, i, j):\n # left\n if not check_exceed(i - 1, j, m, n):\n temp[i - 1][j] = \"*\"\n # right\n if not check_exceed(i + 1, j, m, n):\n temp[i + 1][j] = \"*\"\n # top\n if not check_exceed(i, j - 1, m, n):\n temp[i][j - 1] = \"*\"\n # bottom\n if not check_exceed(i, j + 1, m, n):\n temp[i][j + 1] = \"*\"\n leakage = temp[:][:]\n 
return -1", "def is_connected(G, node, visited, depth_limit=0):\n \n if depth_limit == 20:\n return False\n if all(list(visited.values())):\n return True\n visited[node] = True\n connected = []\n for child in G[node]:\n connected += [is_connected(G, child, visited, depth_limit+1)]\n return all(connected)", "def dfs(starting_puzzle):\n search_stack = [starting_puzzle]\n while search_stack:\n puzzle = search_stack.pop()\n if puzzle.propagate_constraint():\n if puzzle.is_solved():\n return puzzle\n cell = min(puzzle.unset_cells(), key = puzzle.branches)\n for alternative in puzzle.content(cell):\n child = puzzle.copy()\n child.set(cell, alternative)\n search_stack.append(child)\n return None", "def neighbors_of_4(mapdata, x, y):\n if self.is_cell_walkable(mapdata, x+1, y):\n walkFour.add((x+1, y))\n if self.is_cell_walkable(mapdata, x-1, y):\n walkFour.add((x-1, y))\n if self.is_cell_walkable(mapdata, x, y+1):\n walkFour.add((x, y+1))\n if self.is_cell_walkable(x, y-1):\n walkFour.is_cell_walkable((x, y+1))\n\n return walkFour" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }